diff -Nru docker.io-0.9.1~dfsg1/api/api_unit_test.go docker.io-1.3.2~dfsg1/api/api_unit_test.go --- docker.io-0.9.1~dfsg1/api/api_unit_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/api/api_unit_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,9 +1,6 @@ package api import ( - "fmt" - "net/http" - "net/http/httptest" "testing" ) @@ -20,46 +17,3 @@ t.Fail() } } - -func TestGetBoolParam(t *testing.T) { - if ret, err := getBoolParam("true"); err != nil || !ret { - t.Fatalf("true -> true, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("True"); err != nil || !ret { - t.Fatalf("True -> true, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("1"); err != nil || !ret { - t.Fatalf("1 -> true, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam(""); err != nil || ret { - t.Fatalf("\"\" -> false, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("false"); err != nil || ret { - t.Fatalf("false -> false, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("0"); err != nil || ret { - t.Fatalf("0 -> false, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("faux"); err == nil || ret { - t.Fatalf("faux -> false, err | got %t %s", ret, err) - } -} - -func TesthttpError(t *testing.T) { - r := httptest.NewRecorder() - - httpError(r, fmt.Errorf("No such method")) - if r.Code != http.StatusNotFound { - t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code) - } - - httpError(r, fmt.Errorf("This accound hasn't been activated")) - if r.Code != http.StatusForbidden { - t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code) - } - - httpError(r, fmt.Errorf("Some error")) - if r.Code != http.StatusInternalServerError { - t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code) - } -} diff -Nru docker.io-0.9.1~dfsg1/api/client/cli.go docker.io-1.3.2~dfsg1/api/client/cli.go --- docker.io-0.9.1~dfsg1/api/client/cli.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/api/client/cli.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,148 @@ +package client + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io" + "os" + "reflect" + "strings" + "text/template" + + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" + "github.com/docker/libtrust" +) + +type DockerCli struct { + proto string + addr string + configFile *registry.ConfigFile + in io.ReadCloser + out io.Writer + err io.Writer + key libtrust.PrivateKey + tlsConfig *tls.Config + scheme string + // inFd holds file descriptor of the client's STDIN, if it's a valid file + inFd uintptr + // outFd holds file descriptor of the client's STDOUT, if it's a valid file + outFd uintptr + // isTerminalIn describes if client's STDIN is a TTY + isTerminalIn bool + // isTerminalOut describes if client's STDOUT is a TTY + isTerminalOut bool +} + +var funcMap = template.FuncMap{ + "json": func(v interface{}) string { + a, _ := json.Marshal(v) + return string(a) + }, +} + +func (cli *DockerCli) getMethod(args ...string) (func(...string) error, bool) { + camelArgs := make([]string, len(args)) + for i, s := range args { + if len(s) == 0 { + return nil, false + } + camelArgs[i] = strings.ToUpper(s[:1]) + strings.ToLower(s[1:]) + } + methodName := "Cmd" + strings.Join(camelArgs, "") + method := reflect.ValueOf(cli).MethodByName(methodName) + if !method.IsValid() { + return nil, false + } + return method.Interface().(func(...string) error), true +} + +// Cmd executes the specified command 
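+// by reflecting it onto a matching Cmd* method: the first one or two
+// arguments are camel-cased into a method name (e.g. "docker run" ->
+// CmdRun) via getMethod, and the remaining arguments are passed through.
+// Unknown commands fall back to CmdHelp.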
+func (cli *DockerCli) Cmd(args ...string) error { + if len(args) > 1 { + method, exists := cli.getMethod(args[:2]...) + if exists { + return method(args[2:]...) + } + } + if len(args) > 0 { + method, exists := cli.getMethod(args[0]) + if !exists { + fmt.Println("Error: Command not found:", args[0]) + return cli.CmdHelp(args[1:]...) + } + return method(args[1:]...) + } + return cli.CmdHelp(args...) +} + +func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet { + flags := flag.NewFlagSet(name, flag.ContinueOnError) + flags.Usage = func() { + options := "" + if flags.FlagCountUndeprecated() > 0 { + options = "[OPTIONS] " + } + fmt.Fprintf(cli.err, "\nUsage: docker %s %s%s\n\n%s\n\n", name, options, signature, description) + flags.PrintDefaults() + os.Exit(2) + } + return flags +} + +func (cli *DockerCli) LoadConfigFile() (err error) { + cli.configFile, err = registry.LoadConfig(os.Getenv("HOME")) + if err != nil { + fmt.Fprintf(cli.err, "WARNING: %s\n", err) + } + return err +} + +func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey, proto, addr string, tlsConfig *tls.Config) *DockerCli { + var ( + inFd uintptr + outFd uintptr + isTerminalIn = false + isTerminalOut = false + scheme = "http" + ) + + if tlsConfig != nil { + scheme = "https" + } + + if in != nil { + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = term.IsTerminal(inFd) + } + } + + if out != nil { + if file, ok := out.(*os.File); ok { + outFd = file.Fd() + isTerminalOut = term.IsTerminal(outFd) + } + } + + if err == nil { + err = out + } + + return &DockerCli{ + proto: proto, + addr: addr, + in: in, + out: out, + err: err, + key: key, + inFd: inFd, + outFd: outFd, + isTerminalIn: isTerminalIn, + isTerminalOut: isTerminalOut, + tlsConfig: tlsConfig, + scheme: scheme, + } +} diff -Nru docker.io-0.9.1~dfsg1/api/client/commands.go docker.io-1.3.2~dfsg1/api/client/commands.go --- docker.io-0.9.1~dfsg1/api/client/commands.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/api/client/commands.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,2548 @@ +package client + +import ( + "bufio" + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "strconv" + "strings" + "syscall" + "text/tabwriter" + "text/template" + "time" + + "github.com/docker/docker/api" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/engine" + "github.com/docker/docker/graph" + "github.com/docker/docker/nat" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/log" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/filters" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/pkg/timeutils" + "github.com/docker/docker/pkg/units" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" +) + +const ( + tarHeaderSize = 512 +) + +func (cli *DockerCli) CmdHelp(args ...string) error { + if len(args) > 1 { + method, exists := cli.getMethod(args[:2]...) 
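+		// if the first two arguments name a command, print that
+		// command's own --help output instead of the general usage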
+ if exists { + method("--help") + return nil + } + } + if len(args) > 0 { + method, exists := cli.getMethod(args[0]) + if !exists { + fmt.Fprintf(cli.err, "Error: Command not found: %s\n", args[0]) + } else { + method("--help") + return nil + } + } + + flag.Usage() + + return nil +} + +func (cli *DockerCli) CmdBuild(args ...string) error { + cmd := cli.Subcmd("build", "PATH | URL | -", "Build a new image from the source code at PATH") + tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success") + suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers") + noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") + rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build") + forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers, even after unsuccessful builds") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + var ( + context archive.Archive + isRemote bool + err error + ) + + _, err = exec.LookPath("git") + hasGit := err == nil + if cmd.Arg(0) == "-" { + // As a special case, 'docker build -' will build from either an empty context with the + // contents of stdin as a Dockerfile, or a tar-ed context from stdin. + buf := bufio.NewReader(cli.in) + magic, err := buf.Peek(tarHeaderSize) + if err != nil && err != io.EOF { + return fmt.Errorf("failed to peek context header from STDIN: %v", err) + } + if !archive.IsArchive(magic) { + dockerfile, err := ioutil.ReadAll(buf) + if err != nil { + return fmt.Errorf("failed to read Dockerfile from STDIN: %v", err) + } + context, err = archive.Generate("Dockerfile", string(dockerfile)) + } else { + context = ioutil.NopCloser(buf) + } + } else if utils.IsURL(cmd.Arg(0)) && (!utils.IsGIT(cmd.Arg(0)) || !hasGit) { + isRemote = true + } else { + root := cmd.Arg(0) + if utils.IsGIT(root) { + remoteURL := cmd.Arg(0) + if !strings.HasPrefix(remoteURL, "git://") && !strings.HasPrefix(remoteURL, "git@") && !utils.IsURL(remoteURL) { + remoteURL = "https://" + remoteURL + } + + root, err = ioutil.TempDir("", "docker-build-git") + if err != nil { + return err + } + defer os.RemoveAll(root) + + if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { + return fmt.Errorf("Error trying to use git: %s (%s)", err, output) + } + } + if _, err := os.Stat(root); err != nil { + return err + } + filename := path.Join(root, "Dockerfile") + if _, err = os.Stat(filename); os.IsNotExist(err) { + return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0)) + } + var excludes []string + ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore")) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("Error reading .dockerignore: '%s'", err) + } + for _, pattern := range strings.Split(string(ignore), "\n") { + ok, err := filepath.Match(pattern, "Dockerfile") + if err != nil { + return fmt.Errorf("Bad .dockerignore pattern: '%s', error: %s", pattern, err) + } + if ok { + return fmt.Errorf("Dockerfile was excluded by .dockerignore pattern '%s'", pattern) + } + excludes = append(excludes, pattern) + } + if err = utils.ValidateContextDirectory(root, excludes); err != nil { + return fmt.Errorf("Error checking context is accessible: '%s'. 
Please check permissions and try again.", err) + } + options := &archive.TarOptions{ + Compression: archive.Uncompressed, + Excludes: excludes, + } + context, err = archive.TarWithOptions(root, options) + if err != nil { + return err + } + } + var body io.Reader + // Setup an upload progress bar + // FIXME: ProgressReader shouldn't be this annoying to use + if context != nil { + sf := utils.NewStreamFormatter(false) + body = utils.ProgressReader(context, 0, cli.err, sf, true, "", "Sending build context to Docker daemon") + } + // Send the build context + v := &url.Values{} + + //Check if the given image name can be resolved + if *tag != "" { + repository, tag := parsers.ParseRepositoryTag(*tag) + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err + } + if len(tag) > 0 { + if err := graph.ValidateTagName(tag); err != nil { + return err + } + } + } + + v.Set("t", *tag) + + if *suppressOutput { + v.Set("q", "1") + } + if isRemote { + v.Set("remote", cmd.Arg(0)) + } + if *noCache { + v.Set("nocache", "1") + } + if *rm { + v.Set("rm", "1") + } else { + v.Set("rm", "0") + } + + if *forceRm { + v.Set("forcerm", "1") + } + + cli.LoadConfigFile() + + headers := http.Header(make(map[string][]string)) + buf, err := json.Marshal(cli.configFile) + if err != nil { + return err + } + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + + if context != nil { + headers.Set("Content-Type", "application/tar") + } + err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers) + if jerr, ok := err.(*utils.JSONError); ok { + // If no error code is set, default to 1 + if jerr.Code == 0 { + jerr.Code = 1 + } + return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code} + } + return err +} + +// 'docker login': login / register a user to registry service. 
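+// When no SERVER argument is given, credentials are registered against the
+// official index returned by registry.IndexServerAddress().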
+func (cli *DockerCli) CmdLogin(args ...string) error {
+	cmd := cli.Subcmd("login", "[SERVER]", "Register or log in to a Docker registry server, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.")
+
+	var username, password, email string
+
+	cmd.StringVar(&username, []string{"u", "-username"}, "", "Username")
+	cmd.StringVar(&password, []string{"p", "-password"}, "", "Password")
+	cmd.StringVar(&email, []string{"e", "-email"}, "", "Email")
+	err := cmd.Parse(args)
+	if err != nil {
+		return nil
+	}
+	serverAddress := registry.IndexServerAddress()
+	if len(cmd.Args()) > 0 {
+		serverAddress = cmd.Arg(0)
+	}
+
+	promptDefault := func(prompt string, configDefault string) {
+		if configDefault == "" {
+			fmt.Fprintf(cli.out, "%s: ", prompt)
+		} else {
+			fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault)
+		}
+	}
+
+	readInput := func(in io.Reader, out io.Writer) string {
+		reader := bufio.NewReader(in)
+		line, _, err := reader.ReadLine()
+		if err != nil {
+			fmt.Fprintln(out, err.Error())
+			os.Exit(1)
+		}
+		return string(line)
+	}
+
+	cli.LoadConfigFile()
+	authconfig, ok := cli.configFile.Configs[serverAddress]
+	if !ok {
+		authconfig = registry.AuthConfig{}
+	}
+
+	if username == "" {
+		promptDefault("Username", authconfig.Username)
+		username = readInput(cli.in, cli.out)
+		if username == "" {
+			username = authconfig.Username
+		}
+	}
+	// Assume that a different username means they may not want to use
+	// the password or email from the config file, so prompt them
+	if username != authconfig.Username {
+		if password == "" {
+			oldState, _ := term.SaveState(cli.inFd)
+			fmt.Fprintf(cli.out, "Password: ")
+			term.DisableEcho(cli.inFd, oldState)
+
+			password = readInput(cli.in, cli.out)
+			fmt.Fprint(cli.out, "\n")
+
+			term.RestoreTerminal(cli.inFd, oldState)
+			if password == "" {
+				return fmt.Errorf("Error: Password Required")
+			}
+		}
+
+		if email == "" {
+			promptDefault("Email", authconfig.Email)
+			email = readInput(cli.in, cli.out)
+			if email == "" {
+				email = authconfig.Email
+			}
+		}
+	} else {
+		// However, if they don't override the username, use the
+		// password or email from the cmd line if specified. IOW, allow
+		// them to change/override them. 
And if not specified, just + // use what's in the config file + if password == "" { + password = authconfig.Password + } + if email == "" { + email = authconfig.Email + } + } + authconfig.Username = username + authconfig.Password = password + authconfig.Email = email + authconfig.ServerAddress = serverAddress + cli.configFile.Configs[serverAddress] = authconfig + + stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], false) + if statusCode == 401 { + delete(cli.configFile.Configs, serverAddress) + registry.SaveConfig(cli.configFile) + return err + } + if err != nil { + return err + } + var out2 engine.Env + err = out2.Decode(stream) + if err != nil { + cli.configFile, _ = registry.LoadConfig(os.Getenv("HOME")) + return err + } + registry.SaveConfig(cli.configFile) + if out2.Get("Status") != "" { + fmt.Fprintf(cli.out, "%s\n", out2.Get("Status")) + } + return nil +} + +// log out from a Docker registry +func (cli *DockerCli) CmdLogout(args ...string) error { + cmd := cli.Subcmd("logout", "[SERVER]", "Log out from a Docker registry, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.") + + if err := cmd.Parse(args); err != nil { + return nil + } + serverAddress := registry.IndexServerAddress() + if len(cmd.Args()) > 0 { + serverAddress = cmd.Arg(0) + } + + cli.LoadConfigFile() + if _, ok := cli.configFile.Configs[serverAddress]; !ok { + fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress) + } else { + fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress) + delete(cli.configFile.Configs, serverAddress) + + if err := registry.SaveConfig(cli.configFile); err != nil { + return fmt.Errorf("Failed to save docker config: %v", err) + } + } + return nil +} + +// 'docker wait': block until a container stops +func (cli *DockerCli) CmdWait(args ...string) error { + cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + var encounteredError error + for _, name := range cmd.Args() { + status, err := waitForExit(cli, name) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to wait one or more containers") + } else { + fmt.Fprintf(cli.out, "%d\n", status) + } + } + return encounteredError +} + +// 'docker version': show version information +func (cli *DockerCli) CmdVersion(args ...string) error { + cmd := cli.Subcmd("version", "", "Show the Docker version information.") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() > 0 { + cmd.Usage() + return nil + } + if dockerversion.VERSION != "" { + fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION) + } + fmt.Fprintf(cli.out, "Client API version: %s\n", api.APIVERSION) + fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version()) + if dockerversion.GITCOMMIT != "" { + fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT) + } + fmt.Fprintf(cli.out, "OS/Arch (client): %s/%s\n", runtime.GOOS, runtime.GOARCH) + + body, _, err := readBody(cli.call("GET", "/version", nil, false)) + if err != nil { + return err + } + + out := engine.NewOutput() + remoteVersion, err := out.AddEnv() + if err != nil { + log.Errorf("Error reading remote version: %s", err) + return err + } + if _, err := out.Write(body); err != nil { + log.Errorf("Error reading remote version: %s", err) + return err + } + 
out.Close() + fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version")) + if apiVersion := remoteVersion.Get("ApiVersion"); apiVersion != "" { + fmt.Fprintf(cli.out, "Server API version: %s\n", apiVersion) + } + fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion")) + fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit")) + return nil +} + +// 'docker info': display system-wide information. +func (cli *DockerCli) CmdInfo(args ...string) error { + cmd := cli.Subcmd("info", "", "Display system-wide information") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() > 0 { + cmd.Usage() + return nil + } + + body, _, err := readBody(cli.call("GET", "/info", nil, false)) + if err != nil { + return err + } + + out := engine.NewOutput() + remoteInfo, err := out.AddEnv() + if err != nil { + return err + } + + if _, err := out.Write(body); err != nil { + log.Errorf("Error reading remote info: %s", err) + return err + } + out.Close() + + fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers")) + fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images")) + fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver")) + var driverStatus [][2]string + if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil { + return err + } + for _, pair := range driverStatus { + fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) + } + fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) + fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) + fmt.Fprintf(cli.out, "Operating System: %s\n", remoteInfo.Get("OperatingSystem")) + + if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" { + fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug")) + fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "") + fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd")) + fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines")) + fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener")) + + if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" { + fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1) + } + if initPath := remoteInfo.Get("InitPath"); initPath != "" { + fmt.Fprintf(cli.out, "Init Path: %s\n", initPath) + } + } + + if len(remoteInfo.GetList("IndexServerAddress")) != 0 { + cli.LoadConfigFile() + u := cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username + if len(u) > 0 { + fmt.Fprintf(cli.out, "Username: %v\n", u) + fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress")) + } + } + if !remoteInfo.GetBool("MemoryLimit") { + fmt.Fprintf(cli.err, "WARNING: No memory limit support\n") + } + if !remoteInfo.GetBool("SwapLimit") { + fmt.Fprintf(cli.err, "WARNING: No swap limit support\n") + } + if !remoteInfo.GetBool("IPv4Forwarding") { + fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n") + } + return nil +} + +func (cli *DockerCli) CmdStop(args ...string) error { + cmd := cli.Subcmd("stop", "CONTAINER [CONTAINER...]", "Stop a running container by sending SIGTERM and then SIGKILL after a grace period") + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it. 
Default is 10 seconds.") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + v := url.Values{} + v.Set("t", strconv.Itoa(*nSeconds)) + + var encounteredError error + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, false)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to stop one or more containers") + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +func (cli *DockerCli) CmdRestart(args ...string) error { + cmd := cli.Subcmd("restart", "CONTAINER [CONTAINER...]", "Restart a running container") + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds.") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + v := url.Values{} + v.Set("t", strconv.Itoa(*nSeconds)) + + var encounteredError error + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, false)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to restart one or more containers") + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { + sigc := make(chan os.Signal, 128) + signal.CatchAll(sigc) + go func() { + for s := range sigc { + if s == syscall.SIGCHLD { + continue + } + var sig string + for sigStr, sigN := range signal.SignalMap { + if sigN == s { + sig = sigStr + break + } + } + if sig == "" { + log.Errorf("Unsupported signal: %d. 
Discarding.", s) + } + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil { + log.Debugf("Error sending signal: %s", err) + } + } + }() + return sigc +} + +func (cli *DockerCli) CmdStart(args ...string) error { + var ( + cErr chan error + tty bool + + cmd = cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container") + attach = cmd.Bool([]string{"a", "-attach"}, false, "Attach container's STDOUT and STDERR and forward all signals to the process") + openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN") + ) + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + if *attach || *openStdin { + if cmd.NArg() > 1 { + return fmt.Errorf("You cannot start and attach multiple containers at once.") + } + + steam, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false) + if err != nil { + return err + } + + env := engine.Env{} + if err := env.Decode(steam); err != nil { + return err + } + config := env.GetSubEnv("Config") + tty = config.GetBool("Tty") + + if !tty { + sigc := cli.forwardAllSignals(cmd.Arg(0)) + defer signal.StopCatch(sigc) + } + + var in io.ReadCloser + + v := url.Values{} + v.Set("stream", "1") + + if *openStdin && config.GetBool("OpenStdin") { + v.Set("stdin", "1") + in = cli.in + } + + v.Set("stdout", "1") + v.Set("stderr", "1") + + cErr = promise.Go(func() error { + return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil, nil) + }) + } + + var encounteredError error + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false)) + if err != nil { + if !*attach || !*openStdin { + fmt.Fprintf(cli.err, "%s\n", err) + } + encounteredError = fmt.Errorf("Error: failed to start one or more containers") + } else { + if !*attach || !*openStdin { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + } + if encounteredError != nil { + if *openStdin || *attach { + cli.in.Close() + } + return encounteredError + } + + if *openStdin || *attach { + if tty && cli.isTerminalOut { + if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { + log.Errorf("Error monitoring TTY size: %s", err) + } + } + return <-cErr + } + return nil +} + +func (cli *DockerCli) CmdUnpause(args ...string) error { + cmd := cli.Subcmd("unpause", "CONTAINER", "Unpause all processes within a container") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + var encounteredError error + for _, name := range cmd.Args() { + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/unpause", name), nil, false)); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to unpause container named %s", name) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +func (cli *DockerCli) CmdPause(args ...string) error { + cmd := cli.Subcmd("pause", "CONTAINER", "Pause all processes within a container") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + var encounteredError error + for _, name := range cmd.Args() { + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/pause", name), nil, false)); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to pause container 
named %s", name) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +func (cli *DockerCli) CmdInspect(args ...string) error { + cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container or image") + tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template.") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + var tmpl *template.Template + if *tmplStr != "" { + var err error + if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil { + fmt.Fprintf(cli.err, "Template parsing error: %v\n", err) + return &utils.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + } + + indented := new(bytes.Buffer) + indented.WriteByte('[') + status := 0 + + for _, name := range cmd.Args() { + obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) + if err != nil { + obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, false)) + if err != nil { + if strings.Contains(err.Error(), "No such") { + fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name) + } else { + fmt.Fprintf(cli.err, "%s", err) + } + status = 1 + continue + } + } + + if tmpl == nil { + if err = json.Indent(indented, obj, "", " "); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + } else { + // Has template, will render + var value interface{} + if err := json.Unmarshal(obj, &value); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + if err := tmpl.Execute(cli.out, value); err != nil { + return err + } + cli.out.Write([]byte{'\n'}) + } + indented.WriteString(",") + } + + if indented.Len() > 1 { + // Remove trailing ',' + indented.Truncate(indented.Len() - 1) + } + indented.WriteByte(']') + + if tmpl == nil { + if _, err := io.Copy(cli.out, indented); err != nil { + return err + } + } + + if status != 0 { + return &utils.StatusError{StatusCode: status} + } + return nil +} + +func (cli *DockerCli) CmdTop(args ...string) error { + cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Display the running processes of a container") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() == 0 { + cmd.Usage() + return nil + } + val := url.Values{} + if cmd.NArg() > 1 { + val.Set("ps_args", strings.Join(cmd.Args()[1:], " ")) + } + + stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, false) + if err != nil { + return err + } + var procs engine.Env + if err := procs.Decode(stream); err != nil { + return err + } + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + fmt.Fprintln(w, strings.Join(procs.GetList("Titles"), "\t")) + processes := [][]string{} + if err := procs.GetJson("Processes", &processes); err != nil { + return err + } + for _, proc := range processes { + fmt.Fprintln(w, strings.Join(proc, "\t")) + } + w.Flush() + return nil +} + +func (cli *DockerCli) CmdPort(args ...string) error { + cmd := cli.Subcmd("port", "CONTAINER [PRIVATE_PORT[/PROTO]]", "List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + steam, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false) + if err != nil { + return err + } + + env := engine.Env{} + if err := 
env.Decode(steam); err != nil { + return err + } + ports := nat.PortMap{} + if err := env.GetSubEnv("NetworkSettings").GetJson("Ports", &ports); err != nil { + return err + } + + if cmd.NArg() == 2 { + var ( + port = cmd.Arg(1) + proto = "tcp" + parts = strings.SplitN(port, "/", 2) + ) + + if len(parts) == 2 && len(parts[1]) != 0 { + port = parts[0] + proto = parts[1] + } + natPort := port + "/" + proto + if frontends, exists := ports[nat.Port(port+"/"+proto)]; exists && frontends != nil { + for _, frontend := range frontends { + fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort) + } + return nil + } + return fmt.Errorf("Error: No public port '%s' published for %s", natPort, cmd.Arg(0)) + } + + for from, frontends := range ports { + for _, frontend := range frontends { + fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIp, frontend.HostPort) + } + } + + return nil +} + +// 'docker rmi IMAGE' removes all images with the name IMAGE +func (cli *DockerCli) CmdRmi(args ...string) error { + var ( + cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images") + force = cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image") + noprune = cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents") + ) + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + v := url.Values{} + if *force { + v.Set("force", "1") + } + if *noprune { + v.Set("noprune", "1") + } + + var encounteredError error + for _, name := range cmd.Args() { + body, _, err := readBody(cli.call("DELETE", "/images/"+name+"?"+v.Encode(), nil, false)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to remove one or more images") + } else { + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to remove one or more images") + continue + } + for _, out := range outs.Data { + if out.Get("Deleted") != "" { + fmt.Fprintf(cli.out, "Deleted: %s\n", out.Get("Deleted")) + } else { + fmt.Fprintf(cli.out, "Untagged: %s\n", out.Get("Untagged")) + } + } + } + } + return encounteredError +} + +func (cli *DockerCli) CmdHistory(args ...string) error { + cmd := cli.Subcmd("history", "IMAGE", "Show the history of an image") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + body, _, err := readBody(cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, false)) + if err != nil { + return err + } + + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE") + } + + for _, out := range outs.Data { + outID := out.Get("Id") + if !*quiet { + if *noTrunc { + fmt.Fprintf(w, "%s\t", outID) + } else { + fmt.Fprintf(w, "%s\t", utils.TruncateID(outID)) + } + + fmt.Fprintf(w, "%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0)))) + + if *noTrunc { + fmt.Fprintf(w, "%s\t", out.Get("CreatedBy")) + } else { + fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45)) + } + fmt.Fprintf(w, "%s\n", units.HumanSize(out.GetInt64("Size"))) + 
} else {
+			if *noTrunc {
+				fmt.Fprintln(w, outID)
+			} else {
+				fmt.Fprintln(w, utils.TruncateID(outID))
+			}
+		}
+	}
+	w.Flush()
+	return nil
+}
+
+func (cli *DockerCli) CmdRm(args ...string) error {
+	cmd := cli.Subcmd("rm", "CONTAINER [CONTAINER...]", "Remove one or more containers")
+	v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container")
+	link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container")
+	force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)")
+
+	if err := cmd.Parse(args); err != nil {
+		return nil
+	}
+	if cmd.NArg() < 1 {
+		cmd.Usage()
+		return nil
+	}
+
+	val := url.Values{}
+	if *v {
+		val.Set("v", "1")
+	}
+	if *link {
+		val.Set("link", "1")
+	}
+
+	if *force {
+		val.Set("force", "1")
+	}
+
+	var encounteredError error
+	for _, name := range cmd.Args() {
+		_, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, false))
+		if err != nil {
+			fmt.Fprintf(cli.err, "%s\n", err)
+			encounteredError = fmt.Errorf("Error: failed to remove one or more containers")
+		} else {
+			fmt.Fprintf(cli.out, "%s\n", name)
+		}
+	}
+	return encounteredError
+}
+
+// 'docker kill NAME' kills a running container
+func (cli *DockerCli) CmdKill(args ...string) error {
+	cmd := cli.Subcmd("kill", "CONTAINER [CONTAINER...]", "Kill a running container using SIGKILL or a specified signal")
+	signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container")
+
+	if err := cmd.Parse(args); err != nil {
+		return nil
+	}
+	if cmd.NArg() < 1 {
+		cmd.Usage()
+		return nil
+	}
+
+	var encounteredError error
+	for _, name := range cmd.Args() {
+		if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, false)); err != nil {
+			fmt.Fprintf(cli.err, "%s\n", err)
+			encounteredError = fmt.Errorf("Error: failed to kill one or more containers")
+		} else {
+			fmt.Fprintf(cli.out, "%s\n", name)
+		}
+	}
+	return encounteredError
+}
+
+func (cli *DockerCli) CmdImport(args ...string) error {
+	cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.")
+
+	if err := cmd.Parse(args); err != nil {
+		return nil
+	}
+	if cmd.NArg() < 1 {
+		cmd.Usage()
+		return nil
+	}
+
+	var (
+		v          = url.Values{}
+		src        = cmd.Arg(0)
+		repository = cmd.Arg(1)
+	)
+
+	v.Set("fromSrc", src)
+	v.Set("repo", repository)
+
+	if cmd.NArg() == 3 {
+		fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' has been deprecated. 
Please use URL|- [REPOSITORY[:TAG]]\n")
+		v.Set("tag", cmd.Arg(2))
+	}
+
+	if repository != "" {
+		//Check if the given image name can be resolved
+		repo, _ := parsers.ParseRepositoryTag(repository)
+		if _, _, err := registry.ResolveRepositoryName(repo); err != nil {
+			return err
+		}
+	}
+
+	var in io.Reader
+
+	if src == "-" {
+		in = cli.in
+	}
+
+	return cli.stream("POST", "/images/create?"+v.Encode(), in, cli.out, nil)
+}
+
+func (cli *DockerCli) CmdPush(args ...string) error {
+	cmd := cli.Subcmd("push", "NAME[:TAG]", "Push an image or a repository to the registry")
+	if err := cmd.Parse(args); err != nil {
+		return nil
+	}
+	name := cmd.Arg(0)
+
+	if name == "" {
+		cmd.Usage()
+		return nil
+	}
+
+	cli.LoadConfigFile()
+
+	remote, tag := parsers.ParseRepositoryTag(name)
+
+	// Resolve the Repository name from fqn to hostname + name
+	hostname, _, err := registry.ResolveRepositoryName(remote)
+	if err != nil {
+		return err
+	}
+	// Resolve the Auth config relevant for this server
+	authConfig := cli.configFile.ResolveAuthConfig(hostname)
+	// If we're not using a custom registry, we know the restrictions
+	// applied to repository names and can warn the user in advance.
+	// Custom repositories can have different rules, and we must also
+	// allow pushing by image ID.
+	if len(strings.SplitN(name, "/", 2)) == 1 {
+		username := cli.configFile.Configs[registry.IndexServerAddress()].Username
+		if username == "" {
+			username = "<user>"
+		}
+		return fmt.Errorf("You cannot push a \"root\" repository. Please rename your repository in <user>/<repo> (ex: %s/%s)", username, name)
+	}
+
+	v := url.Values{}
+	v.Set("tag", tag)
+	push := func(authConfig registry.AuthConfig) error {
+		buf, err := json.Marshal(authConfig)
+		if err != nil {
+			return err
+		}
+		registryAuthHeader := []string{
+			base64.URLEncoding.EncodeToString(buf),
+		}
+
+		return cli.stream("POST", "/images/"+remote+"/push?"+v.Encode(), nil, cli.out, map[string][]string{
+			"X-Registry-Auth": registryAuthHeader,
+		})
+	}
+
+	if err := push(authConfig); err != nil {
+		if strings.Contains(err.Error(), "Status 401") {
+			fmt.Fprintln(cli.out, "\nPlease login prior to push:")
+			if err := cli.CmdLogin(hostname); err != nil {
+				return err
+			}
+			authConfig := cli.configFile.ResolveAuthConfig(hostname)
+			return push(authConfig)
+		}
+		return err
+	}
+	return nil
+}
+
+func (cli *DockerCli) CmdPull(args ...string) error {
+	cmd := cli.Subcmd("pull", "NAME[:TAG]", "Pull an image or a repository from the registry")
+	allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository")
+	if err := cmd.Parse(args); err != nil {
+		return nil
+	}
+
+	if cmd.NArg() != 1 {
+		cmd.Usage()
+		return nil
+	}
+	var (
+		v         = url.Values{}
+		remote    = cmd.Arg(0)
+		newRemote = remote
+	)
+	taglessRemote, tag := parsers.ParseRepositoryTag(remote)
+	if tag == "" && !*allTags {
+		newRemote = taglessRemote + ":latest"
+	}
+	if tag != "" && *allTags {
+		return fmt.Errorf("tag can't be used with --all-tags/-a")
+	}
+
+	v.Set("fromImage", newRemote)
+
+	// Resolve the Repository name from fqn to hostname + name
+	hostname, _, err := registry.ResolveRepositoryName(taglessRemote)
+	if err != nil {
+		return err
+	}
+
+	cli.LoadConfigFile()
+
+	// Resolve the Auth config relevant for this server
+	authConfig := cli.configFile.ResolveAuthConfig(hostname)
+
+	pull := func(authConfig registry.AuthConfig) error {
+		buf, err := json.Marshal(authConfig)
+		if err != nil {
+			return err
+		}
+		registryAuthHeader := []string{
+			base64.URLEncoding.EncodeToString(buf),
+		}
+
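+		// as with push, the daemon performs the actual registry login:
+		// the marshalled AuthConfig travels base64-encoded in the
+		// X-Registry-Auth header of the /images/create request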
+ return cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out, map[string][]string{ + "X-Registry-Auth": registryAuthHeader, + }) + } + + if err := pull(authConfig); err != nil { + if strings.Contains(err.Error(), "Status 401") { + fmt.Fprintln(cli.out, "\nPlease login prior to pull:") + if err := cli.CmdLogin(hostname); err != nil { + return err + } + authConfig := cli.configFile.ResolveAuthConfig(hostname) + return pull(authConfig) + } + return err + } + + return nil +} + +func (cli *DockerCli) CmdImages(args ...string) error { + cmd := cli.Subcmd("images", "[NAME]", "List images") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") + all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate image layers)") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + // FIXME: --viz and --tree are deprecated. Remove them in a future version. + flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format") + flTree := cmd.Bool([]string{"#t", "#tree", "#-tree"}, false, "Output graph in tree format") + + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 'dangling=true')") + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() > 1 { + cmd.Usage() + return nil + } + + // Consolidate all filter flags, and sanity check them early. + // They'll get process in the daemon/server. + imageFilterArgs := filters.Args{} + for _, f := range flFilter.GetAll() { + var err error + imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs) + if err != nil { + return err + } + } + + matchName := cmd.Arg(0) + // FIXME: --viz and --tree are deprecated. Remove them in a future version. 
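+	// The deprecated graph modes ask the daemon for every image ("all=1")
+	// and rebuild the parent/child tree client-side before rendering it.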
+ if *flViz || *flTree { + v := url.Values{ + "all": []string{"1"}, + } + if len(imageFilterArgs) > 0 { + filterJson, err := filters.ToParam(imageFilterArgs) + if err != nil { + return err + } + v.Set("filters", filterJson) + } + + body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false)) + if err != nil { + return err + } + + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + + var ( + printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string) + startImage *engine.Env + + roots = engine.NewTable("Created", outs.Len()) + byParent = make(map[string]*engine.Table) + ) + + for _, image := range outs.Data { + if image.Get("ParentId") == "" { + roots.Add(image) + } else { + if children, exists := byParent[image.Get("ParentId")]; exists { + children.Add(image) + } else { + byParent[image.Get("ParentId")] = engine.NewTable("Created", 1) + byParent[image.Get("ParentId")].Add(image) + } + } + + if matchName != "" { + if matchName == image.Get("Id") || matchName == utils.TruncateID(image.Get("Id")) { + startImage = image + } + + for _, repotag := range image.GetList("RepoTags") { + if repotag == matchName { + startImage = image + } + } + } + } + + if *flViz { + fmt.Fprintf(cli.out, "digraph docker {\n") + printNode = (*DockerCli).printVizNode + } else { + printNode = (*DockerCli).printTreeNode + } + + if startImage != nil { + root := engine.NewTable("Created", 1) + root.Add(startImage) + cli.WalkTree(*noTrunc, root, byParent, "", printNode) + } else if matchName == "" { + cli.WalkTree(*noTrunc, roots, byParent, "", printNode) + } + if *flViz { + fmt.Fprintf(cli.out, " base [style=invisible]\n}\n") + } + } else { + v := url.Values{} + if len(imageFilterArgs) > 0 { + filterJson, err := filters.ToParam(imageFilterArgs) + if err != nil { + return err + } + v.Set("filters", filterJson) + } + + if cmd.NArg() == 1 { + // FIXME rename this parameter, to not be confused with the filters flag + v.Set("filter", matchName) + } + if *all { + v.Set("all", "1") + } + + body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false)) + + if err != nil { + return err + } + + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE") + } + + for _, out := range outs.Data { + for _, repotag := range out.GetList("RepoTags") { + + repo, tag := parsers.ParseRepositoryTag(repotag) + outID := out.Get("Id") + if !*noTrunc { + outID = utils.TruncateID(outID) + } + + if !*quiet { + fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), units.HumanSize(out.GetInt64("VirtualSize"))) + } else { + fmt.Fprintln(w, outID) + } + } + } + + if !*quiet { + w.Flush() + } + } + return nil +} + +// FIXME: --viz and --tree are deprecated. Remove them in a future version. 
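+// WalkTree prints the image tree rooted at images, recursing through the
+// byParent index and extending prefix with box-drawing characters so that
+// siblings and descendants line up under their parents.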
+func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) { + length := images.Len() + if length > 1 { + for index, image := range images.Data { + if index+1 == length { + printNode(cli, noTrunc, image, prefix+"└─") + if subimages, exists := byParent[image.Get("Id")]; exists { + cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) + } + } else { + printNode(cli, noTrunc, image, prefix+"\u251C─") + if subimages, exists := byParent[image.Get("Id")]; exists { + cli.WalkTree(noTrunc, subimages, byParent, prefix+"\u2502 ", printNode) + } + } + } + } else { + for _, image := range images.Data { + printNode(cli, noTrunc, image, prefix+"└─") + if subimages, exists := byParent[image.Get("Id")]; exists { + cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) + } + } + } +} + +// FIXME: --viz and --tree are deprecated. Remove them in a future version. +func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) { + var ( + imageID string + parentID string + ) + if noTrunc { + imageID = image.Get("Id") + parentID = image.Get("ParentId") + } else { + imageID = utils.TruncateID(image.Get("Id")) + parentID = utils.TruncateID(image.Get("ParentId")) + } + if parentID == "" { + fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID) + } else { + fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID) + } + if image.GetList("RepoTags")[0] != ":" { + fmt.Fprintf(cli.out, " \"%s\" [label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n", + imageID, imageID, strings.Join(image.GetList("RepoTags"), "\\n")) + } +} + +// FIXME: --viz and --tree are deprecated. Remove them in a future version. +func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) { + var imageID string + if noTrunc { + imageID = image.Get("Id") + } else { + imageID = utils.TruncateID(image.Get("Id")) + } + + fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(image.GetInt64("VirtualSize"))) + if image.GetList("RepoTags")[0] != ":" { + fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", ")) + } else { + fmt.Fprint(cli.out, "\n") + } +} + +func (cli *DockerCli) CmdPs(args ...string) error { + var ( + err error + + psFilterArgs = filters.Args{} + v = url.Values{} + + cmd = cli.Subcmd("ps", "", "List containers") + quiet = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") + size = cmd.Bool([]string{"s", "-size"}, false, "Display sizes") + all = cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.") + noTrunc = cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + nLatest = cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.") + since = cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show only containers created since Id or Name, include non-running ones.") + before = cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name, include non-running ones.") + last = cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.") + flFilter = opts.NewListOpts(nil) + ) + + cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values. 
Valid filters:\nexited=<int> - containers with exit code of <int>\nstatus=(restarting|running|paused|exited)")
+
+	if err := cmd.Parse(args); err != nil {
+		return nil
+	}
+
+	if *last == -1 && *nLatest {
+		*last = 1
+	}
+
+	if *all {
+		v.Set("all", "1")
+	}
+
+	if *last != -1 {
+		v.Set("limit", strconv.Itoa(*last))
+	}
+
+	if *since != "" {
+		v.Set("since", *since)
+	}
+
+	if *before != "" {
+		v.Set("before", *before)
+	}
+
+	if *size {
+		v.Set("size", "1")
+	}
+
+	// Consolidate all filter flags, and sanity check them.
+	// They'll get processed in the daemon/server.
+	for _, f := range flFilter.GetAll() {
+		if psFilterArgs, err = filters.ParseFlag(f, psFilterArgs); err != nil {
+			return err
+		}
+	}
+
+	if len(psFilterArgs) > 0 {
+		filterJson, err := filters.ToParam(psFilterArgs)
+		if err != nil {
+			return err
+		}
+
+		v.Set("filters", filterJson)
+	}
+
+	body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, false))
+	if err != nil {
+		return err
+	}
+
+	outs := engine.NewTable("Created", 0)
+	if _, err := outs.ReadListFrom(body); err != nil {
+		return err
+	}
+
+	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
+	if !*quiet {
+		fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES")
+
+		if *size {
+			fmt.Fprintln(w, "\tSIZE")
+		} else {
+			fmt.Fprint(w, "\n")
+		}
+	}
+
+	stripNamePrefix := func(ss []string) []string {
+		for i, s := range ss {
+			ss[i] = s[1:]
+		}
+
+		return ss
+	}
+
+	for _, out := range outs.Data {
+		outID := out.Get("Id")
+
+		if !*noTrunc {
+			outID = utils.TruncateID(outID)
+		}
+
+		if *quiet {
+			fmt.Fprintln(w, outID)
+
+			continue
+		}
+
+		var (
+			outNames   = stripNamePrefix(out.GetList("Names"))
+			outCommand = strconv.Quote(out.Get("Command"))
+			ports      = engine.NewTable("", 0)
+		)
+
+		if !*noTrunc {
+			outCommand = utils.Trunc(outCommand, 20)
+
+			// only display the default name for the container unless notrunc is passed
+			for _, name := range outNames {
+				if len(strings.Split(name, "/")) == 1 {
+					outNames = []string{name}
+
+					break
+				}
+			}
+		}
+
+		ports.ReadListFrom([]byte(out.Get("Ports")))
+
+		fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand,
+			units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))),
+			out.Get("Status"), api.DisplayablePorts(ports), strings.Join(outNames, ","))
+
+		if *size {
+			if out.GetInt("SizeRootFs") > 0 {
+				fmt.Fprintf(w, "%s (virtual %s)\n", units.HumanSize(out.GetInt64("SizeRw")), units.HumanSize(out.GetInt64("SizeRootFs")))
+			} else {
+				fmt.Fprintf(w, "%s\n", units.HumanSize(out.GetInt64("SizeRw")))
+			}
+
+			continue
+		}
+
+		fmt.Fprint(w, "\n")
+	}
+
+	if !*quiet {
+		w.Flush()
+	}
+
+	return nil
+}
+
+func (cli *DockerCli) CmdCommit(args ...string) error {
+	cmd := cli.Subcmd("commit", "CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes")
+	flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit")
+	flComment := cmd.String([]string{"m", "-message"}, "", "Commit message")
+	flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (e.g., \"John Hannibal Smith <hannibal@a-team.com>\")")
+	// FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands. 
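+	// The JSON passed via the deprecated --run flag is still decoded into a
+	// runconfig.Config below and sent along with the /commit request.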
+ flConfig := cmd.String([]string{"#run", "#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands") + if err := cmd.Parse(args); err != nil { + return nil + } + + var ( + name = cmd.Arg(0) + repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1)) + ) + + if name == "" || len(cmd.Args()) > 2 { + cmd.Usage() + return nil + } + + //Check if the given image name can be resolved + if repository != "" { + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err + } + } + + v := url.Values{} + v.Set("container", name) + v.Set("repo", repository) + v.Set("tag", tag) + v.Set("comment", *flComment) + v.Set("author", *flAuthor) + + if *flPause != true { + v.Set("pause", "0") + } + + var ( + config *runconfig.Config + env engine.Env + ) + if *flConfig != "" { + config = &runconfig.Config{} + if err := json.Unmarshal([]byte(*flConfig), config); err != nil { + return err + } + } + stream, _, err := cli.call("POST", "/commit?"+v.Encode(), config, false) + if err != nil { + return err + } + if err := env.Decode(stream); err != nil { + return err + } + + fmt.Fprintf(cli.out, "%s\n", env.Get("Id")) + return nil +} + +func (cli *DockerCli) CmdEvents(args ...string) error { + cmd := cli.Subcmd("events", "", "Get real time events from the server") + since := cmd.String([]string{"#since", "-since"}, "", "Show all events created since timestamp") + until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 0 { + cmd.Usage() + return nil + } + var ( + v = url.Values{} + loc = time.FixedZone(time.Now().Zone()) + ) + var setTime = func(key, value string) { + format := timeutils.RFC3339NanoFixed + if len(value) < len(format) { + format = format[:len(value)] + } + if t, err := time.ParseInLocation(format, value, loc); err == nil { + v.Set(key, strconv.FormatInt(t.Unix(), 10)) + } else { + v.Set(key, value) + } + } + if *since != "" { + setTime("since", *since) + } + if *until != "" { + setTime("until", *until) + } + if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) CmdExport(args ...string) error { + cmd := cli.Subcmd("export", "CONTAINER", "Export the contents of a filesystem as a tar archive to STDOUT") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + if err := cli.stream("GET", "/containers/"+cmd.Arg(0)+"/export", nil, cli.out, nil); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) CmdDiff(args ...string) error { + cmd := cli.Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, false)) + + if err != nil { + return err + } + + outs := engine.NewTable("", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + for _, change := range outs.Data { + var kind string + switch change.GetInt("Kind") { + case archive.ChangeModify: + kind = "C" + case archive.ChangeAdd: + kind = "A" + case archive.ChangeDelete: + kind = "D" + } + fmt.Fprintf(cli.out, "%s %s\n", kind, change.Get("Path")) + } + return nil +} + +func (cli *DockerCli) CmdLogs(args ...string) error { + var ( + cmd = cli.Subcmd("logs", 
"CONTAINER", "Fetch the logs of a container") + follow = cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") + times = cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps") + tail = cmd.String([]string{"-tail"}, "all", "Output the specified number of lines at the end of logs (defaults to all logs)") + ) + + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + name := cmd.Arg(0) + + steam, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false) + if err != nil { + return err + } + + env := engine.Env{} + if err := env.Decode(steam); err != nil { + return err + } + + v := url.Values{} + v.Set("stdout", "1") + v.Set("stderr", "1") + + if *times { + v.Set("timestamps", "1") + } + + if *follow { + v.Set("follow", "1") + } + v.Set("tail", *tail) + + return cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), env.GetSubEnv("Config").GetBool("Tty"), nil, cli.out, cli.err, nil) +} + +func (cli *DockerCli) CmdAttach(args ...string) error { + var ( + cmd = cli.Subcmd("attach", "CONTAINER", "Attach to a running container") + noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN") + proxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.") + ) + + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + name := cmd.Arg(0) + + stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false) + if err != nil { + return err + } + + env := engine.Env{} + if err := env.Decode(stream); err != nil { + return err + } + + if !env.GetSubEnv("State").GetBool("Running") { + return fmt.Errorf("You cannot attach to a stopped container, start it first") + } + + var ( + config = env.GetSubEnv("Config") + tty = config.GetBool("Tty") + ) + + if tty && cli.isTerminalOut { + if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { + log.Debugf("Error monitoring TTY size: %s", err) + } + } + + var in io.ReadCloser + + v := url.Values{} + v.Set("stream", "1") + if !*noStdin && config.GetBool("OpenStdin") { + v.Set("stdin", "1") + in = cli.in + } + + v.Set("stdout", "1") + v.Set("stderr", "1") + + if *proxy && !tty { + sigc := cli.forwardAllSignals(cmd.Arg(0)) + defer signal.StopCatch(sigc) + } + + if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil, nil); err != nil { + return err + } + + _, status, err := getExitCode(cli, cmd.Arg(0)) + if err != nil { + return err + } + if status != 0 { + return &utils.StatusError{StatusCode: status} + } + + return nil +} + +func (cli *DockerCli) CmdSearch(args ...string) error { + cmd := cli.Subcmd("search", "TERM", "Search the Docker Hub for images") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + trusted := cmd.Bool([]string{"#t", "#trusted", "#-trusted"}, false, "Only show trusted builds") + automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds") + stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least x stars") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + v := url.Values{} + v.Set("term", cmd.Arg(0)) + + body, _, err := readBody(cli.call("GET", "/images/search?"+v.Encode(), nil, true)) + + if err != nil { + return err + } + outs := 
engine.NewTable("star_count", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) + fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") + for _, out := range outs.Data { + if ((*automated || *trusted) && (!out.GetBool("is_trusted") && !out.GetBool("is_automated"))) || (*stars > out.GetInt("star_count")) { + continue + } + desc := strings.Replace(out.Get("description"), "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if !*noTrunc && len(desc) > 45 { + desc = utils.Trunc(desc, 42) + "..." + } + fmt.Fprintf(w, "%s\t%s\t%d\t", out.Get("name"), desc, out.GetInt("star_count")) + if out.GetBool("is_official") { + fmt.Fprint(w, "[OK]") + + } + fmt.Fprint(w, "\t") + if out.GetBool("is_automated") || out.GetBool("is_trusted") { + fmt.Fprint(w, "[OK]") + } + fmt.Fprint(w, "\n") + } + w.Flush() + return nil +} + +// Ports type - Used to parse multiple -p flags +type ports []int + +func (cli *DockerCli) CmdTag(args ...string) error { + cmd := cli.Subcmd("tag", "IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository") + force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 2 { + cmd.Usage() + return nil + } + + var ( + repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1)) + v = url.Values{} + ) + + //Check if the given image name can be resolved + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err + } + v.Set("repo", repository) + v.Set("tag", tag) + + if *force { + v.Set("force", "1") + } + + if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, false)); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) pullImage(image string) error { + return cli.pullImageCustomOut(image, cli.out) +} + +func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { + v := url.Values{} + repos, tag := parsers.ParseRepositoryTag(image) + // pull only the image tagged 'latest' if no tag was specified + if tag == "" { + tag = "latest" + } + v.Set("fromImage", repos) + v.Set("tag", tag) + + // Resolve the Repository name from fqn to hostname + name + hostname, _, err := registry.ResolveRepositoryName(repos) + if err != nil { + return err + } + + // Load the auth config file, to be able to pull the image + cli.LoadConfigFile() + + // Resolve the Auth config relevant for this server + authConfig := cli.configFile.ResolveAuthConfig(hostname) + buf, err := json.Marshal(authConfig) + if err != nil { + return err + } + + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, out, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil { + return err + } + return nil +} + +type cidFile struct { + path string + file *os.File + written bool +} + +func newCIDFile(path string) (*cidFile, error) { + if _, err := os.Stat(path); err == nil { + return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) + } + f, err := os.Create(path) + if err != nil { + return nil, fmt.Errorf("Failed to create the container ID file: %s", err) + } + + return &cidFile{path: path, file: f}, nil +} + +func (cid *cidFile) Close() error { + cid.file.Close() + + if !cid.written { + if err := os.Remove(cid.path); err != nil { + return fmt.Errorf("failed to remove the CID file 
'%s': %s \n", cid.path, err) + } + } + + return nil +} + +func (cid *cidFile) Write(id string) error { + if _, err := cid.file.Write([]byte(id)); err != nil { + return fmt.Errorf("Failed to write the container ID to the file: %s", err) + } + cid.written = true + return nil +} + +func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runconfig.HostConfig, cidfile, name string) (engine.Env, error) { + containerValues := url.Values{} + if name != "" { + containerValues.Set("name", name) + } + + mergedConfig := runconfig.MergeConfigs(config, hostConfig) + + var containerIDFile *cidFile + if cidfile != "" { + var err error + if containerIDFile, err = newCIDFile(cidfile); err != nil { + return nil, err + } + defer containerIDFile.Close() + } + + //create the container + stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, false) + //if image not found try to pull it + if statusCode == 404 { + fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image) + + // we don't want to write to stdout anything apart from container.ID + if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil { + return nil, err + } + // Retry + if stream, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, false); err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + + var result engine.Env + if err := result.Decode(stream); err != nil { + return nil, err + } + + for _, warning := range result.GetList("Warnings") { + fmt.Fprintf(cli.err, "WARNING: %s\n", warning) + } + + if containerIDFile != nil { + if err = containerIDFile.Write(result.Get("Id")); err != nil { + return nil, err + } + } + + return result, nil + +} + +func (cli *DockerCli) CmdCreate(args ...string) error { + cmd := cli.Subcmd("create", "IMAGE [COMMAND] [ARG...]", "Create a new container") + + // These are flags not stored in Config/HostConfig + var ( + flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") + ) + + config, hostConfig, cmd, err := runconfig.Parse(cmd, args, nil) + if err != nil { + return err + } + if config.Image == "" { + cmd.Usage() + return nil + } + + createResult, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName) + if err != nil { + return err + } + + fmt.Fprintf(cli.out, "%s\n", createResult.Get("Id")) + + return nil +} + +func (cli *DockerCli) CmdRun(args ...string) error { + // FIXME: just use runconfig.Parse already + cmd := cli.Subcmd("run", "IMAGE [COMMAND] [ARG...]", "Run a command in a new container") + + // These are flags not stored in Config/HostConfig + var ( + flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run the container in the background and print the new container ID") + flSigProxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy received signals to the process (even in non-TTY mode). 
+func (cli *DockerCli) CmdRun(args ...string) error {
+ // FIXME: just use runconfig.Parse already
+ cmd := cli.Subcmd("run", "IMAGE [COMMAND] [ARG...]", "Run a command in a new container")
+
+ // These are flags not stored in Config/HostConfig
+ var (
+ flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)")
+ flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run the container in the background and print the new container ID")
+ flSigProxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy received signals to the process (even in non-TTY mode). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.")
+ flName = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container")
+ flAttach *opts.ListOpts
+
+ ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d")
+ ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm")
+ ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d")
+ )
+
+ config, hostConfig, cmd, err := runconfig.Parse(cmd, args, nil)
+ if err != nil {
+ return err
+ }
+ if config.Image == "" {
+ cmd.Usage()
+ return nil
+ }
+
+ if *flDetach {
+ if fl := cmd.Lookup("attach"); fl != nil {
+ flAttach = fl.Value.(*opts.ListOpts)
+ if flAttach.Len() != 0 {
+ return ErrConflictAttachDetach
+ }
+ }
+ if *flAutoRemove {
+ return ErrConflictDetachAutoRemove
+ }
+
+ config.AttachStdin = false
+ config.AttachStdout = false
+ config.AttachStderr = false
+ config.StdinOnce = false
+ }
+
+ // Disable sigProxy when a TTY is allocated
+ sigProxy := *flSigProxy
+ if config.Tty {
+ sigProxy = false
+ }
+
+ runResult, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName)
+ if err != nil {
+ return err
+ }
+
+ if sigProxy {
+ sigc := cli.forwardAllSignals(runResult.Get("Id"))
+ defer signal.StopCatch(sigc)
+ }
+
+ var (
+ waitDisplayId chan struct{}
+ errCh chan error
+ )
+
+ if !config.AttachStdout && !config.AttachStderr {
+ // Make this asynchronous so the client can write to stdin before having to read the ID
+ waitDisplayId = make(chan struct{})
+ go func() {
+ defer close(waitDisplayId)
+ fmt.Fprintf(cli.out, "%s\n", runResult.Get("Id"))
+ }()
+ }
+
+ if *flAutoRemove && (hostConfig.RestartPolicy.Name == "always" || hostConfig.RestartPolicy.Name == "on-failure") {
+ return ErrConflictRestartPolicyAndAutoRemove
+ }
+
+ // We need to instantiate the chan because the select needs it. It can
+ // be closed but can't be uninitialized.
+ hijacked := make(chan io.Closer)
+
+ // Block the return until the chan gets closed
+ defer func() {
+ log.Debugf("End of CmdRun(), Waiting for hijack to finish.")
+ if _, ok := <-hijacked; ok {
+ log.Errorf("Hijack did not finish (chan still open)")
+ }
+ }()
+
+ if config.AttachStdin || config.AttachStdout || config.AttachStderr {
+ var (
+ out, stderr io.Writer
+ in io.ReadCloser
+ v = url.Values{}
+ )
+ v.Set("stream", "1")
+
+ if config.AttachStdin {
+ v.Set("stdin", "1")
+ in = cli.in
+ }
+ if config.AttachStdout {
+ v.Set("stdout", "1")
+ out = cli.out
+ }
+ if config.AttachStderr {
+ v.Set("stderr", "1")
+ if config.Tty {
+ stderr = cli.out
+ } else {
+ stderr = cli.err
+ }
+ }
+
+ errCh = promise.Go(func() error {
+ return cli.hijack("POST", "/containers/"+runResult.Get("Id")+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked, nil)
+ })
+ } else {
+ close(hijacked)
+ }
+
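The unbuffered hijacked channel is a handshake: hijack (defined in hijack.go below)
sends the raw connection once the HTTP upgrade has happened, or the channel is
simply closed when no attach is wanted, and the select that follows waits for
whichever of the handshake or an early error comes first. A minimal sketch of the
pattern, with promise.Go and cli.hijack replaced by illustrative stand-ins:

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    // promiseGo mirrors promise.Go: run f in a goroutine and deliver its
    // error on a buffered channel.
    func promiseGo(f func() error) chan error {
        ch := make(chan error, 1)
        go func() { ch <- f() }()
        return ch
    }

    // attach stands in for cli.hijack: it announces the established
    // connection on started before carrying on with the copy loop.
    func attach(started chan io.Closer) error {
        conn := io.NopCloser(strings.NewReader("hello from the container\n"))
        started <- conn // handshake: the receiver now owns closing conn
        return nil
    }

    func main() {
        hijacked := make(chan io.Closer)
        errCh := promiseGo(func() error { return attach(hijacked) })

        select {
        case closer := <-hijacked:
            defer closer.Close() // release the connection when done
            fmt.Println("attached")
        case err := <-errCh:
            fmt.Println("attach failed early:", err)
        }
    }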
+ // Acknowledge the hijack before starting
+ select {
+ case closer := <-hijacked:
+ // Make sure that the hijack gets closed when returning (results
+ // in closing the hijack chan and freeing the server's goroutines).
+ if closer != nil {
+ defer closer.Close()
+ }
+ case err := <-errCh:
+ if err != nil {
+ log.Debugf("Error hijack: %s", err)
+ return err
+ }
+ }
+
+ // Start the container
+ if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", hostConfig, false)); err != nil {
+ return err
+ }
+
+ if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut {
+ if err := cli.monitorTtySize(runResult.Get("Id"), false); err != nil {
+ log.Errorf("Error monitoring TTY size: %s", err)
+ }
+ }
+
+ if errCh != nil {
+ if err := <-errCh; err != nil {
+ log.Debugf("Error hijack: %s", err)
+ return err
+ }
+ }
+
+ // Detached mode: wait for the id to be displayed and return.
+ if !config.AttachStdout && !config.AttachStderr {
+ // Detached mode
+ <-waitDisplayId
+ return nil
+ }
+
+ var status int
+
+ // Attached mode
+ if *flAutoRemove {
+ // Autoremove: wait for the container to finish, retrieve
+ // the exit code and remove the container
+ if _, _, err := readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/wait", nil, false)); err != nil {
+ return err
+ }
+ if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil {
+ return err
+ }
+ if _, _, err := readBody(cli.call("DELETE", "/containers/"+runResult.Get("Id")+"?v=1", nil, false)); err != nil {
+ return err
+ }
+ } else {
+ if !config.Tty {
+ // In non-tty mode, we can't detach, so we know we need to wait.
+ if status, err = waitForExit(cli, runResult.Get("Id")); err != nil {
+ return err
+ }
+ } else {
+ // In TTY mode, there is a race. If the process dies too slowly, the state can be updated after the getExitCode call
+ // and result in a wrong exit code.
+ // No Autoremove: Simply retrieve the exit code
+ if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil {
+ return err
+ }
+ }
+ }
+ if status != 0 {
+ return &utils.StatusError{StatusCode: status}
+ }
+ return nil
+}
+
+func (cli *DockerCli) CmdCp(args ...string) error {
+ cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTPATH", "Copy files/folders from the PATH to the HOSTPATH")
+ if err := cmd.Parse(args); err != nil {
+ return nil
+ }
+
+ if cmd.NArg() != 2 {
+ cmd.Usage()
+ return nil
+ }
+
+ var copyData engine.Env
+ info := strings.Split(cmd.Arg(0), ":")
+
+ if len(info) != 2 {
+ return fmt.Errorf("Error: Path not specified")
+ }
+
+ copyData.Set("Resource", info[1])
+ copyData.Set("HostPath", cmd.Arg(1))
+
+ stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, false)
+ if stream != nil {
+ defer stream.Close()
+ }
+ if statusCode == 404 {
+ return fmt.Errorf("No such container: %v", info[0])
+ }
+ if err != nil {
+ return err
+ }
+
+ if statusCode == 200 {
+ if err := archive.Untar(stream, copyData.Get("HostPath"), &archive.TarOptions{NoLchown: true}); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
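On a 200 response, CmdCp above receives a tar stream from the daemon and unpacks it
onto the host with pkg/archive.Untar. A sketch that consumes such a stream with the
standard library's archive/tar instead, listing entries rather than writing files;
the in-memory archive stands in for the HTTP response body:

    package main

    import (
        "archive/tar"
        "fmt"
        "io"
        "strings"
    )

    // listTar walks a tar stream like the one returned by
    // POST /containers/{id}/copy and prints each entry.
    func listTar(r io.Reader) error {
        tr := tar.NewReader(r)
        for {
            hdr, err := tr.Next()
            if err == io.EOF {
                return nil // end of archive
            }
            if err != nil {
                return err
            }
            fmt.Println(hdr.Name, hdr.Size)
        }
    }

    func main() {
        // Build a one-entry tar in memory to stand in for the response body;
        // errors are ignored for brevity.
        var buf strings.Builder
        tw := tar.NewWriter(&buf)
        tw.WriteHeader(&tar.Header{Name: "etc/hostname", Mode: 0644, Size: 6})
        tw.Write([]byte("web-1\n"))
        tw.Close()

        if err := listTar(strings.NewReader(buf.String())); err != nil {
            panic(err)
        }
    }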
+func (cli *DockerCli) CmdSave(args ...string) error {
+ cmd := cli.Subcmd("save", "IMAGE [IMAGE...]", "Save one or more images to a tar archive (streamed to STDOUT by default)")
+ outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT")
+
+ if err := cmd.Parse(args); err != nil {
+ return err
+ }
+
+ if cmd.NArg() < 1 {
+ cmd.Usage()
+ return nil
+ }
+
+ var (
+ output io.Writer = cli.out
+ err error
+ )
+ if *outfile != "" {
+ output, err = os.Create(*outfile)
+ if err != nil {
+ return err
+ }
+ }
+ if len(cmd.Args()) == 1 {
+ image := cmd.Arg(0)
+ if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil {
+ return err
+ }
+ } else {
+ v := url.Values{}
+ for _, arg := range cmd.Args() {
+ v.Add("names", arg)
+ }
+ if err := cli.stream("GET", "/images/get?"+v.Encode(), nil, output, nil); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (cli *DockerCli) CmdLoad(args ...string) error {
+ cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN")
+ infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN")
+
+ if err := cmd.Parse(args); err != nil {
+ return err
+ }
+
+ if cmd.NArg() != 0 {
+ cmd.Usage()
+ return nil
+ }
+
+ var (
+ input io.Reader = cli.in
+ err error
+ )
+ if *infile != "" {
+ input, err = os.Open(*infile)
+ if err != nil {
+ return err
+ }
+ }
+ if err := cli.stream("POST", "/images/load", input, cli.out, nil); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cli *DockerCli) CmdExec(args ...string) error {
+ cmd := cli.Subcmd("exec", "CONTAINER COMMAND [ARG...]", "Run a command in an existing container")
+
+ execConfig, err := runconfig.ParseExec(cmd, args)
+ if err != nil {
+ return err
+ }
+ if execConfig.Container == "" {
+ cmd.Usage()
+ return nil
+ }
+
+ stream, _, err := cli.call("POST", "/containers/"+execConfig.Container+"/exec", execConfig, false)
+ if err != nil {
+ return err
+ }
+
+ var execResult engine.Env
+ if err := execResult.Decode(stream); err != nil {
+ return err
+ }
+
+ execID := execResult.Get("Id")
+
+ if execID == "" {
+ fmt.Fprintf(cli.out, "exec ID empty\n")
+ return nil
+ }
+
+ if execConfig.Detach {
+ if _, _, err := readBody(cli.call("POST", "/exec/"+execID+"/start", execConfig, false)); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // Interactive exec requested.
+ var (
+ out, stderr io.Writer
+ in io.ReadCloser
+ hijacked = make(chan io.Closer)
+ errCh chan error
+ )
+
+ // Block the return until the chan gets closed
+ defer func() {
+ log.Debugf("End of CmdExec(), Waiting for hijack to finish.")
+ if _, ok := <-hijacked; ok {
+ log.Errorf("Hijack did not finish (chan still open)")
+ }
+ }()
+
+ if execConfig.AttachStdin {
+ in = cli.in
+ }
+ if execConfig.AttachStdout {
+ out = cli.out
+ }
+ if execConfig.AttachStderr {
+ if execConfig.Tty {
+ stderr = cli.out
+ } else {
+ stderr = cli.err
+ }
+ }
+ errCh = promise.Go(func() error {
+ return cli.hijack("POST", "/exec/"+execID+"/start", execConfig.Tty, in, out, stderr, hijacked, execConfig)
+ })
+
+ // Acknowledge the hijack before starting
+ select {
+ case closer := <-hijacked:
+ // Make sure that the hijack gets closed when returning (results
+ // in closing the hijack chan and freeing the server's goroutines).
+ if closer != nil {
+ defer closer.Close()
+ }
+ case err := <-errCh:
+ if err != nil {
+ log.Debugf("Error hijack: %s", err)
+ return err
+ }
+ }
+
+ if execConfig.Tty && cli.isTerminalIn {
+ if err := cli.monitorTtySize(execID, true); err != nil {
+ log.Errorf("Error monitoring TTY size: %s", err)
+ }
+ }
+
+ if err := <-errCh; err != nil {
+ log.Debugf("Error hijack: %s", err)
+ return err
+ }
+
+ return nil
+}
diff -Nru docker.io-0.9.1~dfsg1/api/client/hijack.go docker.io-1.3.2~dfsg1/api/client/hijack.go
--- docker.io-0.9.1~dfsg1/api/client/hijack.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/api/client/hijack.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,139 @@
+package client
+
+import (
+ "crypto/tls"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "os"
+ "runtime"
+ "strings"
+
+ "github.com/docker/docker/api"
+ "github.com/docker/docker/dockerversion"
+ "github.com/docker/docker/pkg/log"
+ "github.com/docker/docker/pkg/promise"
+ "github.com/docker/docker/pkg/stdcopy"
+ "github.com/docker/docker/pkg/term"
+)
+
+func (cli *DockerCli) dial() (net.Conn, error) {
+ if cli.tlsConfig != nil && cli.proto != "unix" {
+ return tls.Dial(cli.proto, cli.addr, cli.tlsConfig)
+ }
+ return net.Dial(cli.proto, cli.addr)
+}
+
+func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer, data interface{}) error {
+ defer func() {
+ if started != nil {
+ close(started)
+ }
+ }()
+
+ params, err := cli.encodeData(data)
+ if err != nil {
+ return err
+ }
+ req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
+ req.Header.Set("Content-Type", "plain/text")
+ req.Host = cli.addr
+
+ dial, err := cli.dial()
+ if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
+ }
+ return err
+ }
+ clientconn := httputil.NewClientConn(dial, nil)
+ defer clientconn.Close()
+
+ // Server hijacks the connection, error 'connection closed' expected
+ clientconn.Do(req)
+
+ rwc, br := clientconn.Hijack()
+ defer rwc.Close()
+
+ if started != nil {
+ started <- rwc
+ }
+
+ var receiveStdout chan error
+
+ var oldState *term.State
+
+ if in != nil && setRawTerminal && cli.isTerminalIn && os.Getenv("NORAW") == "" {
+ oldState, err = term.SetRawTerminal(cli.inFd)
+ if err != nil {
+ return err
+ }
+ defer term.RestoreTerminal(cli.inFd, oldState)
+ }
+
+ if stdout != nil || stderr != nil {
+ receiveStdout = promise.Go(func() (err error) {
+ defer func() {
+ if in != nil {
+ if setRawTerminal && cli.isTerminalIn {
+ term.RestoreTerminal(cli.inFd, oldState)
+ }
+ // For some reason this Close call blocks on darwin.
+ // As the client exits right after, simply discard the close
+ // until we find a better solution.
+ if runtime.GOOS != "darwin" { + in.Close() + } + } + }() + + // When TTY is ON, use regular copy + if setRawTerminal && stdout != nil { + _, err = io.Copy(stdout, br) + } else { + _, err = stdcopy.StdCopy(stdout, stderr, br) + } + log.Debugf("[hijack] End of stdout") + return err + }) + } + + sendStdin := promise.Go(func() error { + if in != nil { + io.Copy(rwc, in) + log.Debugf("[hijack] End of stdin") + } + if tcpc, ok := rwc.(*net.TCPConn); ok { + if err := tcpc.CloseWrite(); err != nil { + log.Debugf("Couldn't send EOF: %s", err) + } + } else if unixc, ok := rwc.(*net.UnixConn); ok { + if err := unixc.CloseWrite(); err != nil { + log.Debugf("Couldn't send EOF: %s", err) + } + } + // Discard errors due to pipe interruption + return nil + }) + + if stdout != nil || stderr != nil { + if err := <-receiveStdout; err != nil { + log.Debugf("Error receiveStdout: %s", err) + return err + } + } + + if !cli.isTerminalIn { + if err := <-sendStdin; err != nil { + log.Debugf("Error sendStdin: %s", err) + return err + } + } + return nil +} diff -Nru docker.io-0.9.1~dfsg1/api/client/utils.go docker.io-1.3.2~dfsg1/api/client/utils.go --- docker.io-0.9.1~dfsg1/api/client/utils.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/api/client/utils.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,288 @@ +package client + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + gosignal "os/signal" + "strconv" + "strings" + "syscall" + "time" + + "github.com/docker/docker/api" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +var ( + ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") +) + +func (cli *DockerCli) HTTPClient() *http.Client { + tr := &http.Transport{ + TLSClientConfig: cli.tlsConfig, + Dial: func(network, addr string) (net.Conn, error) { + // Why 32? See issue 8035 + return net.DialTimeout(cli.proto, cli.addr, 32*time.Second) + }, + } + if cli.proto == "unix" { + // XXX workaround for net/http Transport which caches connections, but is + // intended for tcp connections, not unix sockets. 
+ tr.DisableKeepAlives = true
+
+ // No need to compress for local communication
+ tr.DisableCompression = true
+ }
+ return &http.Client{Transport: tr}
+}
+
+func (cli *DockerCli) encodeData(data interface{}) (*bytes.Buffer, error) {
+ params := bytes.NewBuffer(nil)
+ if data != nil {
+ if env, ok := data.(engine.Env); ok {
+ if err := env.Encode(params); err != nil {
+ return nil, err
+ }
+ } else {
+ buf, err := json.Marshal(data)
+ if err != nil {
+ return nil, err
+ }
+ if _, err := params.Write(buf); err != nil {
+ return nil, err
+ }
+ }
+ }
+ return params, nil
+}
+
+func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) {
+ params, err := cli.encodeData(data)
+ if err != nil {
+ return nil, -1, err
+ }
+ req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params)
+ if err != nil {
+ return nil, -1, err
+ }
+ if passAuthInfo {
+ cli.LoadConfigFile()
+ // Resolve the Auth config relevant for this server
+ authConfig := cli.configFile.ResolveAuthConfig(registry.IndexServerAddress())
+ getHeaders := func(authConfig registry.AuthConfig) (map[string][]string, error) {
+ buf, err := json.Marshal(authConfig)
+ if err != nil {
+ return nil, err
+ }
+ registryAuthHeader := []string{
+ base64.URLEncoding.EncodeToString(buf),
+ }
+ return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil
+ }
+ if headers, err := getHeaders(authConfig); err == nil && headers != nil {
+ for k, v := range headers {
+ req.Header[k] = v
+ }
+ }
+ }
+ req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
+ req.URL.Host = cli.addr
+ req.URL.Scheme = cli.scheme
+ if data != nil {
+ req.Header.Set("Content-Type", "application/json")
+ } else if method == "POST" {
+ req.Header.Set("Content-Type", "plain/text")
+ }
+ resp, err := cli.HTTPClient().Do(req)
+ if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return nil, -1, ErrConnectionRefused
+ }
+ return nil, -1, err
+ }
+
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, -1, err
+ }
+ if len(body) == 0 {
+ return nil, resp.StatusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(resp.StatusCode), req.URL)
+ }
+ return nil, resp.StatusCode, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body))
+ }
+
+ return resp.Body, resp.StatusCode, nil
+}
+
+func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
+ return cli.streamHelper(method, path, true, in, out, nil, headers)
+}
+
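HTTPClient above is what lets the client speak HTTP over a unix socket: the
Transport's Dial closure always connects to cli.addr, so the host in the request
URL is effectively a placeholder. A standalone sketch of the same transport setup;
the socket path, the "docker" host name, and the API version in the URL are all
illustrative:

    package main

    import (
        "fmt"
        "net"
        "net/http"
        "time"
    )

    // newUnixClient dials the given socket for every request, mirroring the
    // unix-socket branch of HTTPClient (keep-alives and compression off,
    // since connection caching and compression assume TCP peers).
    func newUnixClient(socketPath string) *http.Client {
        tr := &http.Transport{
            Dial: func(network, addr string) (net.Conn, error) {
                return net.DialTimeout("unix", socketPath, 32*time.Second)
            },
            DisableKeepAlives:  true,
            DisableCompression: true,
        }
        return &http.Client{Transport: tr}
    }

    func main() {
        client := newUnixClient("/var/run/docker.sock")
        // The host part is ignored by the Dial override; the path carries the
        // version prefix the same way cli.call builds it.
        resp, err := client.Get("http://docker/v1.15/_ping")
        if err != nil {
            fmt.Println("request failed:", err)
            return
        }
        defer resp.Body.Close()
        fmt.Println("status:", resp.Status)
    }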
+func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in io.Reader, stdout, stderr io.Writer, headers map[string][]string) error {
+ if (method == "POST" || method == "PUT") && in == nil {
+ in = bytes.NewReader([]byte{})
+ }
+
+ req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), in)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
+ req.URL.Host = cli.addr
+ req.URL.Scheme = cli.scheme
+ if method == "POST" {
+ req.Header.Set("Content-Type", "plain/text")
+ }
+
+ if headers != nil {
+ for k, v := range headers {
+ req.Header[k] = v
+ }
+ }
+ resp, err := cli.HTTPClient().Do(req)
+ if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
+ }
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+ if len(body) == 0 {
+ return fmt.Errorf("Error: %s", http.StatusText(resp.StatusCode))
+ }
+ return fmt.Errorf("Error: %s", bytes.TrimSpace(body))
+ }
+
+ if api.MatchesContentType(resp.Header.Get("Content-Type"), "application/json") {
+ return utils.DisplayJSONMessagesStream(resp.Body, stdout, cli.outFd, cli.isTerminalOut)
+ }
+ if stdout != nil || stderr != nil {
+ // When TTY is ON, use regular copy
+ if setRawTerminal {
+ _, err = io.Copy(stdout, resp.Body)
+ } else {
+ _, err = stdcopy.StdCopy(stdout, stderr, resp.Body)
+ }
+ log.Debugf("[stream] End of stdout")
+ return err
+ }
+ return nil
+}
+
+func (cli *DockerCli) resizeTty(id string, isExec bool) {
+ height, width := cli.getTtySize()
+ if height == 0 && width == 0 {
+ return
+ }
+ v := url.Values{}
+ v.Set("h", strconv.Itoa(height))
+ v.Set("w", strconv.Itoa(width))
+
+ path := ""
+ if !isExec {
+ path = "/containers/" + id + "/resize?"
+ } else {
+ path = "/exec/" + id + "/resize?"
+ }
+
+ if _, _, err := readBody(cli.call("POST", path+v.Encode(), nil, false)); err != nil {
+ log.Debugf("Error resize: %s", err)
+ }
+}
+
+func waitForExit(cli *DockerCli, containerId string) (int, error) {
+ stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, false)
+ if err != nil {
+ return -1, err
+ }
+
+ var out engine.Env
+ if err := out.Decode(stream); err != nil {
+ return -1, err
+ }
+ return out.GetInt("StatusCode"), nil
+}
+
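waitForExit above blocks on POST /containers/{id}/wait, whose body carries the
container's exit status in a StatusCode field once the container stops; the real
client reads it through engine.Env.GetInt. A sketch decoding such a response with
plain encoding/json, assuming only that field layout:

    package main

    import (
        "encoding/json"
        "fmt"
        "strings"
    )

    // waitResponse models the body of POST /containers/{id}/wait.
    type waitResponse struct {
        StatusCode int `json:"StatusCode"`
    }

    func main() {
        // A body like the daemon sends once the container exits.
        body := strings.NewReader(`{"StatusCode": 137}`)

        var wr waitResponse
        if err := json.NewDecoder(body).Decode(&wr); err != nil {
            panic(err)
        }
        // 137 = 128 + 9: the process was terminated by SIGKILL.
        fmt.Println("container exited with", wr.StatusCode)
    }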
+// getExitCode performs an inspect on the container. It returns
+// the running state and the exit code.
+func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
+ stream, _, err := cli.call("GET", "/containers/"+containerId+"/json", nil, false)
+ if err != nil {
+ // If we can't connect, then the daemon probably died.
+ if err != ErrConnectionRefused {
+ return false, -1, err
+ }
+ return false, -1, nil
+ }
+
+ var result engine.Env
+ if err := result.Decode(stream); err != nil {
+ return false, -1, err
+ }
+
+ state := result.GetSubEnv("State")
+ return state.GetBool("Running"), state.GetInt("ExitCode"), nil
+}
+
+func (cli *DockerCli) monitorTtySize(id string, isExec bool) error {
+ cli.resizeTty(id, isExec)
+
+ sigchan := make(chan os.Signal, 1)
+ gosignal.Notify(sigchan, syscall.SIGWINCH)
+ go func() {
+ for _ = range sigchan {
+ cli.resizeTty(id, isExec)
+ }
+ }()
+ return nil
+}
+
+func (cli *DockerCli) getTtySize() (int, int) {
+ if !cli.isTerminalOut {
+ return 0, 0
+ }
+ ws, err := term.GetWinsize(cli.outFd)
+ if err != nil {
+ log.Debugf("Error getting size: %s", err)
+ if ws == nil {
+ return 0, 0
+ }
+ }
+ return int(ws.Height), int(ws.Width)
+}
+
+func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) {
+ if stream != nil {
+ defer stream.Close()
+ }
+ if err != nil {
+ return nil, statusCode, err
+ }
+ body, err := ioutil.ReadAll(stream)
+ if err != nil {
+ return nil, -1, err
+ }
+ return body, statusCode, nil
+}
diff -Nru docker.io-0.9.1~dfsg1/api/client.go docker.io-1.3.2~dfsg1/api/client.go
--- docker.io-0.9.1~dfsg1/api/client.go 2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/api/client.go 1970-01-01 00:00:00.000000000 +0000
@@ -1,2462 +0,0 @@
-package api
-
-import (
- "bufio"
- "bytes"
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "github.com/dotcloud/docker/archive"
- "github.com/dotcloud/docker/auth"
- "github.com/dotcloud/docker/dockerversion"
- "github.com/dotcloud/docker/engine"
- "github.com/dotcloud/docker/nat"
- flag "github.com/dotcloud/docker/pkg/mflag"
- "github.com/dotcloud/docker/pkg/signal"
- "github.com/dotcloud/docker/pkg/term"
- "github.com/dotcloud/docker/registry"
- "github.com/dotcloud/docker/runconfig"
- "github.com/dotcloud/docker/utils"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/http/httputil"
- "net/url"
- "os"
- gosignal "os/signal"
- "path"
- "reflect"
- "regexp"
- goruntime "runtime"
- "strconv"
- "strings"
- "syscall"
- "text/tabwriter"
- "text/template"
- "time"
-)
-
-var funcMap = template.FuncMap{
- "json": func(v interface{}) string {
- a, _ := json.Marshal(v)
- return string(a)
- },
-}
-
-var (
- ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
-)
-
-func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) {
- methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:])
- method := reflect.ValueOf(cli).MethodByName(methodName)
- if !method.IsValid() {
- return nil, false
- }
- return method.Interface().(func(...string) error), true
-}
-
-func ParseCommands(proto, addr string, args ...string) error {
- cli := NewDockerCli(os.Stdin, os.Stdout, os.Stderr, proto, addr)
-
- if len(args) > 0 {
- method, exists := cli.getMethod(args[0])
- if !exists {
- fmt.Println("Error: Command not found:", args[0])
- return cli.CmdHelp(args[1:]...)
- }
- return method(args[1:]...)
- }
- return cli.CmdHelp(args...)
-} - -func (cli *DockerCli) CmdHelp(args ...string) error { - if len(args) > 0 { - method, exists := cli.getMethod(args[0]) - if !exists { - fmt.Fprintf(cli.err, "Error: Command not found: %s\n", args[0]) - } else { - method("--help") - return nil - } - } - help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTUNIXSOCKET) - for _, command := range [][]string{ - {"attach", "Attach to a running container"}, - {"build", "Build a container from a Dockerfile"}, - {"commit", "Create a new image from a container's changes"}, - {"cp", "Copy files/folders from the containers filesystem to the host path"}, - {"diff", "Inspect changes on a container's filesystem"}, - {"events", "Get real time events from the server"}, - {"export", "Stream the contents of a container as a tar archive"}, - {"history", "Show the history of an image"}, - {"images", "List images"}, - {"import", "Create a new filesystem image from the contents of a tarball"}, - {"info", "Display system-wide information"}, - {"insert", "Insert a file in an image"}, - {"inspect", "Return low-level information on a container"}, - {"kill", "Kill a running container"}, - {"load", "Load an image from a tar archive"}, - {"login", "Register or Login to the docker registry server"}, - {"logs", "Fetch the logs of a container"}, - {"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"}, - {"ps", "List containers"}, - {"pull", "Pull an image or a repository from the docker registry server"}, - {"push", "Push an image or a repository to the docker registry server"}, - {"restart", "Restart a running container"}, - {"rm", "Remove one or more containers"}, - {"rmi", "Remove one or more images"}, - {"run", "Run a command in a new container"}, - {"save", "Save an image to a tar archive"}, - {"search", "Search for an image in the docker index"}, - {"start", "Start a stopped container"}, - {"stop", "Stop a running container"}, - {"tag", "Tag an image into a repository"}, - {"top", "Lookup the running processes of a container"}, - {"version", "Show the docker version information"}, - {"wait", "Block until a container stops, then print its exit code"}, - } { - help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1]) - } - fmt.Fprintf(cli.err, "%s\n", help) - return nil -} - -func (cli *DockerCli) CmdInsert(args ...string) error { - cmd := cli.Subcmd("insert", "IMAGE URL PATH", "Insert a file from URL in the IMAGE at PATH") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 3 { - cmd.Usage() - return nil - } - - v := url.Values{} - v.Set("url", cmd.Arg(1)) - v.Set("path", cmd.Arg(2)) - - return cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, cli.out, nil) -} - -func (cli *DockerCli) CmdBuild(args ...string) error { - cmd := cli.Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new container image from the source code at PATH") - tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success") - suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers") - noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") - rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build") - if err := cmd.Parse(args); 
err != nil { - return nil - } - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - var ( - context archive.Archive - isRemote bool - err error - ) - - if cmd.Arg(0) == "-" { - // As a special case, 'docker build -' will build from an empty context with the - // contents of stdin as a Dockerfile - dockerfile, err := ioutil.ReadAll(cli.in) - if err != nil { - return err - } - context, err = archive.Generate("Dockerfile", string(dockerfile)) - } else if utils.IsURL(cmd.Arg(0)) || utils.IsGIT(cmd.Arg(0)) { - isRemote = true - } else { - if _, err := os.Stat(cmd.Arg(0)); err != nil { - return err - } - filename := path.Join(cmd.Arg(0), "Dockerfile") - if _, err = os.Stat(filename); os.IsNotExist(err) { - return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0)) - } - context, err = archive.Tar(cmd.Arg(0), archive.Uncompressed) - } - var body io.Reader - // Setup an upload progress bar - // FIXME: ProgressReader shouldn't be this annoying to use - if context != nil { - sf := utils.NewStreamFormatter(false) - body = utils.ProgressReader(context, 0, cli.err, sf, true, "", "Uploading context") - } - // Upload the build context - v := &url.Values{} - v.Set("t", *tag) - - if *suppressOutput { - v.Set("q", "1") - } - if isRemote { - v.Set("remote", cmd.Arg(0)) - } - if *noCache { - v.Set("nocache", "1") - } - if *rm { - v.Set("rm", "1") - } - - cli.LoadConfigFile() - - headers := http.Header(make(map[string][]string)) - buf, err := json.Marshal(cli.configFile) - if err != nil { - return err - } - headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) - - if context != nil { - headers.Set("Content-Type", "application/tar") - } - err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers) - if jerr, ok := err.(*utils.JSONError); ok { - // If no error code is set, default to 1 - if jerr.Code == 0 { - jerr.Code = 1 - } - return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code} - } - return err -} - -// 'docker login': login / register a user to registry service. 
-func (cli *DockerCli) CmdLogin(args ...string) error { - cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+auth.IndexServerAddress()+"\" is the default.") - - var username, password, email string - - cmd.StringVar(&username, []string{"u", "-username"}, "", "Username") - cmd.StringVar(&password, []string{"p", "-password"}, "", "Password") - cmd.StringVar(&email, []string{"e", "-email"}, "", "Email") - err := cmd.Parse(args) - if err != nil { - return nil - } - serverAddress := auth.IndexServerAddress() - if len(cmd.Args()) > 0 { - serverAddress = cmd.Arg(0) - } - - promptDefault := func(prompt string, configDefault string) { - if configDefault == "" { - fmt.Fprintf(cli.out, "%s: ", prompt) - } else { - fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault) - } - } - - readInput := func(in io.Reader, out io.Writer) string { - reader := bufio.NewReader(in) - line, _, err := reader.ReadLine() - if err != nil { - fmt.Fprintln(out, err.Error()) - os.Exit(1) - } - return string(line) - } - - cli.LoadConfigFile() - authconfig, ok := cli.configFile.Configs[serverAddress] - if !ok { - authconfig = auth.AuthConfig{} - } - - if username == "" { - promptDefault("Username", authconfig.Username) - username = readInput(cli.in, cli.out) - if username == "" { - username = authconfig.Username - } - } - if username != authconfig.Username { - if password == "" { - oldState, _ := term.SaveState(cli.terminalFd) - fmt.Fprintf(cli.out, "Password: ") - term.DisableEcho(cli.terminalFd, oldState) - - password = readInput(cli.in, cli.out) - fmt.Fprint(cli.out, "\n") - - term.RestoreTerminal(cli.terminalFd, oldState) - if password == "" { - return fmt.Errorf("Error : Password Required") - } - } - - if email == "" { - promptDefault("Email", authconfig.Email) - email = readInput(cli.in, cli.out) - if email == "" { - email = authconfig.Email - } - } - } else { - password = authconfig.Password - email = authconfig.Email - } - authconfig.Username = username - authconfig.Password = password - authconfig.Email = email - authconfig.ServerAddress = serverAddress - cli.configFile.Configs[serverAddress] = authconfig - - stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], false) - if statusCode == 401 { - delete(cli.configFile.Configs, serverAddress) - auth.SaveConfig(cli.configFile) - return err - } - if err != nil { - return err - } - var out2 engine.Env - err = out2.Decode(stream) - if err != nil { - cli.configFile, _ = auth.LoadConfig(os.Getenv("HOME")) - return err - } - auth.SaveConfig(cli.configFile) - if out2.Get("Status") != "" { - fmt.Fprintf(cli.out, "%s\n", out2.Get("Status")) - } - return nil -} - -// 'docker wait': block until a container stops -func (cli *DockerCli) CmdWait(args ...string) error { - cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - var encounteredError error - for _, name := range cmd.Args() { - status, err := waitForExit(cli, name) - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to wait one or more containers") - } else { - fmt.Fprintf(cli.out, "%d\n", status) - } - } - return encounteredError -} - -// 'docker version': show version information -func (cli *DockerCli) CmdVersion(args ...string) error { - cmd := cli.Subcmd("version", "", "Show the docker 
version information.") - if err := cmd.Parse(args); err != nil { - return nil - } - - if cmd.NArg() > 0 { - cmd.Usage() - return nil - } - if dockerversion.VERSION != "" { - fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION) - } - fmt.Fprintf(cli.out, "Go version (client): %s\n", goruntime.Version()) - if dockerversion.GITCOMMIT != "" { - fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT) - } - - body, _, err := readBody(cli.call("GET", "/version", nil, false)) - if err != nil { - return err - } - - out := engine.NewOutput() - remoteVersion, err := out.AddEnv() - if err != nil { - utils.Errorf("Error reading remote version: %s\n", err) - return err - } - if _, err := out.Write(body); err != nil { - utils.Errorf("Error reading remote version: %s\n", err) - return err - } - out.Close() - fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version")) - fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit")) - fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion")) - release := utils.GetReleaseVersion() - if release != "" { - fmt.Fprintf(cli.out, "Last stable version: %s", release) - if (dockerversion.VERSION != "" || remoteVersion.Exists("Version")) && (strings.Trim(dockerversion.VERSION, "-dev") != release || strings.Trim(remoteVersion.Get("Version"), "-dev") != release) { - fmt.Fprintf(cli.out, ", please update docker") - } - fmt.Fprintf(cli.out, "\n") - } - return nil -} - -// 'docker info': display system-wide information. -func (cli *DockerCli) CmdInfo(args ...string) error { - cmd := cli.Subcmd("info", "", "Display system-wide information") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() > 0 { - cmd.Usage() - return nil - } - - body, _, err := readBody(cli.call("GET", "/info", nil, false)) - if err != nil { - return err - } - - out := engine.NewOutput() - remoteInfo, err := out.AddEnv() - if err != nil { - return err - } - - if _, err := out.Write(body); err != nil { - utils.Errorf("Error reading remote info: %s\n", err) - return err - } - out.Close() - - fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers")) - fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images")) - fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver")) - var driverStatus [][2]string - if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil { - return err - } - for _, pair := range driverStatus { - fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) - } - fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) - fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) - - if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" { - fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug")) - fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "") - fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd")) - fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines")) - fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener")) - - if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" { - fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1) - } - if initPath := remoteInfo.Get("InitPath"); initPath != "" { - fmt.Fprintf(cli.out, "Init Path: %s\n", initPath) - } - } - - if len(remoteInfo.GetList("IndexServerAddress")) != 0 { - cli.LoadConfigFile() - u := 
cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username - if len(u) > 0 { - fmt.Fprintf(cli.out, "Username: %v\n", u) - fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress")) - } - } - if !remoteInfo.GetBool("MemoryLimit") { - fmt.Fprintf(cli.err, "WARNING: No memory limit support\n") - } - if !remoteInfo.GetBool("SwapLimit") { - fmt.Fprintf(cli.err, "WARNING: No swap limit support\n") - } - if !remoteInfo.GetBool("IPv4Forwarding") { - fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n") - } - return nil -} - -func (cli *DockerCli) CmdStop(args ...string) error { - cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM, and then SIGKILL after grace period)") - nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it.") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - v := url.Values{} - v.Set("t", strconv.Itoa(*nSeconds)) - - var encounteredError error - for _, name := range cmd.Args() { - _, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, false)) - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to stop one or more containers") - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - return encounteredError -} - -func (cli *DockerCli) CmdRestart(args ...string) error { - cmd := cli.Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container") - nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - v := url.Values{} - v.Set("t", strconv.Itoa(*nSeconds)) - - var encounteredError error - for _, name := range cmd.Args() { - _, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, false)) - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to restart one or more containers") - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - return encounteredError -} - -func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { - sigc := make(chan os.Signal, 1) - signal.CatchAll(sigc) - go func() { - for s := range sigc { - if s == syscall.SIGCHLD { - continue - } - var sig string - for sigStr, sigN := range signal.SignalMap { - if sigN == s { - sig = sigStr - break - } - } - if sig == "" { - utils.Errorf("Unsupported signal: %d. 
Discarding.", s) - } - if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil { - utils.Debugf("Error sending signal: %s", err) - } - } - }() - return sigc -} - -func (cli *DockerCli) CmdStart(args ...string) error { - cmd := cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container") - attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process") - openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - var cErr chan error - var tty bool - if *attach || *openStdin { - if cmd.NArg() > 1 { - return fmt.Errorf("You cannot start and attach multiple containers at once.") - } - - body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)) - if err != nil { - return err - } - - container := &Container{} - err = json.Unmarshal(body, container) - if err != nil { - return err - } - - tty = container.Config.Tty - - if !container.Config.Tty { - sigc := cli.forwardAllSignals(cmd.Arg(0)) - defer signal.StopCatch(sigc) - } - - var in io.ReadCloser - - v := url.Values{} - v.Set("stream", "1") - if *openStdin && container.Config.OpenStdin { - v.Set("stdin", "1") - in = cli.in - } - v.Set("stdout", "1") - v.Set("stderr", "1") - - cErr = utils.Go(func() error { - return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil) - }) - } - - var encounteredError error - for _, name := range cmd.Args() { - _, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false)) - if err != nil { - if !*attach || !*openStdin { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to start one or more containers") - } - } else { - if !*attach || !*openStdin { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - } - if encounteredError != nil { - if *openStdin || *attach { - cli.in.Close() - <-cErr - } - return encounteredError - } - - if *openStdin || *attach { - if tty && cli.isTerminal { - if err := cli.monitorTtySize(cmd.Arg(0)); err != nil { - utils.Errorf("Error monitoring TTY size: %s\n", err) - } - } - return <-cErr - } - return nil -} - -func (cli *DockerCli) CmdInspect(args ...string) error { - cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container/image") - tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template.") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - var tmpl *template.Template - if *tmplStr != "" { - var err error - if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil { - fmt.Fprintf(cli.err, "Template parsing error: %v\n", err) - return &utils.StatusError{StatusCode: 64, - Status: "Template parsing error: " + err.Error()} - } - } - - indented := new(bytes.Buffer) - indented.WriteByte('[') - status := 0 - - for _, name := range cmd.Args() { - obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) - if err != nil { - obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, false)) - if err != nil { - if strings.Contains(err.Error(), "No such") { - fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name) - } else { - 
fmt.Fprintf(cli.err, "%s", err) - } - status = 1 - continue - } - } - - if tmpl == nil { - if err = json.Indent(indented, obj, "", " "); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - status = 1 - continue - } - } else { - // Has template, will render - var value interface{} - if err := json.Unmarshal(obj, &value); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - status = 1 - continue - } - if err := tmpl.Execute(cli.out, value); err != nil { - return err - } - cli.out.Write([]byte{'\n'}) - } - indented.WriteString(",") - } - - if indented.Len() > 1 { - // Remove trailing ',' - indented.Truncate(indented.Len() - 1) - } - indented.WriteByte(']') - - if tmpl == nil { - if _, err := io.Copy(cli.out, indented); err != nil { - return err - } - } - - if status != 0 { - return &utils.StatusError{StatusCode: status} - } - return nil -} - -func (cli *DockerCli) CmdTop(args ...string) error { - cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Lookup the running processes of a container") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() == 0 { - cmd.Usage() - return nil - } - val := url.Values{} - if cmd.NArg() > 1 { - val.Set("ps_args", strings.Join(cmd.Args()[1:], " ")) - } - - stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, false) - if err != nil { - return err - } - var procs engine.Env - if err := procs.Decode(stream); err != nil { - return err - } - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - fmt.Fprintln(w, strings.Join(procs.GetList("Titles"), "\t")) - processes := [][]string{} - if err := procs.GetJson("Processes", &processes); err != nil { - return err - } - for _, proc := range processes { - fmt.Fprintln(w, strings.Join(proc, "\t")) - } - w.Flush() - return nil -} - -func (cli *DockerCli) CmdPort(args ...string) error { - cmd := cli.Subcmd("port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 2 { - cmd.Usage() - return nil - } - - port := cmd.Arg(1) - proto := "tcp" - parts := strings.SplitN(port, "/", 2) - if len(parts) == 2 && len(parts[1]) != 0 { - port = parts[0] - proto = parts[1] - } - body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)) - if err != nil { - return err - } - var out Container - err = json.Unmarshal(body, &out) - if err != nil { - return err - } - - if frontends, exists := out.NetworkSettings.Ports[nat.Port(port+"/"+proto)]; exists && frontends != nil { - for _, frontend := range frontends { - fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort) - } - } else { - return fmt.Errorf("Error: No public port '%s' published for %s", cmd.Arg(1), cmd.Arg(0)) - } - return nil -} - -// 'docker rmi IMAGE' removes all images with the name IMAGE -func (cli *DockerCli) CmdRmi(args ...string) error { - var ( - cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images") - force = cmd.Bool([]string{"f", "-force"}, false, "Force") - ) - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - v := url.Values{} - if *force { - v.Set("force", "1") - } - - var encounteredError error - for _, name := range cmd.Args() { - body, _, err := readBody(cli.call("DELETE", "/images/"+name+"?"+v.Encode(), nil, false)) - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to remove one or more images") - } else { - outs := 
engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to remove one or more images") - continue - } - for _, out := range outs.Data { - if out.Get("Deleted") != "" { - fmt.Fprintf(cli.out, "Deleted: %s\n", out.Get("Deleted")) - } else { - fmt.Fprintf(cli.out, "Untagged: %s\n", out.Get("Untagged")) - } - } - } - } - return encounteredError -} - -func (cli *DockerCli) CmdHistory(args ...string) error { - cmd := cli.Subcmd("history", "[OPTIONS] IMAGE", "Show the history of an image") - quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") - noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") - - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - body, _, err := readBody(cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, false)) - if err != nil { - return err - } - - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - if !*quiet { - fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE") - } - - for _, out := range outs.Data { - outID := out.Get("Id") - if !*quiet { - if *noTrunc { - fmt.Fprintf(w, "%s\t", outID) - } else { - fmt.Fprintf(w, "%s\t", utils.TruncateID(outID)) - } - - fmt.Fprintf(w, "%s ago\t", utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0)))) - - if *noTrunc { - fmt.Fprintf(w, "%s\t", out.Get("CreatedBy")) - } else { - fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45)) - } - fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("Size"))) - } else { - if *noTrunc { - fmt.Fprintln(w, outID) - } else { - fmt.Fprintln(w, utils.TruncateID(outID)) - } - } - } - w.Flush() - return nil -} - -func (cli *DockerCli) CmdRm(args ...string) error { - cmd := cli.Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers") - v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated to the container") - link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container") - force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of running container") - - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - val := url.Values{} - if *v { - val.Set("v", "1") - } - if *link { - val.Set("link", "1") - } - if *force { - val.Set("force", "1") - } - - var encounteredError error - for _, name := range cmd.Args() { - _, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, false)) - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to remove one or more containers") - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - return encounteredError -} - -// 'docker kill NAME' kills a running container -func (cli *DockerCli) CmdKill(args ...string) error { - cmd := cli.Subcmd("kill", "[OPTIONS] CONTAINER [CONTAINER...]", "Kill a running container (send SIGKILL, or specified signal)") - signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container") - - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - var encounteredError error - for _, name := range cmd.Args() { - if _, _, err := readBody(cli.call("POST", 
fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, false)); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to kill one or more containers") - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - return encounteredError -} - -func (cli *DockerCli) CmdImport(args ...string) error { - cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.") - - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - var src, repository, tag string - - if cmd.NArg() == 3 { - fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' as been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n") - src, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2) - } else { - src = cmd.Arg(0) - repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) - } - v := url.Values{} - v.Set("repo", repository) - v.Set("tag", tag) - v.Set("fromSrc", src) - - var in io.Reader - - if src == "-" { - in = cli.in - } - - return cli.stream("POST", "/images/create?"+v.Encode(), in, cli.out, nil) -} - -func (cli *DockerCli) CmdPush(args ...string) error { - cmd := cli.Subcmd("push", "NAME", "Push an image or a repository to the registry") - if err := cmd.Parse(args); err != nil { - return nil - } - name := cmd.Arg(0) - - if name == "" { - cmd.Usage() - return nil - } - - cli.LoadConfigFile() - - // Resolve the Repository name from fqn to hostname + name - hostname, _, err := registry.ResolveRepositoryName(name) - if err != nil { - return err - } - // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(hostname) - // If we're not using a custom registry, we know the restrictions - // applied to repository names and can warn the user in advance. - // Custom repositories can have different rules, and we must also - // allow pushing by image ID. - if len(strings.SplitN(name, "/", 2)) == 1 { - username := cli.configFile.Configs[auth.IndexServerAddress()].Username - if username == "" { - username = "" - } - return fmt.Errorf("You cannot push a \"root\" repository. 
Please rename your repository in / (ex: %s/%s)", username, name) - } - - v := url.Values{} - push := func(authConfig auth.AuthConfig) error { - buf, err := json.Marshal(authConfig) - if err != nil { - return err - } - registryAuthHeader := []string{ - base64.URLEncoding.EncodeToString(buf), - } - - return cli.stream("POST", "/images/"+name+"/push?"+v.Encode(), nil, cli.out, map[string][]string{ - "X-Registry-Auth": registryAuthHeader, - }) - } - - if err := push(authConfig); err != nil { - if strings.Contains(err.Error(), "Status 401") { - fmt.Fprintln(cli.out, "\nPlease login prior to push:") - if err := cli.CmdLogin(hostname); err != nil { - return err - } - authConfig := cli.configFile.ResolveAuthConfig(hostname) - return push(authConfig) - } - return err - } - return nil -} - -func (cli *DockerCli) CmdPull(args ...string) error { - cmd := cli.Subcmd("pull", "NAME", "Pull an image or a repository from the registry") - tag := cmd.String([]string{"t", "-tag"}, "", "Download tagged image in repository") - if err := cmd.Parse(args); err != nil { - return nil - } - - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - remote, parsedTag := utils.ParseRepositoryTag(cmd.Arg(0)) - if *tag == "" { - *tag = parsedTag - } - - // Resolve the Repository name from fqn to hostname + name - hostname, _, err := registry.ResolveRepositoryName(remote) - if err != nil { - return err - } - - cli.LoadConfigFile() - - // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(hostname) - v := url.Values{} - v.Set("fromImage", remote) - v.Set("tag", *tag) - - pull := func(authConfig auth.AuthConfig) error { - buf, err := json.Marshal(authConfig) - if err != nil { - return err - } - registryAuthHeader := []string{ - base64.URLEncoding.EncodeToString(buf), - } - - return cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out, map[string][]string{ - "X-Registry-Auth": registryAuthHeader, - }) - } - - if err := pull(authConfig); err != nil { - if strings.Contains(err.Error(), "Status 401") { - fmt.Fprintln(cli.out, "\nPlease login prior to pull:") - if err := cli.CmdLogin(hostname); err != nil { - return err - } - authConfig := cli.configFile.ResolveAuthConfig(hostname) - return pull(authConfig) - } - return err - } - - return nil -} - -func (cli *DockerCli) CmdImages(args ...string) error { - cmd := cli.Subcmd("images", "[OPTIONS] [NAME]", "List images") - quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") - all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate images used to build)") - noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") - flViz := cmd.Bool([]string{"v", "#viz", "-viz"}, false, "Output graph in graphviz format") - flTree := cmd.Bool([]string{"t", "#tree", "-tree"}, false, "Output graph in tree format") - - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() > 1 { - cmd.Usage() - return nil - } - - filter := cmd.Arg(0) - - if *flViz || *flTree { - body, _, err := readBody(cli.call("GET", "/images/json?all=1", nil, false)) - if err != nil { - return err - } - - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - - var ( - printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string) - startImage *engine.Env - - roots = engine.NewTable("Created", outs.Len()) - byParent = make(map[string]*engine.Table) - ) - - for _, image := range outs.Data { - if 
image.Get("ParentId") == "" { - roots.Add(image) - } else { - if children, exists := byParent[image.Get("ParentId")]; exists { - children.Add(image) - } else { - byParent[image.Get("ParentId")] = engine.NewTable("Created", 1) - byParent[image.Get("ParentId")].Add(image) - } - } - - if filter != "" { - if filter == image.Get("Id") || filter == utils.TruncateID(image.Get("Id")) { - startImage = image - } - - for _, repotag := range image.GetList("RepoTags") { - if repotag == filter { - startImage = image - } - } - } - } - - if *flViz { - fmt.Fprintf(cli.out, "digraph docker {\n") - printNode = (*DockerCli).printVizNode - } else { - printNode = (*DockerCli).printTreeNode - } - - if startImage != nil { - root := engine.NewTable("Created", 1) - root.Add(startImage) - cli.WalkTree(*noTrunc, root, byParent, "", printNode) - } else if filter == "" { - cli.WalkTree(*noTrunc, roots, byParent, "", printNode) - } - if *flViz { - fmt.Fprintf(cli.out, " base [style=invisible]\n}\n") - } - } else { - v := url.Values{} - if cmd.NArg() == 1 { - v.Set("filter", filter) - } - if *all { - v.Set("all", "1") - } - - body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false)) - - if err != nil { - return err - } - - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - if !*quiet { - fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE") - } - - for _, out := range outs.Data { - for _, repotag := range out.GetList("RepoTags") { - - repo, tag := utils.ParseRepositoryTag(repotag) - outID := out.Get("Id") - if !*noTrunc { - outID = utils.TruncateID(outID) - } - - if !*quiet { - fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), utils.HumanSize(out.GetInt64("VirtualSize"))) - } else { - fmt.Fprintln(w, outID) - } - } - } - - if !*quiet { - w.Flush() - } - } - return nil -} - -func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) { - length := images.Len() - if length > 1 { - for index, image := range images.Data { - if index+1 == length { - printNode(cli, noTrunc, image, prefix+"└─") - if subimages, exists := byParent[image.Get("Id")]; exists { - cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) - } - } else { - printNode(cli, noTrunc, image, prefix+"\u251C─") - if subimages, exists := byParent[image.Get("Id")]; exists { - cli.WalkTree(noTrunc, subimages, byParent, prefix+"\u2502 ", printNode) - } - } - } - } else { - for _, image := range images.Data { - printNode(cli, noTrunc, image, prefix+"└─") - if subimages, exists := byParent[image.Get("Id")]; exists { - cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) - } - } - } -} - -func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) { - var ( - imageID string - parentID string - ) - if noTrunc { - imageID = image.Get("Id") - parentID = image.Get("ParentId") - } else { - imageID = utils.TruncateID(image.Get("Id")) - parentID = utils.TruncateID(image.Get("ParentId")) - } - if parentID == "" { - fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID) - } else { - fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID) - } - if image.GetList("RepoTags")[0] != ":" { - fmt.Fprintf(cli.out, " \"%s\" 
[label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n", - imageID, imageID, strings.Join(image.GetList("RepoTags"), "\\n")) - } -} - -func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) { - var imageID string - if noTrunc { - imageID = image.Get("Id") - } else { - imageID = utils.TruncateID(image.Get("Id")) - } - - fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, utils.HumanSize(image.GetInt64("VirtualSize"))) - if image.GetList("RepoTags")[0] != ":" { - fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", ")) - } else { - fmt.Fprint(cli.out, "\n") - } -} - -func (cli *DockerCli) CmdPs(args ...string) error { - cmd := cli.Subcmd("ps", "[OPTIONS]", "List containers") - quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") - size := cmd.Bool([]string{"s", "-size"}, false, "Display sizes") - all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.") - noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") - nLatest := cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.") - since := cmd.String([]string{"#sinceId", "-since-id"}, "", "Show only containers created since Id, include non-running ones.") - before := cmd.String([]string{"#beforeId", "-before-id"}, "", "Show only container created before Id, include non-running ones.") - last := cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.") - - if err := cmd.Parse(args); err != nil { - return nil - } - v := url.Values{} - if *last == -1 && *nLatest { - *last = 1 - } - if *all { - v.Set("all", "1") - } - if *last != -1 { - v.Set("limit", strconv.Itoa(*last)) - } - if *since != "" { - v.Set("since", *since) - } - if *before != "" { - v.Set("before", *before) - } - if *size { - v.Set("size", "1") - } - - body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, false)) - if err != nil { - return err - } - - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - if !*quiet { - fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES") - if *size { - fmt.Fprintln(w, "\tSIZE") - } else { - fmt.Fprint(w, "\n") - } - } - - for _, out := range outs.Data { - var ( - outID = out.Get("Id") - outNames = out.GetList("Names") - ) - - if !*noTrunc { - outID = utils.TruncateID(outID) - } - - // Remove the leading / from the names - for i := 0; i < len(outNames); i++ { - outNames[i] = outNames[i][1:] - } - - if !*quiet { - var ( - outCommand = out.Get("Command") - ports = engine.NewTable("", 0) - ) - if !*noTrunc { - outCommand = utils.Trunc(outCommand, 20) - } - ports.ReadListFrom([]byte(out.Get("Ports"))) - fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), displayablePorts(ports), strings.Join(outNames, ",")) - if *size { - if out.GetInt("SizeRootFs") > 0 { - fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.GetInt64("SizeRw")), utils.HumanSize(out.GetInt64("SizeRootFs"))) - } else { - fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("SizeRw"))) - } - } else { - fmt.Fprint(w, "\n") - } - } else { - fmt.Fprintln(w, outID) - } - } - - if !*quiet { - w.Flush() - } - 
return nil -} - -func (cli *DockerCli) CmdCommit(args ...string) error { - cmd := cli.Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes") - flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") - flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (eg. \"John Hannibal Smith \"") - flConfig := cmd.String([]string{"#run", "-run"}, "", "Config automatically applied when the image is run. "+`(ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`) - if err := cmd.Parse(args); err != nil { - return nil - } - - var name, repository, tag string - - if cmd.NArg() == 3 { - fmt.Fprintf(cli.err, "[DEPRECATED] The format 'CONTAINER [REPOSITORY [TAG]]' as been deprecated. Please use CONTAINER [REPOSITORY[:TAG]]\n") - name, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2) - } else { - name = cmd.Arg(0) - repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) - } - - if name == "" { - cmd.Usage() - return nil - } - - v := url.Values{} - v.Set("container", name) - v.Set("repo", repository) - v.Set("tag", tag) - v.Set("comment", *flComment) - v.Set("author", *flAuthor) - var ( - config *runconfig.Config - env engine.Env - ) - if *flConfig != "" { - config = &runconfig.Config{} - if err := json.Unmarshal([]byte(*flConfig), config); err != nil { - return err - } - } - stream, _, err := cli.call("POST", "/commit?"+v.Encode(), config, false) - if err != nil { - return err - } - if err := env.Decode(stream); err != nil { - return err - } - - fmt.Fprintf(cli.out, "%s\n", env.Get("Id")) - return nil -} - -func (cli *DockerCli) CmdEvents(args ...string) error { - cmd := cli.Subcmd("events", "[OPTIONS]", "Get real time events from the server") - since := cmd.String([]string{"#since", "-since"}, "", "Show previously created events and then stream.") - if err := cmd.Parse(args); err != nil { - return nil - } - - if cmd.NArg() != 0 { - cmd.Usage() - return nil - } - - v := url.Values{} - if *since != "" { - loc := time.FixedZone(time.Now().Zone()) - format := "2006-01-02 15:04:05 -0700 MST" - if len(*since) < len(format) { - format = format[:len(*since)] - } - - if t, err := time.ParseInLocation(format, *since, loc); err == nil { - v.Set("since", strconv.FormatInt(t.Unix(), 10)) - } else { - v.Set("since", *since) - } - } - - if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) CmdExport(args ...string) error { - cmd := cli.Subcmd("export", "CONTAINER", "Export the contents of a filesystem as a tar archive to STDOUT") - if err := cmd.Parse(args); err != nil { - return nil - } - - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - if err := cli.stream("GET", "/containers/"+cmd.Arg(0)+"/export", nil, cli.out, nil); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) CmdDiff(args ...string) error { - cmd := cli.Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, false)) - - if err != nil { - return err - } - - outs := engine.NewTable("", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - for _, change := range outs.Data { - var kind string - switch change.GetInt("Kind") { - case archive.ChangeModify: - kind = "C" - case archive.ChangeAdd: - kind = "A" - case 
archive.ChangeDelete:
-			kind = "D"
-		}
-		fmt.Fprintf(cli.out, "%s %s\n", kind, change.Get("Path"))
-	}
-	return nil
-}
-
-func (cli *DockerCli) CmdLogs(args ...string) error {
-	cmd := cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container")
-	follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
-	if err := cmd.Parse(args); err != nil {
-		return nil
-	}
-	if cmd.NArg() != 1 {
-		cmd.Usage()
-		return nil
-	}
-	name := cmd.Arg(0)
-	body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false))
-	if err != nil {
-		return err
-	}
-
-	container := &Container{}
-	err = json.Unmarshal(body, container)
-	if err != nil {
-		return err
-	}
-
-	v := url.Values{}
-	v.Set("logs", "1")
-	v.Set("stdout", "1")
-	v.Set("stderr", "1")
-	if *follow && container.State.Running {
-		v.Set("stream", "1")
-	}
-
-	if err := cli.hijack("POST", "/containers/"+name+"/attach?"+v.Encode(), container.Config.Tty, nil, cli.out, cli.err, nil); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (cli *DockerCli) CmdAttach(args ...string) error {
-	cmd := cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container")
-	noStdin := cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach stdin")
-	proxy := cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)")
-	if err := cmd.Parse(args); err != nil {
-		return nil
-	}
-	if cmd.NArg() != 1 {
-		cmd.Usage()
-		return nil
-	}
-	name := cmd.Arg(0)
-	body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false))
-	if err != nil {
-		return err
-	}
-
-	container := &Container{}
-	err = json.Unmarshal(body, container)
-	if err != nil {
-		return err
-	}
-
-	if !container.State.Running {
-		return fmt.Errorf("You cannot attach to a stopped container, start it first")
-	}
-
-	if container.Config.Tty && cli.isTerminal {
-		if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
-			utils.Debugf("Error monitoring TTY size: %s", err)
-		}
-	}
-
-	var in io.ReadCloser
-
-	v := url.Values{}
-	v.Set("stream", "1")
-	if !*noStdin && container.Config.OpenStdin {
-		v.Set("stdin", "1")
-		in = cli.in
-	}
-	v.Set("stdout", "1")
-	v.Set("stderr", "1")
-
-	if *proxy && !container.Config.Tty {
-		sigc := cli.forwardAllSignals(cmd.Arg(0))
-		defer signal.StopCatch(sigc)
-	}
-
-	if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil); err != nil {
-		return err
-	}
-
-	_, status, err := getExitCode(cli, cmd.Arg(0))
-	if err != nil {
-		return err
-	}
-	if status != 0 {
-		return &utils.StatusError{StatusCode: status}
-	}
-
-	return nil
-}
-
-func (cli *DockerCli) CmdSearch(args ...string) error {
-	cmd := cli.Subcmd("search", "TERM", "Search the docker index for images")
-	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
-	trusted := cmd.Bool([]string{"t", "#trusted", "-trusted"}, false, "Only show trusted builds")
-	stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least xxx stars")
-	if err := cmd.Parse(args); err != nil {
-		return nil
-	}
-	if cmd.NArg() != 1 {
-		cmd.Usage()
-		return nil
-	}
-
-	v := url.Values{}
-	v.Set("term", cmd.Arg(0))
-
-	body, _, err := readBody(cli.call("GET", "/images/search?"+v.Encode(), nil, true))
-
-	if err != nil {
-		return err
-	}
-	outs := engine.NewTable("star_count", 0)
-	if _, err := outs.ReadListFrom(body); err != nil {
-		return err
-	}
-	w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0)
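All of the CLI listings deleted in this hunk (history, images, ps, search) share the text/tabwriter pattern seen in the line just above: create the writer, print tab-separated cells, then Flush once at the end. A minimal self-contained sketch of that pattern, with made-up rows (the image names and counts are illustrative only):

    package main

    import (
    	"fmt"
    	"os"
    	"text/tabwriter"
    )

    func main() {
    	// Same parameters the search listing uses: minwidth 10, tabwidth 1,
    	// padding 3, padded with spaces (history and ps use minwidth 20).
    	w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0)
    	fmt.Fprintln(w, "NAME\tDESCRIPTION\tSTARS")
    	fmt.Fprintf(w, "%s\t%s\t%d\n", "busybox", "a tiny example image", 42) // hypothetical row
    	fmt.Fprintf(w, "%s\t%s\t%d\n", "debian", "another made-up row", 7)
    	w.Flush() // nothing is written until Flush, so column widths cover all rows
    }

tabwriter buffers every cell until Flush so it can compute column widths across the whole table, which is why each command flushes exactly once after the loop.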
- fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tTRUSTED\n") - for _, out := range outs.Data { - if (*trusted && !out.GetBool("is_trusted")) || (*stars > out.GetInt("star_count")) { - continue - } - desc := strings.Replace(out.Get("description"), "\n", " ", -1) - desc = strings.Replace(desc, "\r", " ", -1) - if !*noTrunc && len(desc) > 45 { - desc = utils.Trunc(desc, 42) + "..." - } - fmt.Fprintf(w, "%s\t%s\t%d\t", out.Get("name"), desc, out.GetInt("star_count")) - if out.GetBool("is_official") { - fmt.Fprint(w, "[OK]") - - } - fmt.Fprint(w, "\t") - if out.GetBool("is_trusted") { - fmt.Fprint(w, "[OK]") - } - fmt.Fprint(w, "\n") - } - w.Flush() - return nil -} - -// Ports type - Used to parse multiple -p flags -type ports []int - -func (cli *DockerCli) CmdTag(args ...string) error { - cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository") - force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 2 && cmd.NArg() != 3 { - cmd.Usage() - return nil - } - - var repository, tag string - - if cmd.NArg() == 3 { - fmt.Fprintf(cli.err, "[DEPRECATED] The format 'IMAGE [REPOSITORY [TAG]]' as been deprecated. Please use IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]]\n") - repository, tag = cmd.Arg(1), cmd.Arg(2) - } else { - repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) - } - - v := url.Values{} - v.Set("repo", repository) - v.Set("tag", tag) - - if *force { - v.Set("force", "1") - } - - if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, false)); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) CmdRun(args ...string) error { - // FIXME: just use runconfig.Parse already - config, hostConfig, cmd, err := runconfig.ParseSubcommand(cli.Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container"), args, nil) - if err != nil { - return err - } - if config.Image == "" { - cmd.Usage() - return nil - } - - // Retrieve relevant client-side config - var ( - flName = cmd.Lookup("name") - flRm = cmd.Lookup("rm") - flSigProxy = cmd.Lookup("sig-proxy") - autoRemove, _ = strconv.ParseBool(flRm.Value.String()) - sigProxy, _ = strconv.ParseBool(flSigProxy.Value.String()) - ) - - // Disable sigProxy in case on TTY - if config.Tty { - sigProxy = false - } - - var containerIDFile io.WriteCloser - if len(hostConfig.ContainerIDFile) > 0 { - if _, err := os.Stat(hostConfig.ContainerIDFile); err == nil { - return fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", hostConfig.ContainerIDFile) - } - if containerIDFile, err = os.Create(hostConfig.ContainerIDFile); err != nil { - return fmt.Errorf("Failed to create the container ID file: %s", err) - } - defer func() { - containerIDFile.Close() - var ( - cidFileInfo os.FileInfo - err error - ) - if cidFileInfo, err = os.Stat(hostConfig.ContainerIDFile); err != nil { - return - } - if cidFileInfo.Size() == 0 { - if err := os.Remove(hostConfig.ContainerIDFile); err != nil { - fmt.Printf("failed to remove CID file '%s': %s \n", hostConfig.ContainerIDFile, err) - } - } - }() - } - - containerValues := url.Values{} - if name := flName.Value.String(); name != "" { - containerValues.Set("name", name) - } - - //create the container - stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false) - //if image not found try to pull it - if statusCode == 404 { - 
fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image) - - v := url.Values{} - repos, tag := utils.ParseRepositoryTag(config.Image) - v.Set("fromImage", repos) - v.Set("tag", tag) - - // Resolve the Repository name from fqn to hostname + name - hostname, _, err := registry.ResolveRepositoryName(repos) - if err != nil { - return err - } - - // Load the auth config file, to be able to pull the image - cli.LoadConfigFile() - - // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(hostname) - buf, err := json.Marshal(authConfig) - if err != nil { - return err - } - - registryAuthHeader := []string{ - base64.URLEncoding.EncodeToString(buf), - } - if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil { - return err - } - if stream, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false); err != nil { - return err - } - } else if err != nil { - return err - } - - var runResult engine.Env - if err := runResult.Decode(stream); err != nil { - return err - } - - for _, warning := range runResult.GetList("Warnings") { - fmt.Fprintf(cli.err, "WARNING: %s\n", warning) - } - - if len(hostConfig.ContainerIDFile) > 0 { - if _, err = containerIDFile.Write([]byte(runResult.Get("Id"))); err != nil { - return fmt.Errorf("Failed to write the container ID to the file: %s", err) - } - } - - if sigProxy { - sigc := cli.forwardAllSignals(runResult.Get("Id")) - defer signal.StopCatch(sigc) - } - - var ( - waitDisplayId chan struct{} - errCh chan error - ) - - if !config.AttachStdout && !config.AttachStderr { - // Make this asynchrone in order to let the client write to stdin before having to read the ID - waitDisplayId = make(chan struct{}) - go func() { - defer close(waitDisplayId) - fmt.Fprintf(cli.out, "%s\n", runResult.Get("Id")) - }() - } - - // We need to instanciate the chan because the select needs it. It can - // be closed but can't be uninitialized. - hijacked := make(chan io.Closer) - - // Block the return until the chan gets closed - defer func() { - utils.Debugf("End of CmdRun(), Waiting for hijack to finish.") - if _, ok := <-hijacked; ok { - utils.Errorf("Hijack did not finish (chan still open)") - } - }() - - if config.AttachStdin || config.AttachStdout || config.AttachStderr { - var ( - out, stderr io.Writer - in io.ReadCloser - v = url.Values{} - ) - v.Set("stream", "1") - - if config.AttachStdin { - v.Set("stdin", "1") - in = cli.in - } - if config.AttachStdout { - v.Set("stdout", "1") - out = cli.out - } - if config.AttachStderr { - v.Set("stderr", "1") - if config.Tty { - stderr = cli.out - } else { - stderr = cli.err - } - } - - errCh = utils.Go(func() error { - return cli.hijack("POST", "/containers/"+runResult.Get("Id")+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked) - }) - } else { - close(hijacked) - } - - // Acknowledge the hijack before starting - select { - case closer := <-hijacked: - // Make sure that hijack gets closed when returning. (result - // in closing hijack chan and freeing server's goroutines. 
- if closer != nil { - defer closer.Close() - } - case err := <-errCh: - if err != nil { - utils.Debugf("Error hijack: %s", err) - return err - } - } - - //start the container - if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", hostConfig, false)); err != nil { - return err - } - - if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal { - if err := cli.monitorTtySize(runResult.Get("Id")); err != nil { - utils.Errorf("Error monitoring TTY size: %s\n", err) - } - } - - if errCh != nil { - if err := <-errCh; err != nil { - utils.Debugf("Error hijack: %s", err) - return err - } - } - - // Detached mode: wait for the id to be displayed and return. - if !config.AttachStdout && !config.AttachStderr { - // Detached mode - <-waitDisplayId - return nil - } - - var status int - - // Attached mode - if autoRemove { - // Autoremove: wait for the container to finish, retrieve - // the exit code and remove the container - if _, _, err := readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/wait", nil, false)); err != nil { - return err - } - if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil { - return err - } - if _, _, err := readBody(cli.call("DELETE", "/containers/"+runResult.Get("Id")+"?v=1", nil, false)); err != nil { - return err - } - } else { - if !config.Tty { - // In non-tty mode, we can't dettach, so we know we need to wait. - if status, err = waitForExit(cli, runResult.Get("Id")); err != nil { - return err - } - } else { - // In TTY mode, there is a race. If the process dies too slowly, the state can be update after the getExitCode call - // and result in a wrong exit code. - // No Autoremove: Simply retrieve the exit code - if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil { - return err - } - } - } - if status != 0 { - return &utils.StatusError{StatusCode: status} - } - return nil -} - -func (cli *DockerCli) CmdCp(args ...string) error { - cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTPATH", "Copy files/folders from the PATH to the HOSTPATH") - if err := cmd.Parse(args); err != nil { - return nil - } - - if cmd.NArg() != 2 { - cmd.Usage() - return nil - } - - var copyData engine.Env - info := strings.Split(cmd.Arg(0), ":") - - if len(info) != 2 { - return fmt.Errorf("Error: Path not specified") - } - - copyData.Set("Resource", info[1]) - copyData.Set("HostPath", cmd.Arg(1)) - - stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, false) - if stream != nil { - defer stream.Close() - } - if statusCode == 404 { - return fmt.Errorf("No such container: %v", info[0]) - } - if err != nil { - return err - } - - if statusCode == 200 { - if err := archive.Untar(stream, copyData.Get("HostPath"), nil); err != nil { - return err - } - } - return nil -} - -func (cli *DockerCli) CmdSave(args ...string) error { - cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout)") - if err := cmd.Parse(args); err != nil { - return err - } - - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - image := cmd.Arg(0) - if err := cli.stream("GET", "/images/"+image+"/get", nil, cli.out, nil); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) CmdLoad(args ...string) error { - cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN") - if err := cmd.Parse(args); err != nil { - return err - } - - if cmd.NArg() != 0 { - cmd.Usage() - return nil - } - - if err := cli.stream("POST", 
"/images/load", cli.in, cli.out, nil); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) { - params := bytes.NewBuffer(nil) - if data != nil { - if env, ok := data.(engine.Env); ok { - if err := env.Encode(params); err != nil { - return nil, -1, err - } - } else { - buf, err := json.Marshal(data) - if err != nil { - return nil, -1, err - } - if _, err := params.Write(buf); err != nil { - return nil, -1, err - } - } - } - // fixme: refactor client to support redirect - re := regexp.MustCompile("/+") - path = re.ReplaceAllString(path, "/") - - req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", APIVERSION, path), params) - if err != nil { - return nil, -1, err - } - if passAuthInfo { - cli.LoadConfigFile() - // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(auth.IndexServerAddress()) - getHeaders := func(authConfig auth.AuthConfig) (map[string][]string, error) { - buf, err := json.Marshal(authConfig) - if err != nil { - return nil, err - } - registryAuthHeader := []string{ - base64.URLEncoding.EncodeToString(buf), - } - return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil - } - if headers, err := getHeaders(authConfig); err == nil && headers != nil { - for k, v := range headers { - req.Header[k] = v - } - } - } - req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) - req.Host = cli.addr - if data != nil { - req.Header.Set("Content-Type", "application/json") - } else if method == "POST" { - req.Header.Set("Content-Type", "plain/text") - } - dial, err := net.Dial(cli.proto, cli.addr) - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return nil, -1, ErrConnectionRefused - } - return nil, -1, err - } - clientconn := httputil.NewClientConn(dial, nil) - resp, err := clientconn.Do(req) - if err != nil { - clientconn.Close() - if strings.Contains(err.Error(), "connection refused") { - return nil, -1, ErrConnectionRefused - } - return nil, -1, err - } - - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, -1, err - } - if len(body) == 0 { - return nil, resp.StatusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(resp.StatusCode), req.URL) - } - return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body)) - } - - wrapper := utils.NewReadCloserWrapper(resp.Body, func() error { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - return clientconn.Close() - }) - return wrapper, resp.StatusCode, nil -} - -func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error { - if (method == "POST" || method == "PUT") && in == nil { - in = bytes.NewReader([]byte{}) - } - - // fixme: refactor client to support redirect - re := regexp.MustCompile("/+") - path = re.ReplaceAllString(path, "/") - - req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", APIVERSION, path), in) - if err != nil { - return err - } - req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) - req.Host = cli.addr - if method == "POST" { - req.Header.Set("Content-Type", "plain/text") - } - - if headers != nil { - for k, v := range headers { - req.Header[k] = v - } - } - - dial, err := net.Dial(cli.proto, cli.addr) - if err != nil { - if 
strings.Contains(err.Error(), "connection refused") {
-			return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
-		}
-		return err
-	}
-	clientconn := httputil.NewClientConn(dial, nil)
-	resp, err := clientconn.Do(req)
-	defer clientconn.Close()
-	if err != nil {
-		if strings.Contains(err.Error(), "connection refused") {
-			return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
-		}
-		return err
-	}
-	defer resp.Body.Close()
-
-	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
-		body, err := ioutil.ReadAll(resp.Body)
-		if err != nil {
-			return err
-		}
-		if len(body) == 0 {
-			return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode))
-		}
-		return fmt.Errorf("Error: %s", bytes.TrimSpace(body))
-	}
-
-	if MatchesContentType(resp.Header.Get("Content-Type"), "application/json") {
-		return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal)
-	}
-	if _, err := io.Copy(out, resp.Body); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error {
-	defer func() {
-		if started != nil {
-			close(started)
-		}
-	}()
-	// fixme: refactor client to support redirect
-	re := regexp.MustCompile("/+")
-	path = re.ReplaceAllString(path, "/")
-
-	req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", APIVERSION, path), nil)
-	if err != nil {
-		return err
-	}
-	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
-	req.Header.Set("Content-Type", "plain/text")
-	req.Host = cli.addr
-
-	dial, err := net.Dial(cli.proto, cli.addr)
-	if err != nil {
-		if strings.Contains(err.Error(), "connection refused") {
-			return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
-		}
-		return err
-	}
-	clientconn := httputil.NewClientConn(dial, nil)
-	defer clientconn.Close()
-
-	// Server hijacks the connection, error 'connection closed' expected
-	clientconn.Do(req)
-
-	rwc, br := clientconn.Hijack()
-	defer rwc.Close()
-
-	if started != nil {
-		started <- rwc
-	}
-
-	var receiveStdout chan error
-
-	var oldState *term.State
-
-	if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" {
-		oldState, err = term.SetRawTerminal(cli.terminalFd)
-		if err != nil {
-			return err
-		}
-		defer term.RestoreTerminal(cli.terminalFd, oldState)
-	}
-
-	if stdout != nil || stderr != nil {
-		receiveStdout = utils.Go(func() (err error) {
-			defer func() {
-				if in != nil {
-					if setRawTerminal && cli.isTerminal {
-						term.RestoreTerminal(cli.terminalFd, oldState)
-					}
-					// For some reason this Close call blocks on darwin..
-					// As the client exists right after, simply discard the close
-					// until we find a better solution.
-					if goruntime.GOOS != "darwin" {
-						in.Close()
-					}
-				}
-			}()
-
-			// When TTY is ON, use regular copy
-			if setRawTerminal {
-				_, err = io.Copy(stdout, br)
-			} else {
-				_, err = utils.StdCopy(stdout, stderr, br)
-			}
-			utils.Debugf("[hijack] End of stdout")
-			return err
-		})
-	}
-
-	sendStdin := utils.Go(func() error {
-		if in != nil {
-			io.Copy(rwc, in)
-			utils.Debugf("[hijack] End of stdin")
-		}
-		if tcpc, ok := rwc.(*net.TCPConn); ok {
-			if err := tcpc.CloseWrite(); err != nil {
-				utils.Errorf("Couldn't send EOF: %s\n", err)
-			}
-		} else if unixc, ok := rwc.(*net.UnixConn); ok {
-			if err := unixc.CloseWrite(); err != nil {
-				utils.Errorf("Couldn't send EOF: %s\n", err)
-			}
-		}
-		// Discard errors due to pipe interruption
-		return nil
-	})
-
-	if stdout != nil || stderr != nil {
-		if err := <-receiveStdout; err != nil {
-			utils.Errorf("Error receiveStdout: %s", err)
-			return err
-		}
-	}
-
-	if !cli.isTerminal {
-		if err := <-sendStdin; err != nil {
-			utils.Errorf("Error sendStdin: %s", err)
-			return err
-		}
-	}
-	return nil
-
-}
-
-func (cli *DockerCli) getTtySize() (int, int) {
-	if !cli.isTerminal {
-		return 0, 0
-	}
-	ws, err := term.GetWinsize(cli.terminalFd)
-	if err != nil {
-		utils.Errorf("Error getting size: %s", err)
-		if ws == nil {
-			return 0, 0
-		}
-	}
-	return int(ws.Height), int(ws.Width)
-}
-
-func (cli *DockerCli) resizeTty(id string) {
-	height, width := cli.getTtySize()
-	if height == 0 && width == 0 {
-		return
-	}
-	v := url.Values{}
-	v.Set("h", strconv.Itoa(height))
-	v.Set("w", strconv.Itoa(width))
-	if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil {
-		utils.Errorf("Error resize: %s", err)
-	}
-}
-
-func (cli *DockerCli) monitorTtySize(id string) error {
-	cli.resizeTty(id)
-
-	sigchan := make(chan os.Signal, 1)
-	gosignal.Notify(sigchan, syscall.SIGWINCH)
-	go func() {
-		for _ = range sigchan {
-			cli.resizeTty(id)
-		}
-	}()
-	return nil
-}
-
-func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet {
-	flags := flag.NewFlagSet(name, flag.ContinueOnError)
-	flags.Usage = func() {
-		fmt.Fprintf(cli.err, "\nUsage: docker %s %s\n\n%s\n\n", name, signature, description)
-		flags.PrintDefaults()
-		os.Exit(2)
-	}
-	return flags
-}
-
-func (cli *DockerCli) LoadConfigFile() (err error) {
-	cli.configFile, err = auth.LoadConfig(os.Getenv("HOME"))
-	if err != nil {
-		fmt.Fprintf(cli.err, "WARNING: %s\n", err)
-	}
-	return err
-}
-
-func waitForExit(cli *DockerCli, containerId string) (int, error) {
-	stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, false)
-	if err != nil {
-		return -1, err
-	}
-
-	var out engine.Env
-	if err := out.Decode(stream); err != nil {
-		return -1, err
-	}
-	return out.GetInt("StatusCode"), nil
-}
-
-// getExitCode perform an inspect on the container. It returns
-// the running state and the exit code.
-func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
-	body, _, err := readBody(cli.call("GET", "/containers/"+containerId+"/json", nil, false))
-	if err != nil {
-		// If we can't connect, then the daemon probably died.
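Aside on the resize plumbing in monitorTtySize above, before the inspect logic continues below: it re-sends the terminal dimensions whenever the window changes, and the signal side of that is plain os/signal plus SIGWINCH. A standalone version of that watch loop, printing where the real client would call resizeTty(id):

    package main

    import (
    	"fmt"
    	"os"
    	"os/signal"
    	"syscall"
    )

    func main() {
    	// Buffered channel, as in monitorTtySize, so a signal arriving while
    	// we are busy is not dropped. SIGWINCH is Unix-only.
    	sigchan := make(chan os.Signal, 1)
    	signal.Notify(sigchan, syscall.SIGWINCH)
    	for range sigchan { // runs until the process is killed (Ctrl-C to exit)
    		fmt.Fprintln(os.Stderr, "terminal resized") // the client re-sends h/w here
    	}
    }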
-		if err != ErrConnectionRefused {
-			return false, -1, err
-		}
-		return false, -1, nil
-	}
-	c := &Container{}
-	if err := json.Unmarshal(body, c); err != nil {
-		return false, -1, err
-	}
-	return c.State.Running, c.State.ExitCode, nil
-}
-
-func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) {
-	if stream != nil {
-		defer stream.Close()
-	}
-	if err != nil {
-		return nil, statusCode, err
-	}
-	body, err := ioutil.ReadAll(stream)
-	if err != nil {
-		return nil, -1, err
-	}
-	return body, statusCode, nil
-}
-
-func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli {
-	var (
-		isTerminal = false
-		terminalFd uintptr
-	)
-
-	if in != nil {
-		if file, ok := in.(*os.File); ok {
-			terminalFd = file.Fd()
-			isTerminal = term.IsTerminal(terminalFd)
-		}
-	}
-
-	if err == nil {
-		err = out
-	}
-	return &DockerCli{
-		proto:      proto,
-		addr:       addr,
-		in:         in,
-		out:        out,
-		err:        err,
-		isTerminal: isTerminal,
-		terminalFd: terminalFd,
-	}
-}
-
-type DockerCli struct {
-	proto      string
-	addr       string
-	configFile *auth.ConfigFile
-	in         io.ReadCloser
-	out        io.Writer
-	err        io.Writer
-	isTerminal bool
-	terminalFd uintptr
-}
diff -Nru docker.io-0.9.1~dfsg1/api/common.go docker.io-1.3.2~dfsg1/api/common.go
--- docker.io-0.9.1~dfsg1/api/common.go	2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/api/common.go	2014-11-24 17:38:01.000000000 +0000
@@ -2,20 +2,23 @@
 
 import (
 	"fmt"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/utils"
 	"mime"
 	"strings"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/version"
 )
 
 const (
-	APIVERSION        = "1.10"
-	DEFAULTHTTPHOST   = "127.0.0.1"
-	DEFAULTUNIXSOCKET = "/var/run/docker.sock"
+	APIVERSION        version.Version = "1.15"
+	DEFAULTHTTPHOST                   = "127.0.0.1"
+	DEFAULTUNIXSOCKET                 = "/var/run/docker.sock"
 )
 
 func ValidateHost(val string) (string, error) {
-	host, err := utils.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val)
+	host, err := parsers.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val)
 	if err != nil {
 		return val, err
 	}
@@ -23,11 +26,13 @@
 }
 
 //TODO remove, used on < 1.5 in getContainersJSON
-func displayablePorts(ports *engine.Table) string {
+func DisplayablePorts(ports *engine.Table) string {
 	result := []string{}
+	ports.SetKey("PublicPort")
+	ports.Sort()
 	for _, port := range ports.Data {
 		if port.Get("IP") == "" {
-			result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PublicPort"), port.Get("Type")))
+			result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PrivatePort"), port.Get("Type")))
 		} else {
 			result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type")))
 		}
@@ -38,7 +43,7 @@
 func MatchesContentType(contentType, expectedType string) bool {
 	mimetype, _, err := mime.ParseMediaType(contentType)
 	if err != nil {
-		utils.Errorf("Error parsing media type: %s error: %s", contentType, err.Error())
+		log.Errorf("Error parsing media type: %s error: %s", contentType, err.Error())
 	}
 	return err == nil && mimetype == expectedType
 }
diff -Nru docker.io-0.9.1~dfsg1/api/container.go docker.io-1.3.2~dfsg1/api/container.go
--- docker.io-0.9.1~dfsg1/api/container.go	2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/api/container.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,18 +0,0 @@
-package api
-
-import (
-	"github.com/dotcloud/docker/nat"
-	"github.com/dotcloud/docker/runconfig"
-)
-
-type Container struct {
-	Config     runconfig.Config
-	HostConfig runconfig.HostConfig
-	State      struct {
-		Running  bool
-		ExitCode int
-	}
-	NetworkSettings struct {
-		Ports nat.PortMap
-	}
-}
diff -Nru docker.io-0.9.1~dfsg1/api/README.md docker.io-1.3.2~dfsg1/api/README.md
--- docker.io-0.9.1~dfsg1/api/README.md	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/api/README.md	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,5 @@
+This directory contains code pertaining to the Docker API:
+
+ - Used by the docker client when communicating with the docker daemon
+
+ - Used by third party tools wishing to interface with the docker daemon
diff -Nru docker.io-0.9.1~dfsg1/api/server/MAINTAINERS docker.io-1.3.2~dfsg1/api/server/MAINTAINERS
--- docker.io-0.9.1~dfsg1/api/server/MAINTAINERS	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/api/server/MAINTAINERS	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,2 @@
+Victor Vieux (@vieux)
+Johan Euphrosine (@proppy)
diff -Nru docker.io-0.9.1~dfsg1/api/server/server.go docker.io-1.3.2~dfsg1/api/server/server.go
--- docker.io-0.9.1~dfsg1/api/server/server.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/api/server/server.go	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,1532 @@
+package server
+
+import (
+	"bufio"
+	"bytes"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"expvar"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/http/pprof"
+	"os"
+	"strconv"
+	"strings"
+	"syscall"
+
+	"code.google.com/p/go.net/websocket"
+	"github.com/docker/libcontainer/user"
+	"github.com/gorilla/mux"
+
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/listenbuffer"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/stdcopy"
+	"github.com/docker/docker/pkg/systemd"
+	"github.com/docker/docker/pkg/version"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/utils"
+)
+
+var (
+	activationLock chan struct{}
+)
+
+type HttpApiFunc func(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error
+
+func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) {
+	conn, _, err := w.(http.Hijacker).Hijack()
+	if err != nil {
+		return nil, nil, err
+	}
+	// Flush the options to make sure the client sets the raw mode
+	conn.Write([]byte{})
+	return conn, conn, nil
+}
+
+// Check to make sure request's Content-Type is application/json
+func checkForJson(r *http.Request) error {
+	ct := r.Header.Get("Content-Type")
+
+	// No Content-Type header is ok as long as there's no Body
+	if ct == "" {
+		if r.Body == nil || r.ContentLength == 0 {
+			return nil
+		}
+	}
+
+	// Otherwise it better be json
+	if api.MatchesContentType(ct, "application/json") {
+		return nil
+	}
+	return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct)
+}
+
+//If we don't do this, POST method without Content-type (even with empty body) will fail
+func parseForm(r *http.Request) error {
+	if r == nil {
+		return nil
+	}
+	if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") {
+		return err
+	}
+	return nil
+}
+
+func parseMultipartForm(r *http.Request) error {
+	if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") {
+		return err
+	}
+	return nil
+}
+
+func httpError(w http.ResponseWriter, err error) {
+	statusCode := http.StatusInternalServerError
+	// FIXME: this is brittle and should not be
necessary. + // If we need to differentiate between different possible error types, we should + // create appropriate error types with clearly defined meaning. + if strings.Contains(err.Error(), "No such") { + statusCode = http.StatusNotFound + } else if strings.Contains(err.Error(), "Bad parameter") { + statusCode = http.StatusBadRequest + } else if strings.Contains(err.Error(), "Conflict") { + statusCode = http.StatusConflict + } else if strings.Contains(err.Error(), "Impossible") { + statusCode = http.StatusNotAcceptable + } else if strings.Contains(err.Error(), "Wrong login/password") { + statusCode = http.StatusUnauthorized + } else if strings.Contains(err.Error(), "hasn't been activated") { + statusCode = http.StatusForbidden + } + + if err != nil { + log.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error()) + http.Error(w, err.Error(), statusCode) + } +} + +func writeJSON(w http.ResponseWriter, code int, v engine.Env) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + return v.Encode(w) +} + +func streamJSON(job *engine.Job, w http.ResponseWriter, flush bool) { + w.Header().Set("Content-Type", "application/json") + if flush { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } else { + job.Stdout.Add(w) + } +} + +func getBoolParam(value string) (bool, error) { + if value == "" { + return false, nil + } + ret, err := strconv.ParseBool(value) + if err != nil { + return false, fmt.Errorf("Bad parameter") + } + return ret, nil +} + +func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var ( + authConfig, err = ioutil.ReadAll(r.Body) + job = eng.Job("auth") + stdoutBuffer = bytes.NewBuffer(nil) + ) + if err != nil { + return err + } + job.Setenv("authConfig", string(authConfig)) + job.Stdout.Add(stdoutBuffer) + if err = job.Run(); err != nil { + return err + } + if status := engine.Tail(stdoutBuffer, 1); status != "" { + var env engine.Env + env.Set("Status", status) + return writeJSON(w, http.StatusOK, env) + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func getVersion(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.Header().Set("Content-Type", "application/json") + eng.ServeHTTP(w, r) + return nil +} + +func postContainersKill(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + job := eng.Job("kill", vars["name"]) + if sig := r.Form.Get("signal"); sig != "" { + job.Args = append(job.Args, sig) + } + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func postContainersPause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + job := eng.Job("pause", vars["name"]) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func postContainersUnpause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + job := eng.Job("unpause", 
vars["name"]) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func getContainersExport(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + job := eng.Job("export", vars["name"]) + job.Stdout.Add(w) + if err := job.Run(); err != nil { + return err + } + return nil +} + +func getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + var ( + err error + outs *engine.Table + job = eng.Job("images") + ) + + job.Setenv("filters", r.Form.Get("filters")) + // FIXME this parameter could just be a match filter + job.Setenv("filter", r.Form.Get("filter")) + job.Setenv("all", r.Form.Get("all")) + + if version.GreaterThanOrEqualTo("1.7") { + streamJSON(job, w, false) + } else if outs, err = job.Stdout.AddListTable(); err != nil { + return err + } + + if err := job.Run(); err != nil { + return err + } + + if version.LessThan("1.7") && outs != nil { // Convert to legacy format + outsLegacy := engine.NewTable("Created", 0) + for _, out := range outs.Data { + for _, repoTag := range out.GetList("RepoTags") { + repo, tag := parsers.ParseRepositoryTag(repoTag) + outLegacy := &engine.Env{} + outLegacy.Set("Repository", repo) + outLegacy.SetJson("Tag", tag) + outLegacy.Set("Id", out.Get("Id")) + outLegacy.SetInt64("Created", out.GetInt64("Created")) + outLegacy.SetInt64("Size", out.GetInt64("Size")) + outLegacy.SetInt64("VirtualSize", out.GetInt64("VirtualSize")) + outsLegacy.Add(outLegacy) + } + } + w.Header().Set("Content-Type", "application/json") + if _, err := outsLegacy.WriteListTo(w); err != nil { + return err + } + } + return nil +} + +func getImagesViz(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if version.GreaterThan("1.6") { + w.WriteHeader(http.StatusNotFound) + return fmt.Errorf("This is now implemented in the client.") + } + eng.ServeHTTP(w, r) + return nil +} + +func getInfo(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.Header().Set("Content-Type", "application/json") + eng.ServeHTTP(w, r) + return nil +} + +func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + var job = eng.Job("events") + streamJSON(job, w, true) + job.Setenv("since", r.Form.Get("since")) + job.Setenv("until", r.Form.Get("until")) + return job.Run() +} + +func getImagesHistory(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + var job = eng.Job("history", vars["name"]) + streamJSON(job, w, false) + + if err := job.Run(); err != nil { + return err + } + return nil +} + +func getContainersChanges(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("container_changes", vars["name"]) + streamJSON(job, w, false) + + return job.Run() +} + +func getContainersTop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars 
map[string]string) error { + if version.LessThan("1.4") { + return fmt.Errorf("top was improved a lot since 1.3, Please upgrade your docker client.") + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + + job := eng.Job("top", vars["name"], r.Form.Get("ps_args")) + streamJSON(job, w, false) + return job.Run() +} + +func getContainersJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + err error + outs *engine.Table + job = eng.Job("containers") + ) + + job.Setenv("all", r.Form.Get("all")) + job.Setenv("size", r.Form.Get("size")) + job.Setenv("since", r.Form.Get("since")) + job.Setenv("before", r.Form.Get("before")) + job.Setenv("limit", r.Form.Get("limit")) + job.Setenv("filters", r.Form.Get("filters")) + + if version.GreaterThanOrEqualTo("1.5") { + streamJSON(job, w, false) + } else if outs, err = job.Stdout.AddTable(); err != nil { + return err + } + if err = job.Run(); err != nil { + return err + } + if version.LessThan("1.5") { // Convert to legacy format + for _, out := range outs.Data { + ports := engine.NewTable("", 0) + ports.ReadListFrom([]byte(out.Get("Ports"))) + out.Set("Ports", api.DisplayablePorts(ports)) + } + w.Header().Set("Content-Type", "application/json") + if _, err = outs.WriteListTo(w); err != nil { + return err + } + } + return nil +} + +func getContainersLogs(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + var ( + inspectJob = eng.Job("container_inspect", vars["name"]) + logsJob = eng.Job("logs", vars["name"]) + c, err = inspectJob.Stdout.AddEnv() + ) + if err != nil { + return err + } + logsJob.Setenv("follow", r.Form.Get("follow")) + logsJob.Setenv("tail", r.Form.Get("tail")) + logsJob.Setenv("stdout", r.Form.Get("stdout")) + logsJob.Setenv("stderr", r.Form.Get("stderr")) + logsJob.Setenv("timestamps", r.Form.Get("timestamps")) + // Validate args here, because we can't return not StatusOK after job.Run() call + stdout, stderr := logsJob.GetenvBool("stdout"), logsJob.GetenvBool("stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + if err = inspectJob.Run(); err != nil { + return err + } + + var outStream, errStream io.Writer + outStream = utils.NewWriteFlusher(w) + + if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } else { + errStream = outStream + } + + logsJob.Stdout.Add(outStream) + logsJob.Stderr.Set(errStream) + if err := logsJob.Run(); err != nil { + fmt.Fprintf(outStream, "Error running logs job: %s\n", err) + } + return nil +} + +func postImagesTag(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + job := eng.Job("tag", vars["name"], r.Form.Get("repo"), r.Form.Get("tag")) + job.Setenv("force", r.Form.Get("force")) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusCreated) + return nil +} + +func 
postCommit(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + config engine.Env + env engine.Env + job = eng.Job("commit", r.Form.Get("container")) + stdoutBuffer = bytes.NewBuffer(nil) + ) + + if err := checkForJson(r); err != nil { + return err + } + + if err := config.Decode(r.Body); err != nil { + log.Errorf("%s", err) + } + + if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") { + job.Setenv("pause", "1") + } else { + job.Setenv("pause", r.FormValue("pause")) + } + + job.Setenv("repo", r.Form.Get("repo")) + job.Setenv("tag", r.Form.Get("tag")) + job.Setenv("author", r.Form.Get("author")) + job.Setenv("comment", r.Form.Get("comment")) + job.SetenvSubEnv("config", &config) + + job.Stdout.Add(stdoutBuffer) + if err := job.Run(); err != nil { + return err + } + env.Set("Id", engine.Tail(stdoutBuffer, 1)) + return writeJSON(w, http.StatusCreated, env) +} + +// Creates an image from Pull or from Import +func postImagesCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + var ( + image = r.Form.Get("fromImage") + repo = r.Form.Get("repo") + tag = r.Form.Get("tag") + job *engine.Job + ) + authEncoded := r.Header.Get("X-Registry-Auth") + authConfig := ®istry.AuthConfig{} + if authEncoded != "" { + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = ®istry.AuthConfig{} + } + } + if image != "" { //pull + if tag == "" { + image, tag = parsers.ParseRepositoryTag(image) + } + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + job = eng.Job("pull", image, tag) + job.SetenvBool("parallel", version.GreaterThan("1.3")) + job.SetenvJson("metaHeaders", metaHeaders) + job.SetenvJson("authConfig", authConfig) + } else { //import + if tag == "" { + repo, tag = parsers.ParseRepositoryTag(repo) + } + job = eng.Job("import", r.Form.Get("fromSrc"), repo, tag) + job.Stdin.Add(r.Body) + } + + if version.GreaterThan("1.0") { + job.SetenvBool("json", true) + streamJSON(job, w, true) + } else { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } + if err := job.Run(); err != nil { + if !job.Stdout.Used() { + return err + } + sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) + w.Write(sf.FormatError(err)) + } + + return nil +} + +func getImagesSearch(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + authEncoded = r.Header.Get("X-Registry-Auth") + authConfig = ®istry.AuthConfig{} + metaHeaders = map[string][]string{} + ) + + if authEncoded != "" { + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = ®istry.AuthConfig{} + } + } + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + 
+func getImagesSearch(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + authEncoded = r.Header.Get("X-Registry-Auth") + authConfig = &registry.AuthConfig{} + metaHeaders = map[string][]string{} + ) + + if authEncoded != "" { + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = &registry.AuthConfig{} + } + } + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + var job = eng.Job("search", r.Form.Get("term")) + job.SetenvJson("metaHeaders", metaHeaders) + job.SetenvJson("authConfig", authConfig) + streamJSON(job, w, false) + + return job.Run() +} + +func postImagesPush(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + if err := parseForm(r); err != nil { + return err + } + authConfig := &registry.AuthConfig{} + + authEncoded := r.Header.Get("X-Registry-Auth") + if authEncoded != "" { + // the new format is to handle the authConfig as a header + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = &registry.AuthConfig{} + } + } else { + // the old format is supported for compatibility if there was no authConfig header + if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { + return err + } + } + + job := eng.Job("push", vars["name"]) + job.SetenvJson("metaHeaders", metaHeaders) + job.SetenvJson("authConfig", authConfig) + job.Setenv("tag", r.Form.Get("tag")) + if version.GreaterThan("1.0") { + job.SetenvBool("json", true) + streamJSON(job, w, true) + } else { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } + + if err := job.Run(); err != nil { + if !job.Stdout.Used() { + return err + } + sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) + w.Write(sf.FormatError(err)) + } + return nil +}
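streamJSON and the pre-1.1 fallback above both depend on pushing partial output to the client as it is produced. The utils.NewWriteFlusher helper is not shown in this diff; a plausible minimal equivalent (an assumption, not the real implementation) wraps the ResponseWriter and flushes after every write:

    package main

    import (
    	"fmt"
    	"io"
    	"net/http"
    )

    // writeFlusher flushes the underlying HTTP response after each Write so
    // progress lines reach the client incrementally instead of being buffered.
    type writeFlusher struct {
    	w       io.Writer
    	flusher http.Flusher
    }

    func (wf *writeFlusher) Write(b []byte) (int, error) {
    	n, err := wf.w.Write(b)
    	wf.flusher.Flush() // push the chunk out immediately
    	return n, err
    }

    func newWriteFlusher(w io.Writer) io.Writer {
    	if f, ok := w.(http.Flusher); ok {
    		return &writeFlusher{w: w, flusher: f}
    	}
    	return w // fall back to plain buffered writes
    }

    func main() {
    	http.HandleFunc("/stream", func(w http.ResponseWriter, r *http.Request) {
    		out := newWriteFlusher(w)
    		for i := 0; i < 3; i++ {
    			fmt.Fprintf(out, "{\"progress\":%d}\n", i)
    		}
    	})
    	// http.ListenAndServe(":8080", nil) // omitted in this sketch
    }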
+func getImagesGet(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + if version.GreaterThan("1.0") { + w.Header().Set("Content-Type", "application/x-tar") + } + var job *engine.Job + if name, ok := vars["name"]; ok { + job = eng.Job("image_export", name) + } else { + job = eng.Job("image_export", r.Form["names"]...) + } + job.Stdout.Add(w) + return job.Run() +} + +func postImagesLoad(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + job := eng.Job("load") + job.Stdin.Add(r.Body) + return job.Run() +} + +func postContainersCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + out engine.Env + job = eng.Job("create", r.Form.Get("name")) + outWarnings []string + stdoutBuffer = bytes.NewBuffer(nil) + warnings = bytes.NewBuffer(nil) + ) + + if err := checkForJson(r); err != nil { + return err + } + + if err := job.DecodeEnv(r.Body); err != nil { + return err + } + // Read container ID from the first line of stdout + job.Stdout.Add(stdoutBuffer) + // Read warnings from stderr + job.Stderr.Add(warnings) + if err := job.Run(); err != nil { + return err + } + // Parse warnings from stderr + scanner := bufio.NewScanner(warnings) + for scanner.Scan() { + outWarnings = append(outWarnings, scanner.Text()) + } + out.Set("Id", engine.Tail(stdoutBuffer, 1)) + out.SetList("Warnings", outWarnings) + + return writeJSON(w, http.StatusCreated, out) +} + +func postContainersRestart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + job := eng.Job("restart", vars["name"]) + job.Setenv("t", r.Form.Get("t")) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func deleteContainers(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + job := eng.Job("rm", vars["name"]) + + job.Setenv("forceRemove", r.Form.Get("force")) + + job.Setenv("removeVolume", r.Form.Get("v")) + job.Setenv("removeLink", r.Form.Get("link")) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("image_delete", vars["name"]) + streamJSON(job, w, false) + job.Setenv("force", r.Form.Get("force")) + job.Setenv("noprune", r.Form.Get("noprune")) + + return job.Run() +} + +func postContainersStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var ( + name = vars["name"] + job = eng.Job("start", name) + ) + + // If contentLength is -1, we can assume chunked encoding + // or more technically that the length is unknown + // http://golang.org/src/pkg/net/http/request.go#L139 + // net/http otherwise seems to swallow any headers related to chunked encoding + // including r.TransferEncoding + // allow a nil body for backwards compatibility + if r.Body != nil && (r.ContentLength > 0 || r.ContentLength == -1) { + if err := checkForJson(r); err != nil { + return err + } + + if err := job.DecodeEnv(r.Body); err != nil { + return err + } + } + + if err := job.Run(); err != nil { + if
err.Error() == "Container already started" { + w.WriteHeader(http.StatusNotModified) + return nil + } + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func postContainersStop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + job := eng.Job("stop", vars["name"]) + job.Setenv("t", r.Form.Get("t")) + if err := job.Run(); err != nil { + if err.Error() == "Container already stopped" { + w.WriteHeader(http.StatusNotModified) + return nil + } + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func postContainersWait(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var ( + env engine.Env + stdoutBuffer = bytes.NewBuffer(nil) + job = eng.Job("wait", vars["name"]) + ) + job.Stdout.Add(stdoutBuffer) + if err := job.Run(); err != nil { + return err + } + + env.Set("StatusCode", engine.Tail(stdoutBuffer, 1)) + return writeJSON(w, http.StatusOK, env) +} + +func postContainersResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := eng.Job("resize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil { + return err + } + return nil +} + +func postContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + var ( + job = eng.Job("container_inspect", vars["name"]) + c, err = job.Stdout.AddEnv() + ) + if err != nil { + return err + } + if err = job.Run(); err != nil { + return err + } + + inStream, outStream, err := hijackServer(w) + if err != nil { + return err + } + defer func() { + if tcpc, ok := inStream.(*net.TCPConn); ok { + tcpc.CloseWrite() + } else { + inStream.Close() + } + }() + defer func() { + if tcpc, ok := outStream.(*net.TCPConn); ok { + tcpc.CloseWrite() + } else if closer, ok := outStream.(io.Closer); ok { + closer.Close() + } + }() + + var errStream io.Writer + + fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + + if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } else { + errStream = outStream + } + + job = eng.Job("attach", vars["name"]) + job.Setenv("logs", r.Form.Get("logs")) + job.Setenv("stream", r.Form.Get("stream")) + job.Setenv("stdin", r.Form.Get("stdin")) + job.Setenv("stdout", r.Form.Get("stdout")) + job.Setenv("stderr", r.Form.Get("stderr")) + job.Stdin.Add(inStream) + job.Stdout.Add(outStream) + job.Stderr.Set(errStream) + if err := job.Run(); err != nil { + fmt.Fprintf(outStream, "Error attaching: %s\n", err) + + } + return nil +} + +func wsContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return 
fmt.Errorf("Missing parameter") + } + + if err := eng.Job("container_inspect", vars["name"]).Run(); err != nil { + return err + } + + h := websocket.Handler(func(ws *websocket.Conn) { + defer ws.Close() + job := eng.Job("attach", vars["name"]) + job.Setenv("logs", r.Form.Get("logs")) + job.Setenv("stream", r.Form.Get("stream")) + job.Setenv("stdin", r.Form.Get("stdin")) + job.Setenv("stdout", r.Form.Get("stdout")) + job.Setenv("stderr", r.Form.Get("stderr")) + job.Stdin.Add(ws) + job.Stdout.Add(ws) + job.Stderr.Set(ws) + if err := job.Run(); err != nil { + log.Errorf("Error attaching websocket: %s", err) + } + }) + h.ServeHTTP(w, r) + + return nil +} + +func getContainersByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("container_inspect", vars["name"]) + if version.LessThan("1.12") { + job.SetenvBool("raw", true) + } + streamJSON(job, w, false) + return job.Run() +} + +func getImagesByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("image_inspect", vars["name"]) + if version.LessThan("1.12") { + job.SetenvBool("raw", true) + } + streamJSON(job, w, false) + return job.Run() +} + +func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if version.LessThan("1.3") { + return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.") + } + var ( + authEncoded = r.Header.Get("X-Registry-Auth") + authConfig = ®istry.AuthConfig{} + configFileEncoded = r.Header.Get("X-Registry-Config") + configFile = ®istry.ConfigFile{} + job = eng.Job("build") + ) + + // This block can be removed when API versions prior to 1.9 are deprecated. + // Both headers will be parsed and sent along to the daemon, but if a non-empty + // ConfigFile is present, any value provided as an AuthConfig directly will + // be overridden. See BuildFile::CmdFrom for details. 
+func getContainersByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("container_inspect", vars["name"]) + if version.LessThan("1.12") { + job.SetenvBool("raw", true) + } + streamJSON(job, w, false) + return job.Run() +} + +func getImagesByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("image_inspect", vars["name"]) + if version.LessThan("1.12") { + job.SetenvBool("raw", true) + } + streamJSON(job, w, false) + return job.Run() +} + +func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if version.LessThan("1.3") { + return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.") + } + var ( + authEncoded = r.Header.Get("X-Registry-Auth") + authConfig = &registry.AuthConfig{} + configFileEncoded = r.Header.Get("X-Registry-Config") + configFile = &registry.ConfigFile{} + job = eng.Job("build") + ) + + // This block can be removed when API versions prior to 1.9 are deprecated. + // Both headers will be parsed and sent along to the daemon, but if a non-empty + // ConfigFile is present, any value provided as an AuthConfig directly will + // be overridden. See BuildFile::CmdFrom for details. + if version.LessThan("1.9") && authEncoded != "" { + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // for a build it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = &registry.AuthConfig{} + } + } + + if configFileEncoded != "" { + configFileJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(configFileEncoded)) + if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil { + // for a build it is not an error if no config file was given + // to increase compatibility with the existing api it is defaulting to be empty + configFile = &registry.ConfigFile{} + } + } + + if version.GreaterThanOrEqualTo("1.8") { + job.SetenvBool("json", true) + streamJSON(job, w, true) + } else { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } + + if r.FormValue("forcerm") == "1" && version.GreaterThanOrEqualTo("1.12") { + job.Setenv("rm", "1") + } else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") { + job.Setenv("rm", "1") + } else { + job.Setenv("rm", r.FormValue("rm")) + } + job.Stdin.Add(r.Body) + job.Setenv("remote", r.FormValue("remote")) + job.Setenv("t", r.FormValue("t")) + job.Setenv("q", r.FormValue("q")) + job.Setenv("nocache", r.FormValue("nocache")) + job.Setenv("forcerm", r.FormValue("forcerm")) + job.SetenvJson("authConfig", authConfig) + job.SetenvJson("configFile", configFile) + + if err := job.Run(); err != nil { + if !job.Stdout.Used() { + return err + } + sf := utils.NewStreamFormatter(version.GreaterThanOrEqualTo("1.8")) + w.Write(sf.FormatError(err)) + } + return nil +} + +func postContainersCopy(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + var copyData engine.Env + + if err := checkForJson(r); err != nil { + return err + } + + if err := copyData.Decode(r.Body); err != nil { + return err + } + + if copyData.Get("Resource") == "" { + return fmt.Errorf("Path cannot be empty") + } + + origResource := copyData.Get("Resource") + + if copyData.Get("Resource")[0] == '/' { + copyData.Set("Resource", copyData.Get("Resource")[1:]) + } + + job := eng.Job("container_copy", vars["name"], copyData.Get("Resource")) + job.Stdout.Add(w) + w.Header().Set("Content-Type", "application/x-tar") + if err := job.Run(); err != nil { + log.Errorf("%s", err.Error()) + if strings.Contains(err.Error(), "No such container") { + w.WriteHeader(http.StatusNotFound) + } else if strings.Contains(err.Error(), "no such file or directory") { + return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"]) + } + } + return nil +} + +func postContainerExecCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + out engine.Env + name = vars["name"] + job = eng.Job("execCreate", name) + stdoutBuffer = bytes.NewBuffer(nil) + ) + + if err := job.DecodeEnv(r.Body); err != nil { + return err + } + + job.Stdout.Add(stdoutBuffer) + // Register an instance of Exec in container.
+ if err := job.Run(); err != nil { + fmt.Fprintf(os.Stderr, "Error setting up exec command in container %s: %s\n", name, err) + return err + } + // Return the ID + out.Set("Id", engine.Tail(stdoutBuffer, 1)) + + return writeJSON(w, http.StatusCreated, out) +} + +// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. +func postContainerExecStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + name = vars["name"] + job = eng.Job("execStart", name) + errOut io.Writer = os.Stderr + ) + + if err := job.DecodeEnv(r.Body); err != nil { + return err + } + if !job.GetenvBool("Detach") { + // Setting up the streaming http interface. + inStream, outStream, err := hijackServer(w) + if err != nil { + return err + } + + defer func() { + if tcpc, ok := inStream.(*net.TCPConn); ok { + tcpc.CloseWrite() + } else { + inStream.Close() + } + }() + defer func() { + if tcpc, ok := outStream.(*net.TCPConn); ok { + tcpc.CloseWrite() + } else if closer, ok := outStream.(io.Closer); ok { + closer.Close() + } + }() + + var errStream io.Writer + + fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + if !job.GetenvBool("Tty") && version.GreaterThanOrEqualTo("1.6") { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } else { + errStream = outStream + } + job.Stdin.Add(inStream) + job.Stdout.Add(outStream) + job.Stderr.Set(errStream) + errOut = outStream + } + // Now run the user process in the container. + job.SetCloseIO(false) + if err := job.Run(); err != nil { + fmt.Fprintf(errOut, "Error starting exec command in container %s: %s\n", name, err) + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func postContainerExecResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := eng.Job("execResize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil { + return err + } + return nil +} + +func optionsHandler(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.WriteHeader(http.StatusOK) + return nil +} +func writeCorsHeaders(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Access-Control-Allow-Origin", "*") + w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept") + w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS") +} + +func ping(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + _, err := w.Write([]byte{'O', 'K'}) + return err +} + +func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // log the request + log.Debugf("Calling %s %s", localMethod, localRoute) + + if logging { + log.Infof("%s %s", r.Method, r.RequestURI) + } + + if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { + userAgent := strings.Split(r.Header.Get("User-Agent"), "/") + if len(userAgent) == 2
&& !dockerVersion.Equal(version.Version(userAgent[1])) { + log.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) + } + } + version := version.Version(mux.Vars(r)["version"]) + if version == "" { + version = api.APIVERSION + } + if enableCors { + writeCorsHeaders(w, r) + } + + if version.GreaterThan(api.APIVERSION) { + http.Error(w, fmt.Errorf("client and server don't have same version (client : %s, server: %s)", version, api.APIVERSION).Error(), http.StatusNotFound) + return + } + + if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil { + log.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err) + httpError(w, err) + } + } +} + +// Replicated from expvar.go as not public. +func expvarHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + first := true + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} + +func AttachProfiler(router *mux.Router) { + router.HandleFunc("/debug/vars", expvarHandler) + router.HandleFunc("/debug/pprof/", pprof.Index) + router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + router.HandleFunc("/debug/pprof/profile", pprof.Profile) + router.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + router.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP) + router.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) + router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) +} + +func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion string) (*mux.Router, error) { + r := mux.NewRouter() + if os.Getenv("DEBUG") != "" { + AttachProfiler(r) + } + m := map[string]map[string]HttpApiFunc{ + "GET": { + "/_ping": ping, + "/events": getEvents, + "/info": getInfo, + "/version": getVersion, + "/images/json": getImagesJSON, + "/images/viz": getImagesViz, + "/images/search": getImagesSearch, + "/images/get": getImagesGet, + "/images/{name:.*}/get": getImagesGet, + "/images/{name:.*}/history": getImagesHistory, + "/images/{name:.*}/json": getImagesByName, + "/containers/ps": getContainersJSON, + "/containers/json": getContainersJSON, + "/containers/{name:.*}/export": getContainersExport, + "/containers/{name:.*}/changes": getContainersChanges, + "/containers/{name:.*}/json": getContainersByName, + "/containers/{name:.*}/top": getContainersTop, + "/containers/{name:.*}/logs": getContainersLogs, + "/containers/{name:.*}/attach/ws": wsContainersAttach, + }, + "POST": { + "/auth": postAuth, + "/commit": postCommit, + "/build": postBuild, + "/images/create": postImagesCreate, + "/images/load": postImagesLoad, + "/images/{name:.*}/push": postImagesPush, + "/images/{name:.*}/tag": postImagesTag, + "/containers/create": postContainersCreate, + "/containers/{name:.*}/kill": postContainersKill, + "/containers/{name:.*}/pause": postContainersPause, + "/containers/{name:.*}/unpause": postContainersUnpause, + "/containers/{name:.*}/restart": postContainersRestart, + "/containers/{name:.*}/start": postContainersStart, + "/containers/{name:.*}/stop": postContainersStop, + "/containers/{name:.*}/wait": postContainersWait, + "/containers/{name:.*}/resize": postContainersResize, + "/containers/{name:.*}/attach": postContainersAttach, + "/containers/{name:.*}/copy": postContainersCopy, + 
"/containers/{name:.*}/exec": postContainerExecCreate, + "/exec/{name:.*}/start": postContainerExecStart, + "/exec/{name:.*}/resize": postContainerExecResize, + }, + "DELETE": { + "/containers/{name:.*}": deleteContainers, + "/images/{name:.*}": deleteImages, + }, + "OPTIONS": { + "": optionsHandler, + }, + } + + for method, routes := range m { + for route, fct := range routes { + log.Debugf("Registering %s, %s", method, route) + // NOTE: scope issue, make sure the variables are local and won't be changed + localRoute := route + localFct := fct + localMethod := method + + // build the handler function + f := makeHttpHandler(eng, logging, localMethod, localRoute, localFct, enableCors, version.Version(dockerVersion)) + + // add the new route + if localRoute == "" { + r.Methods(localMethod).HandlerFunc(f) + } else { + r.Path("/v{version:[0-9.]+}" + localRoute).Methods(localMethod).HandlerFunc(f) + r.Path(localRoute).Methods(localMethod).HandlerFunc(f) + } + } + } + + return r, nil +} + +// ServeRequest processes a single http request to the docker remote api. +// FIXME: refactor this to be part of Server and not require re-creating a new +// router each time. This requires first moving ListenAndServe into Server. +func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.ResponseWriter, req *http.Request) error { + router, err := createRouter(eng, false, true, "") + if err != nil { + return err + } + // Insert APIVERSION into the request as a convenience + req.URL.Path = fmt.Sprintf("/v%s%s", apiversion, req.URL.Path) + router.ServeHTTP(w, req) + return nil +} + +// ServeFD creates an http.Server and sets it up to serve given a socket activated +// argument. +func ServeFd(addr string, handle http.Handler) error { + ls, e := systemd.ListenFD(addr) + if e != nil { + return e + } + + chErrors := make(chan error, len(ls)) + + // We don't want to start serving on these sockets until the + // daemon is initialized and installed. Otherwise required handlers + // won't be ready. + <-activationLock + + // Since ListenFD will return one or more sockets we have + // to create a go func to spawn off multiple serves + for i := range ls { + listener := ls[i] + go func() { + httpSrv := http.Server{Handler: handle} + chErrors <- httpSrv.Serve(listener) + }() + } + + for i := 0; i < len(ls); i++ { + err := <-chErrors + if err != nil { + return err + } + } + + return nil +} + +func lookupGidByName(nameOrGid string) (int, error) { + groups, err := user.ParseGroupFilter(func(g *user.Group) bool { + return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid + }) + if err != nil { + return -1, err + } + if groups != nil && len(groups) > 0 { + return groups[0].Gid, nil + } + return -1, fmt.Errorf("Group %s not found", nameOrGid) +} + +func changeGroup(addr string, nameOrGid string) error { + gid, err := lookupGidByName(nameOrGid) + if err != nil { + return err + } + + log.Debugf("%s group found. gid: %d", nameOrGid, gid) + return os.Chown(addr, 0, gid) +} + +// ListenAndServe sets up the required http.Server and gets it listening for +// each addr passed in and does protocol specific checking. 
+// ListenAndServe sets up the required http.Server, gets it listening on each +// addr passed in, and does protocol-specific checking. +func ListenAndServe(proto, addr string, job *engine.Job) error { + var l net.Listener + r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version")) + if err != nil { + return err + } + + if proto == "fd" { + return ServeFd(addr, r) + } + + if proto == "unix" { + if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) { + return err + } + } + + var oldmask int + if proto == "unix" { + oldmask = syscall.Umask(0777) + } + + if job.GetenvBool("BufferRequests") { + l, err = listenbuffer.NewListenBuffer(proto, addr, activationLock) + } else { + l, err = net.Listen(proto, addr) + } + + if proto == "unix" { + syscall.Umask(oldmask) + } + if err != nil { + return err + } + + if proto != "unix" && (job.GetenvBool("Tls") || job.GetenvBool("TlsVerify")) { + tlsCert := job.Getenv("TlsCert") + tlsKey := job.Getenv("TlsKey") + cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey) + if err != nil { + return fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", + tlsCert, tlsKey, err) + } + tlsConfig := &tls.Config{ + NextProtos: []string{"http/1.1"}, + Certificates: []tls.Certificate{cert}, + // Avoid fallback on insecure SSL protocols + MinVersion: tls.VersionTLS10, + } + if job.GetenvBool("TlsVerify") { + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(job.Getenv("TlsCa")) + if err != nil { + return fmt.Errorf("Couldn't read CA certificate: %s", err) + } + certPool.AppendCertsFromPEM(file) + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + tlsConfig.ClientCAs = certPool + } + l = tls.NewListener(l, tlsConfig) + } + + // Basic error and sanity checking + switch proto { + case "tcp": + if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") { + log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") + } + case "unix": + socketGroup := job.Getenv("SocketGroup") + if socketGroup != "" { + if err := changeGroup(addr, socketGroup); err != nil { + if socketGroup == "docker" { + // if the user hasn't explicitly specified the group ownership, don't fail on errors. + log.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error()) + } else { + return err + } + } + + } + if err := os.Chmod(addr, 0660); err != nil { + return err + } + default: + return fmt.Errorf("Invalid protocol format.") + } + + httpSrv := http.Server{Addr: addr, Handler: r} + return httpSrv.Serve(l) +}
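The TLS branch of ListenAndServe above can be read in isolation: load the server key pair, optionally demand client certificates signed by a known CA, and wrap the plain listener. A compact sketch of just that step (file paths are illustrative):

    package main

    import (
    	"crypto/tls"
    	"crypto/x509"
    	"io/ioutil"
    	"log"
    	"net"
    )

    // tlsListener wraps l the way ListenAndServe does: server cert always,
    // client-cert verification only when verifyClients is set.
    func tlsListener(l net.Listener, certFile, keyFile, caFile string, verifyClients bool) (net.Listener, error) {
    	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
    	if err != nil {
    		return nil, err
    	}
    	config := &tls.Config{
    		NextProtos:   []string{"http/1.1"},
    		Certificates: []tls.Certificate{cert},
    		MinVersion:   tls.VersionTLS10, // the daemon's floor, per the code above
    	}
    	if verifyClients {
    		pem, err := ioutil.ReadFile(caFile)
    		if err != nil {
    			return nil, err
    		}
    		pool := x509.NewCertPool()
    		pool.AppendCertsFromPEM(pem)
    		config.ClientAuth = tls.RequireAndVerifyClientCert
    		config.ClientCAs = pool
    	}
    	return tls.NewListener(l, config), nil
    }

    func main() {
    	l, err := net.Listen("tcp", "127.0.0.1:0")
    	if err != nil {
    		log.Fatal(err)
    	}
    	if _, err := tlsListener(l, "server-cert.pem", "server-key.pem", "ca.pem", true); err != nil {
    		log.Fatal(err) // fails here without real certificate files on disk
    	}
    }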
+// ServeApi loops through all of the protocols sent in to docker and spawns +// off a goroutine to set up a serving http.Server for each. +func ServeApi(job *engine.Job) engine.Status { + if len(job.Args) == 0 { + return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name) + } + var ( + protoAddrs = job.Args + chErrors = make(chan error, len(protoAddrs)) + ) + activationLock = make(chan struct{}) + + for _, protoAddr := range protoAddrs { + protoAddrParts := strings.SplitN(protoAddr, "://", 2) + if len(protoAddrParts) != 2 { + return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name) + } + go func() { + log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1]) + chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job) + }() + } + + for i := 0; i < len(protoAddrs); i++ { + err := <-chErrors + if err != nil { + return job.Error(err) + } + } + + return engine.StatusOK +} + +func AcceptConnections(job *engine.Job) engine.Status { + // Tell the init daemon we are accepting requests + go systemd.SdNotify("READY=1") + + // close the lock so the listeners start accepting connections + if activationLock != nil { + close(activationLock) + } + + return engine.StatusOK +} diff -Nru docker.io-0.9.1~dfsg1/api/server/server_unit_test.go docker.io-1.3.2~dfsg1/api/server/server_unit_test.go --- docker.io-0.9.1~dfsg1/api/server/server_unit_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/api/server/server_unit_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,555 @@ +package server + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/version" +) + +func TestGetBoolParam(t *testing.T) { + if ret, err := getBoolParam("true"); err != nil || !ret { + t.Fatalf("true -> true, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("True"); err != nil || !ret { + t.Fatalf("True -> true, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("1"); err != nil || !ret { + t.Fatalf("1 -> true, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam(""); err != nil || ret { + t.Fatalf("\"\" -> false, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("false"); err != nil || ret { + t.Fatalf("false -> false, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("0"); err != nil || ret { + t.Fatalf("0 -> false, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("faux"); err == nil || ret { + t.Fatalf("faux -> false, err | got %t %s", ret, err) + } +} + +func TestHttpError(t *testing.T) { + r := httptest.NewRecorder() + + httpError(r, fmt.Errorf("No such method")) + if r.Code != http.StatusNotFound { + t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code) + } + + httpError(r, fmt.Errorf("This account hasn't been activated")) + if r.Code != http.StatusForbidden { + t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code) + } + + httpError(r, fmt.Errorf("Some error")) + if r.Code != http.StatusInternalServerError { + t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code) + } +} + +func TestGetVersion(t *testing.T) { + eng := engine.New() + var called bool + eng.Register("version", func(job *engine.Job) engine.Status { + called = true + v := &engine.Env{} + v.SetJson("Version", "42.1") + v.Set("ApiVersion", "1.1.1.1.1") + v.Set("GoVersion", "2.42") + v.Set("Os", "Linux") + v.Set("Arch", "x86_64") + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r :=
serveRequest("GET", "/version", nil, eng, t) + if !called { + t.Fatalf("handler was not called") + } + v := readEnv(r.Body, t) + if v.Get("Version") != "42.1" { + t.Fatalf("%#v\n", v) + } + if r.HeaderMap.Get("Content-Type") != "application/json" { + t.Fatalf("%#v\n", r) + } +} + +func TestGetInfo(t *testing.T) { + eng := engine.New() + var called bool + eng.Register("info", func(job *engine.Job) engine.Status { + called = true + v := &engine.Env{} + v.SetInt("Containers", 1) + v.SetInt("Images", 42000) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/info", nil, eng, t) + if !called { + t.Fatalf("handler was not called") + } + v := readEnv(r.Body, t) + if v.GetInt("Images") != 42000 { + t.Fatalf("%#v\n", v) + } + if v.GetInt("Containers") != 1 { + t.Fatalf("%#v\n", v) + } + assertContentType(r, "application/json", t) +} + +func TestGetImagesJSON(t *testing.T) { + eng := engine.New() + var called bool + eng.Register("images", func(job *engine.Job) engine.Status { + called = true + v := createEnvFromGetImagesJSONStruct(sampleImage) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/images/json", nil, eng, t) + if !called { + t.Fatal("handler was not called") + } + assertHttpNotError(r, t) + assertContentType(r, "application/json", t) + var observed getImagesJSONStruct + if err := json.Unmarshal(r.Body.Bytes(), &observed); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(observed, sampleImage) { + t.Errorf("Expected %#v but got %#v", sampleImage, observed) + } +} + +func TestGetImagesJSONFilter(t *testing.T) { + eng := engine.New() + filter := "nothing" + eng.Register("images", func(job *engine.Job) engine.Status { + filter = job.Getenv("filter") + return engine.StatusOK + }) + serveRequest("GET", "/images/json?filter=aaaa", nil, eng, t) + if filter != "aaaa" { + t.Errorf("%#v", filter) + } +} + +func TestGetImagesJSONFilters(t *testing.T) { + eng := engine.New() + filter := "nothing" + eng.Register("images", func(job *engine.Job) engine.Status { + filter = job.Getenv("filters") + return engine.StatusOK + }) + serveRequest("GET", "/images/json?filters=nnnn", nil, eng, t) + if filter != "nnnn" { + t.Errorf("%#v", filter) + } +} + +func TestGetImagesJSONAll(t *testing.T) { + eng := engine.New() + allFilter := "-1" + eng.Register("images", func(job *engine.Job) engine.Status { + allFilter = job.Getenv("all") + return engine.StatusOK + }) + serveRequest("GET", "/images/json?all=1", nil, eng, t) + if allFilter != "1" { + t.Errorf("%#v", allFilter) + } +} + +func TestGetImagesJSONLegacyFormat(t *testing.T) { + eng := engine.New() + var called bool + eng.Register("images", func(job *engine.Job) engine.Status { + called = true + outsLegacy := engine.NewTable("Created", 0) + outsLegacy.Add(createEnvFromGetImagesJSONStruct(sampleImage)) + if _, err := outsLegacy.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequestUsingVersion("GET", "/images/json", "1.6", nil, eng, t) + if !called { + t.Fatal("handler was not called") + } + assertHttpNotError(r, t) + assertContentType(r, "application/json", t) + images := engine.NewTable("Created", 0) + if _, err := images.ReadListFrom(r.Body.Bytes()); err != nil { + t.Fatal(err) + } + if images.Len() != 1 { + t.Fatalf("Expected 1 image, %d found", images.Len()) + } + image := images.Data[0] + if image.Get("Tag") != "test-tag" 
{ + t.Errorf("Expected tag 'test-tag', found '%s'", image.Get("Tag")) + } + if image.Get("Repository") != "test-name" { + t.Errorf("Expected repository 'test-name', found '%s'", image.Get("Repository")) + } +} + +func TestGetContainersByName(t *testing.T) { + eng := engine.New() + name := "container_name" + var called bool + eng.Register("container_inspect", func(job *engine.Job) engine.Status { + called = true + if job.Args[0] != name { + t.Errorf("name != '%s': %#v", name, job.Args[0]) + } + if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") { + t.Errorf("dirty env variable not set") + } else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") { + t.Errorf("dirty env variable set when it shouldn't") + } + v := &engine.Env{} + v.SetBool("dirty", true) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/containers/"+name+"/json", nil, eng, t) + if !called { + t.Fatal("handler was not called") + } + assertContentType(r, "application/json", t) + var stdoutJson interface{} + if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil { + t.Fatalf("%#v", err) + } + if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 { + t.Fatalf("%#v", stdoutJson) + } +} + +func TestGetEvents(t *testing.T) { + eng := engine.New() + var called bool + eng.Register("events", func(job *engine.Job) engine.Status { + called = true + since := job.Getenv("since") + if since != "1" { + t.Fatalf("'since' should be 1, found %#v instead", since) + } + until := job.Getenv("until") + if until != "0" { + t.Fatalf("'until' should be 0, found %#v instead", until) + } + v := &engine.Env{} + v.Set("since", since) + v.Set("until", until) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/events?since=1&until=0", nil, eng, t) + if !called { + t.Fatal("handler was not called") + } + assertContentType(r, "application/json", t) + var stdout_json struct { + Since int + Until int + } + if err := json.Unmarshal(r.Body.Bytes(), &stdout_json); err != nil { + t.Fatal(err) + } + if stdout_json.Since != 1 { + t.Errorf("since != 1: %#v", stdout_json.Since) + } + if stdout_json.Until != 0 { + t.Errorf("until != 0: %#v", stdout_json.Until) + } +} + +func TestLogs(t *testing.T) { + eng := engine.New() + var inspect bool + var logs bool + eng.Register("container_inspect", func(job *engine.Job) engine.Status { + inspect = true + if len(job.Args) == 0 { + t.Fatal("Job arguments is empty") + } + if job.Args[0] != "test" { + t.Fatalf("Container name %s, must be test", job.Args[0]) + } + return engine.StatusOK + }) + expected := "logs" + eng.Register("logs", func(job *engine.Job) engine.Status { + logs = true + if len(job.Args) == 0 { + t.Fatal("Job arguments is empty") + } + if job.Args[0] != "test" { + t.Fatalf("Container name %s, must be test", job.Args[0]) + } + follow := job.Getenv("follow") + if follow != "1" { + t.Fatalf("follow: %s, must be 1", follow) + } + stdout := job.Getenv("stdout") + if stdout != "1" { + t.Fatalf("stdout %s, must be 1", stdout) + } + stderr := job.Getenv("stderr") + if stderr != "" { + t.Fatalf("stderr %s, must be empty", stderr) + } + timestamps := job.Getenv("timestamps") + if timestamps != "1" { + t.Fatalf("timestamps %s, must be 1", timestamps) + } + job.Stdout.Write([]byte(expected)) + return engine.StatusOK + }) + r := serveRequest("GET", "/containers/test/logs?follow=1&stdout=1×tamps=1", nil, 
+func TestLogs(t *testing.T) { + eng := engine.New() + var inspect bool + var logs bool + eng.Register("container_inspect", func(job *engine.Job) engine.Status { + inspect = true + if len(job.Args) == 0 { + t.Fatal("Job arguments are empty") + } + if job.Args[0] != "test" { + t.Fatalf("Container name %s, must be test", job.Args[0]) + } + return engine.StatusOK + }) + expected := "logs" + eng.Register("logs", func(job *engine.Job) engine.Status { + logs = true + if len(job.Args) == 0 { + t.Fatal("Job arguments are empty") + } + if job.Args[0] != "test" { + t.Fatalf("Container name %s, must be test", job.Args[0]) + } + follow := job.Getenv("follow") + if follow != "1" { + t.Fatalf("follow: %s, must be 1", follow) + } + stdout := job.Getenv("stdout") + if stdout != "1" { + t.Fatalf("stdout %s, must be 1", stdout) + } + stderr := job.Getenv("stderr") + if stderr != "" { + t.Fatalf("stderr %s, must be empty", stderr) + } + timestamps := job.Getenv("timestamps") + if timestamps != "1" { + t.Fatalf("timestamps %s, must be 1", timestamps) + } + job.Stdout.Write([]byte(expected)) + return engine.StatusOK + }) + r := serveRequest("GET", "/containers/test/logs?follow=1&stdout=1&timestamps=1", nil, eng, t) + if r.Code != http.StatusOK { + t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK) + } + if !inspect { + t.Fatal("container_inspect job was not called") + } + if !logs { + t.Fatal("logs job was not called") + } + res := r.Body.String() + if res != expected { + t.Fatalf("Output %s, expected %s", res, expected) + } +} + +func TestLogsNoStreams(t *testing.T) { + eng := engine.New() + var inspect bool + var logs bool + eng.Register("container_inspect", func(job *engine.Job) engine.Status { + inspect = true + if len(job.Args) == 0 { + t.Fatal("Job arguments are empty") + } + if job.Args[0] != "test" { + t.Fatalf("Container name %s, must be test", job.Args[0]) + } + return engine.StatusOK + }) + eng.Register("logs", func(job *engine.Job) engine.Status { + logs = true + return engine.StatusOK + }) + r := serveRequest("GET", "/containers/test/logs", nil, eng, t) + if r.Code != http.StatusBadRequest { + t.Fatalf("Got status %d, expected %d", r.Code, http.StatusBadRequest) + } + if inspect { + t.Fatal("container_inspect job was called, but it shouldn't have been") + } + if logs { + t.Fatal("logs job was called, but it shouldn't have been") + } + res := strings.TrimSpace(r.Body.String()) + expected := "Bad parameters: you must choose at least one stream" + if !strings.Contains(res, expected) { + t.Fatalf("Output %s, expected %s in it", res, expected) + } +} + +func TestGetImagesHistory(t *testing.T) { + eng := engine.New() + imageName := "docker-test-image" + var called bool + eng.Register("history", func(job *engine.Job) engine.Status { + called = true + if len(job.Args) == 0 { + t.Fatal("Job arguments are empty") + } + if job.Args[0] != imageName { + t.Fatalf("name != '%s': %#v", imageName, job.Args[0]) + } + v := &engine.Env{} + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/images/"+imageName+"/history", nil, eng, t) + if !called { + t.Fatalf("handler was not called") + } + if r.Code != http.StatusOK { + t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK) + } + if r.HeaderMap.Get("Content-Type") != "application/json" { + t.Fatalf("%#v\n", r) + } +} + +func TestGetImagesByName(t *testing.T) { + eng := engine.New() + name := "image_name" + var called bool + eng.Register("image_inspect", func(job *engine.Job) engine.Status { + called = true + if job.Args[0] != name { + t.Fatalf("name != '%s': %#v", name, job.Args[0]) + } + if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") { + t.Fatal("dirty env variable not set") + } else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") { + t.Fatal("dirty env variable set when it shouldn't be") + } + v := &engine.Env{} + v.SetBool("dirty", true) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/images/"+name+"/json", nil, eng, t) + if !called { + t.Fatal("handler was not called") + } + if r.HeaderMap.Get("Content-Type") != "application/json" { + t.Fatalf("%#v\n", r) + } + var stdoutJson interface{} + if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil { + t.Fatalf("%#v", err) + } + if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 { + t.Fatalf("%#v", stdoutJson) + } +}
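The dirty/raw assertions in the two inspect tests above hinge on pkg/version, which (as used throughout this file) compares version strings segment by segment numerically, so "1.12" sorts above "1.9". A tiny self-contained illustration:

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/pkg/version"
    )

    func main() {
    	v := version.Version("1.12")
    	fmt.Println(v.LessThan("1.9"))              // false: 12 > 9 per segment
    	fmt.Println(v.GreaterThanOrEqualTo("1.12")) // true
    	fmt.Println(version.Version("1.5").LessThan("1.12")) // true: 5 < 12
    }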
+func TestDeleteContainers(t *testing.T) { + eng := engine.New() + name := "foo" + var called bool + eng.Register("rm", func(job *engine.Job) engine.Status { + called = true + if len(job.Args) == 0 { + t.Fatalf("Job arguments are empty") + } + if job.Args[0] != name { + t.Fatalf("name != '%s': %#v", name, job.Args[0]) + } + return engine.StatusOK + }) + r := serveRequest("DELETE", "/containers/"+name, nil, eng, t) + if !called { + t.Fatalf("handler was not called") + } + if r.Code != http.StatusNoContent { + t.Fatalf("Got status %d, expected %d", r.Code, http.StatusNoContent) + } +} + +func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder { + return serveRequestUsingVersion(method, target, api.APIVERSION, body, eng, t) +} + +func serveRequestUsingVersion(method, target string, version version.Version, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder { + r := httptest.NewRecorder() + req, err := http.NewRequest(method, target, body) + if err != nil { + t.Fatal(err) + } + if err := ServeRequest(eng, version, r, req); err != nil { + t.Fatal(err) + } + return r +} + +func readEnv(src io.Reader, t *testing.T) *engine.Env { + out := engine.NewOutput() + v, err := out.AddEnv() + if err != nil { + t.Fatal(err) + } + if _, err := io.Copy(out, src); err != nil { + t.Fatal(err) + } + out.Close() + return v +} + +func toJson(data interface{}, t *testing.T) io.Reader { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(data); err != nil { + t.Fatal(err) + } + return &buf +} + +func assertContentType(recorder *httptest.ResponseRecorder, contentType string, t *testing.T) { + if recorder.HeaderMap.Get("Content-Type") != contentType { + t.Fatalf("%#v\n", recorder) + } +} + +// XXX: Duplicated from integration/utils_test.go, but maybe that's OK as that +// should die as soon as we have converted all of the integration tests. +// assertHttpNotError expects the given response not to have an error. +// Otherwise it causes the test to fail.
+func assertHttpNotError(r *httptest.ResponseRecorder, t *testing.T) { + // Non-error http status are [200, 400) + if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest { + t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code)) + } +} + +func createEnvFromGetImagesJSONStruct(data getImagesJSONStruct) *engine.Env { + v := &engine.Env{} + v.SetList("RepoTags", data.RepoTags) + v.Set("Id", data.Id) + v.SetInt64("Created", data.Created) + v.SetInt64("Size", data.Size) + v.SetInt64("VirtualSize", data.VirtualSize) + return v +} + +type getImagesJSONStruct struct { + RepoTags []string + Id string + Created int64 + Size int64 + VirtualSize int64 +} + +var sampleImage getImagesJSONStruct = getImagesJSONStruct{ + RepoTags: []string{"test-name:test-tag"}, + Id: "ID", + Created: 999, + Size: 777, + VirtualSize: 666, +} diff -Nru docker.io-0.9.1~dfsg1/api/server.go docker.io-1.3.2~dfsg1/api/server.go --- docker.io-0.9.1~dfsg1/api/server.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/api/server.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1222 +0,0 @@ -package api - -import ( - "bufio" - "bytes" - "code.google.com/p/go.net/websocket" - "encoding/base64" - "encoding/json" - "expvar" - "fmt" - "github.com/dotcloud/docker/auth" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/pkg/listenbuffer" - "github.com/dotcloud/docker/pkg/systemd" - "github.com/dotcloud/docker/pkg/user" - "github.com/dotcloud/docker/pkg/version" - "github.com/dotcloud/docker/utils" - "github.com/gorilla/mux" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/http/pprof" - "os" - "strconv" - "strings" - "syscall" -) - -var ( - activationLock chan struct{} -) - -type HttpApiFunc func(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error - -func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { - conn, _, err := w.(http.Hijacker).Hijack() - if err != nil { - return nil, nil, err - } - // Flush the options to make sure the client sets the raw mode - conn.Write([]byte{}) - return conn, conn, nil -} - -//If we don't do this, POST method without Content-type (even with empty body) will fail -func parseForm(r *http.Request) error { - if r == nil { - return nil - } - if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { - return err - } - return nil -} - -func parseMultipartForm(r *http.Request) error { - if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { - return err - } - return nil -} - -func httpError(w http.ResponseWriter, err error) { - statusCode := http.StatusInternalServerError - // FIXME: this is brittle and should not be necessary. - // If we need to differentiate between different possible error types, we should - // create appropriate error types with clearly defined meaning. 
- if strings.Contains(err.Error(), "No such") { - statusCode = http.StatusNotFound - } else if strings.Contains(err.Error(), "Bad parameter") { - statusCode = http.StatusBadRequest - } else if strings.Contains(err.Error(), "Conflict") { - statusCode = http.StatusConflict - } else if strings.Contains(err.Error(), "Impossible") { - statusCode = http.StatusNotAcceptable - } else if strings.Contains(err.Error(), "Wrong login/password") { - statusCode = http.StatusUnauthorized - } else if strings.Contains(err.Error(), "hasn't been activated") { - statusCode = http.StatusForbidden - } - - if err != nil { - utils.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error()) - http.Error(w, err.Error(), statusCode) - } -} - -func writeJSON(w http.ResponseWriter, code int, v engine.Env) error { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(code) - return v.Encode(w) -} - -func streamJSON(job *engine.Job, w http.ResponseWriter, flush bool) { - w.Header().Set("Content-Type", "application/json") - if flush { - job.Stdout.Add(utils.NewWriteFlusher(w)) - } else { - job.Stdout.Add(w) - } -} - -func getBoolParam(value string) (bool, error) { - if value == "" { - return false, nil - } - ret, err := strconv.ParseBool(value) - if err != nil { - return false, fmt.Errorf("Bad parameter") - } - return ret, nil -} - -func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var ( - authConfig, err = ioutil.ReadAll(r.Body) - job = eng.Job("auth") - status string - ) - if err != nil { - return err - } - job.Setenv("authConfig", string(authConfig)) - job.Stdout.AddString(&status) - if err = job.Run(); err != nil { - return err - } - if status != "" { - var env engine.Env - env.Set("Status", status) - return writeJSON(w, http.StatusOK, env) - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func getVersion(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - w.Header().Set("Content-Type", "application/json") - eng.ServeHTTP(w, r) - return nil -} - -func postContainersKill(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if err := parseForm(r); err != nil { - return err - } - job := eng.Job("kill", vars["name"]) - if sig := r.Form.Get("signal"); sig != "" { - job.Args = append(job.Args, sig) - } - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func getContainersExport(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - job := eng.Job("export", vars["name"]) - job.Stdout.Add(w) - if err := job.Run(); err != nil { - return err - } - return nil -} - -func getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - - var ( - err error - outs *engine.Table - job = eng.Job("images") - ) - - job.Setenv("filter", r.Form.Get("filter")) - job.Setenv("all", r.Form.Get("all")) - - if version.GreaterThanOrEqualTo("1.7") { - streamJSON(job, w, false) - } else if outs, err = job.Stdout.AddListTable(); err != nil { - return err - } - - if err := job.Run(); err != nil { - return err - } - - if 
version.LessThan("1.7") && outs != nil { // Convert to legacy format - outsLegacy := engine.NewTable("Created", 0) - for _, out := range outs.Data { - for _, repoTag := range out.GetList("RepoTags") { - parts := strings.Split(repoTag, ":") - outLegacy := &engine.Env{} - outLegacy.Set("Repository", parts[0]) - outLegacy.Set("Tag", parts[1]) - outLegacy.Set("Id", out.Get("Id")) - outLegacy.SetInt64("Created", out.GetInt64("Created")) - outLegacy.SetInt64("Size", out.GetInt64("Size")) - outLegacy.SetInt64("VirtualSize", out.GetInt64("VirtualSize")) - outsLegacy.Add(outLegacy) - } - } - w.Header().Set("Content-Type", "application/json") - if _, err := outsLegacy.WriteListTo(w); err != nil { - return err - } - } - return nil -} - -func getImagesViz(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if version.GreaterThan("1.6") { - w.WriteHeader(http.StatusNotFound) - return fmt.Errorf("This is now implemented in the client.") - } - eng.ServeHTTP(w, r) - return nil -} - -func getInfo(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - w.Header().Set("Content-Type", "application/json") - eng.ServeHTTP(w, r) - return nil -} - -func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - - var job = eng.Job("events", r.RemoteAddr) - streamJSON(job, w, true) - job.Setenv("since", r.Form.Get("since")) - return job.Run() -} - -func getImagesHistory(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - var job = eng.Job("history", vars["name"]) - streamJSON(job, w, false) - - if err := job.Run(); err != nil { - return err - } - return nil -} - -func getContainersChanges(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - var job = eng.Job("changes", vars["name"]) - streamJSON(job, w, false) - - return job.Run() -} - -func getContainersTop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if version.LessThan("1.4") { - return fmt.Errorf("top was improved a lot since 1.3, Please upgrade your docker client.") - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if err := parseForm(r); err != nil { - return err - } - - job := eng.Job("top", vars["name"], r.Form.Get("ps_args")) - streamJSON(job, w, false) - return job.Run() -} - -func getContainersJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - var ( - err error - outs *engine.Table - job = eng.Job("containers") - ) - - job.Setenv("all", r.Form.Get("all")) - job.Setenv("size", r.Form.Get("size")) - job.Setenv("since", r.Form.Get("since")) - job.Setenv("before", r.Form.Get("before")) - job.Setenv("limit", r.Form.Get("limit")) - - if version.GreaterThanOrEqualTo("1.5") { - streamJSON(job, w, false) - } else if outs, err = job.Stdout.AddTable(); err != nil { - return err - } - if err = job.Run(); err != nil { - return err - } - if version.LessThan("1.5") { // Convert to legacy format - for _, out := range outs.Data { - ports := 
engine.NewTable("", 0) - ports.ReadListFrom([]byte(out.Get("Ports"))) - out.Set("Ports", displayablePorts(ports)) - } - w.Header().Set("Content-Type", "application/json") - if _, err = outs.WriteListTo(w); err != nil { - return err - } - } - return nil -} - -func postImagesTag(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - job := eng.Job("tag", vars["name"], r.Form.Get("repo"), r.Form.Get("tag")) - job.Setenv("force", r.Form.Get("force")) - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusCreated) - return nil -} - -func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - var ( - config engine.Env - env engine.Env - job = eng.Job("commit", r.Form.Get("container")) - ) - if err := config.Decode(r.Body); err != nil { - utils.Errorf("%s", err) - } - - job.Setenv("repo", r.Form.Get("repo")) - job.Setenv("tag", r.Form.Get("tag")) - job.Setenv("author", r.Form.Get("author")) - job.Setenv("comment", r.Form.Get("comment")) - job.SetenvSubEnv("config", &config) - - var id string - job.Stdout.AddString(&id) - if err := job.Run(); err != nil { - return err - } - env.Set("Id", id) - return writeJSON(w, http.StatusCreated, env) -} - -// Creates an image from Pull or from Import -func postImagesCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - - var ( - image = r.Form.Get("fromImage") - tag = r.Form.Get("tag") - job *engine.Job - ) - authEncoded := r.Header.Get("X-Registry-Auth") - authConfig := &auth.AuthConfig{} - if authEncoded != "" { - authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { - // for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - authConfig = &auth.AuthConfig{} - } - } - if image != "" { //pull - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - job = eng.Job("pull", r.Form.Get("fromImage"), tag) - job.SetenvBool("parallel", version.GreaterThan("1.3")) - job.SetenvJson("metaHeaders", metaHeaders) - job.SetenvJson("authConfig", authConfig) - } else { //import - job = eng.Job("import", r.Form.Get("fromSrc"), r.Form.Get("repo"), tag) - job.Stdin.Add(r.Body) - } - - if version.GreaterThan("1.0") { - job.SetenvBool("json", true) - streamJSON(job, w, true) - } else { - job.Stdout.Add(utils.NewWriteFlusher(w)) - } - if err := job.Run(); err != nil { - if !job.Stdout.Used() { - return err - } - sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) - w.Write(sf.FormatError(err)) - } - - return nil -} - -func getImagesSearch(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - var ( - authEncoded = r.Header.Get("X-Registry-Auth") - authConfig = &auth.AuthConfig{} - metaHeaders = map[string][]string{} - ) - - if authEncoded != "" { - authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := 
json.NewDecoder(authJson).Decode(authConfig); err != nil { - // for a search it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - authConfig = &auth.AuthConfig{} - } - } - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - - var job = eng.Job("search", r.Form.Get("term")) - job.SetenvJson("metaHeaders", metaHeaders) - job.SetenvJson("authConfig", authConfig) - streamJSON(job, w, false) - - return job.Run() -} - -func postImagesInsert(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - job := eng.Job("insert", vars["name"], r.Form.Get("url"), r.Form.Get("path")) - if version.GreaterThan("1.0") { - job.SetenvBool("json", true) - streamJSON(job, w, false) - } else { - job.Stdout.Add(w) - } - if err := job.Run(); err != nil { - if !job.Stdout.Used() { - return err - } - sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) - w.Write(sf.FormatError(err)) - } - - return nil -} - -func postImagesPush(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - if err := parseForm(r); err != nil { - return err - } - authConfig := &auth.AuthConfig{} - - authEncoded := r.Header.Get("X-Registry-Auth") - if authEncoded != "" { - // the new format is to handle the authConfig as a header - authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { - // to increase compatibility to existing api it is defaulting to be empty - authConfig = &auth.AuthConfig{} - } - } else { - // the old format is supported for compatibility if there was no authConfig header - if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { - return err - } - } - - job := eng.Job("push", vars["name"]) - job.SetenvJson("metaHeaders", metaHeaders) - job.SetenvJson("authConfig", authConfig) - if version.GreaterThan("1.0") { - job.SetenvBool("json", true) - streamJSON(job, w, true) - } else { - job.Stdout.Add(utils.NewWriteFlusher(w)) - } - - if err := job.Run(); err != nil { - if !job.Stdout.Used() { - return err - } - sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) - w.Write(sf.FormatError(err)) - } - return nil -} - -func getImagesGet(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if version.GreaterThan("1.0") { - w.Header().Set("Content-Type", "application/x-tar") - } - job := eng.Job("image_export", vars["name"]) - job.Stdout.Add(w) - return job.Run() -} - -func postImagesLoad(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - job := eng.Job("load") - job.Stdin.Add(r.Body) - return job.Run() -} - -func postContainersCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return nil - } - var ( - out engine.Env - job = eng.Job("create", 
r.Form.Get("name")) - outWarnings []string - outId string - warnings = bytes.NewBuffer(nil) - ) - if err := job.DecodeEnv(r.Body); err != nil { - return err - } - // Read container ID from the first line of stdout - job.Stdout.AddString(&outId) - // Read warnings from stderr - job.Stderr.Add(warnings) - if err := job.Run(); err != nil { - return err - } - // Parse warnings from stderr - scanner := bufio.NewScanner(warnings) - for scanner.Scan() { - outWarnings = append(outWarnings, scanner.Text()) - } - out.Set("Id", outId) - out.SetList("Warnings", outWarnings) - return writeJSON(w, http.StatusCreated, out) -} - -func postContainersRestart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - job := eng.Job("restart", vars["name"]) - job.Setenv("t", r.Form.Get("t")) - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func deleteContainers(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - job := eng.Job("container_delete", vars["name"]) - job.Setenv("removeVolume", r.Form.Get("v")) - job.Setenv("removeLink", r.Form.Get("link")) - job.Setenv("forceRemove", r.Form.Get("force")) - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - var job = eng.Job("image_delete", vars["name"]) - streamJSON(job, w, false) - job.Setenv("force", r.Form.Get("force")) - - return job.Run() -} - -func postContainersStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - name := vars["name"] - job := eng.Job("start", name) - // allow a nil body for backwards compatibility - if r.Body != nil { - if MatchesContentType(r.Header.Get("Content-Type"), "application/json") { - if err := job.DecodeEnv(r.Body); err != nil { - return err - } - } - } - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func postContainersStop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - job := eng.Job("stop", vars["name"]) - job.Setenv("t", r.Form.Get("t")) - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func postContainersWait(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - var ( - env engine.Env - status string - job = eng.Job("wait", vars["name"]) - ) - job.Stdout.AddString(&status) - if err := job.Run(); err != nil { - return err - } - // Parse a 16-bit encoded integer to map typical unix exit status. 
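The wait handler below returns the container's exit status as a string and validates it with strconv.ParseInt, using a bitSize of 16 so that anything which cannot be a typical unix exit status is rejected. A self-contained sketch of just that validation step (the helper name is illustrative, not part of this diff):

package main

import (
	"fmt"
	"strconv"
)

// validateExitStatus mirrors the check in postContainersWait: the string
// must be a base-10 integer that fits in 16 bits.
func validateExitStatus(s string) error {
	if _, err := strconv.ParseInt(s, 10, 16); err != nil {
		return fmt.Errorf("invalid exit status %q: %v", s, err)
	}
	return nil
}

func main() {
	fmt.Println(validateExitStatus("137"))   // <nil>
	fmt.Println(validateExitStatus("99999")) // error: value out of range
	fmt.Println(validateExitStatus("abc"))   // error: invalid syntax
}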
- _, err := strconv.ParseInt(status, 10, 16) - if err != nil { - return err - } - env.Set("StatusCode", status) - return writeJSON(w, http.StatusOK, env) -} - -func postContainersResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if err := eng.Job("resize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil { - return err - } - return nil -} - -func postContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - var ( - job = eng.Job("inspect", vars["name"], "container") - c, err = job.Stdout.AddEnv() - ) - if err != nil { - return err - } - if err = job.Run(); err != nil { - return err - } - - inStream, outStream, err := hijackServer(w) - if err != nil { - return err - } - defer func() { - if tcpc, ok := inStream.(*net.TCPConn); ok { - tcpc.CloseWrite() - } else { - inStream.Close() - } - }() - defer func() { - if tcpc, ok := outStream.(*net.TCPConn); ok { - tcpc.CloseWrite() - } else if closer, ok := outStream.(io.Closer); ok { - closer.Close() - } - }() - - var errStream io.Writer - - fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") - - if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") { - errStream = utils.NewStdWriter(outStream, utils.Stderr) - outStream = utils.NewStdWriter(outStream, utils.Stdout) - } else { - errStream = outStream - } - - job = eng.Job("attach", vars["name"]) - job.Setenv("logs", r.Form.Get("logs")) - job.Setenv("stream", r.Form.Get("stream")) - job.Setenv("stdin", r.Form.Get("stdin")) - job.Setenv("stdout", r.Form.Get("stdout")) - job.Setenv("stderr", r.Form.Get("stderr")) - job.Stdin.Add(inStream) - job.Stdout.Add(outStream) - job.Stderr.Set(errStream) - if err := job.Run(); err != nil { - fmt.Fprintf(outStream, "Error: %s\n", err) - - } - return nil -} - -func wsContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - if err := eng.Job("inspect", vars["name"], "container").Run(); err != nil { - return err - } - - h := websocket.Handler(func(ws *websocket.Conn) { - defer ws.Close() - job := eng.Job("attach", vars["name"]) - job.Setenv("logs", r.Form.Get("logs")) - job.Setenv("stream", r.Form.Get("stream")) - job.Setenv("stdin", r.Form.Get("stdin")) - job.Setenv("stdout", r.Form.Get("stdout")) - job.Setenv("stderr", r.Form.Get("stderr")) - job.Stdin.Add(ws) - job.Stdout.Add(ws) - job.Stderr.Set(ws) - if err := job.Run(); err != nil { - utils.Errorf("Error: %s", err) - } - }) - h.ServeHTTP(w, r) - - return nil -} - -func getContainersByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - var job = eng.Job("inspect", vars["name"], "container") - streamJSON(job, w, false) - job.SetenvBool("conflict", true) //conflict=true to detect conflict between containers and images in the job - return job.Run() -} - -func 
getImagesByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - var job = eng.Job("inspect", vars["name"], "image") - streamJSON(job, w, false) - job.SetenvBool("conflict", true) //conflict=true to detect conflict between containers and images in the job - return job.Run() -} - -func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if version.LessThan("1.3") { - return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.") - } - var ( - authEncoded = r.Header.Get("X-Registry-Auth") - authConfig = &auth.AuthConfig{} - configFileEncoded = r.Header.Get("X-Registry-Config") - configFile = &auth.ConfigFile{} - job = eng.Job("build") - ) - - // This block can be removed when API versions prior to 1.9 are deprecated. - // Both headers will be parsed and sent along to the daemon, but if a non-empty - // ConfigFile is present, any value provided as an AuthConfig directly will - // be overridden. See BuildFile::CmdFrom for details. - if version.LessThan("1.9") && authEncoded != "" { - authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { - // for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - authConfig = &auth.AuthConfig{} - } - } - - if configFileEncoded != "" { - configFileJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(configFileEncoded)) - if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil { - // for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - configFile = &auth.ConfigFile{} - } - } - - if version.GreaterThanOrEqualTo("1.8") { - job.SetenvBool("json", true) - streamJSON(job, w, true) - } else { - job.Stdout.Add(utils.NewWriteFlusher(w)) - } - job.Stdin.Add(r.Body) - job.Setenv("remote", r.FormValue("remote")) - job.Setenv("t", r.FormValue("t")) - job.Setenv("q", r.FormValue("q")) - job.Setenv("nocache", r.FormValue("nocache")) - job.Setenv("rm", r.FormValue("rm")) - job.SetenvJson("authConfig", authConfig) - job.SetenvJson("configFile", configFile) - - if err := job.Run(); err != nil { - if !job.Stdout.Used() { - return err - } - sf := utils.NewStreamFormatter(version.GreaterThanOrEqualTo("1.8")) - w.Write(sf.FormatError(err)) - } - return nil -} - -func postContainersCopy(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - var copyData engine.Env - - if contentType := r.Header.Get("Content-Type"); MatchesContentType(contentType, "application/json") { - if err := copyData.Decode(r.Body); err != nil { - return err - } - } else { - return fmt.Errorf("Content-Type not supported: %s", contentType) - } - - if copyData.Get("Resource") == "" { - return fmt.Errorf("Path cannot be empty") - } - if copyData.Get("Resource")[0] == '/' { - copyData.Set("Resource", copyData.Get("Resource")[1:]) - } - - job := eng.Job("container_copy", vars["name"], copyData.Get("Resource")) - job.Stdout.Add(w) - if err := job.Run(); err != nil { - utils.Errorf("%s", err.Error()) - if strings.Contains(err.Error(), "No such container") { - 
w.WriteHeader(http.StatusNotFound) - } - } - return nil -} - -func optionsHandler(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - w.WriteHeader(http.StatusOK) - return nil -} -func writeCorsHeaders(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Access-Control-Allow-Origin", "*") - w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept") - w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS") -} - -func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - // log the request - utils.Debugf("Calling %s %s", localMethod, localRoute) - - if logging { - log.Println(r.Method, r.RequestURI) - } - - if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { - userAgent := strings.Split(r.Header.Get("User-Agent"), "/") - if len(userAgent) == 2 && !dockerVersion.Equal(userAgent[1]) { - utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) - } - } - version := version.Version(mux.Vars(r)["version"]) - if version == "" { - version = APIVERSION - } - if enableCors { - writeCorsHeaders(w, r) - } - - if version.GreaterThan(APIVERSION) { - http.Error(w, fmt.Errorf("client and server don't have same version (client : %s, server: %s)", version, APIVERSION).Error(), http.StatusNotFound) - return - } - - if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil { - utils.Errorf("Error: %s", err) - httpError(w, err) - } - } -} - -// Replicated from expvar.go as not public. 
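The comment above explains why the handler is copied here: at the time, the expvar package did not export its HTTP handler. Since Go 1.8 it does, so a sketch like the following (illustrative, not part of this diff) gets the same JSON dump without replicating the code:

package main

import (
	"expvar"
	"net/http"
)

func main() {
	hits := expvar.NewInt("hits")
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		hits.Add(1)
		w.Write([]byte("ok\n"))
	})
	// expvar.Handler is exported since Go 1.8 and serves the same
	// "key": value JSON object that the copied handler below emits.
	http.Handle("/debug/vars", expvar.Handler())
	if err := http.ListenAndServe("127.0.0.1:8080", nil); err != nil {
		panic(err)
	}
}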
-func expvarHandler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") - first := true - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, "\n}\n") -} - -func AttachProfiler(router *mux.Router) { - router.HandleFunc("/debug/vars", expvarHandler) - router.HandleFunc("/debug/pprof/", pprof.Index) - router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) - router.HandleFunc("/debug/pprof/profile", pprof.Profile) - router.HandleFunc("/debug/pprof/symbol", pprof.Symbol) - router.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP) - router.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) - router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) -} - -func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion string) (*mux.Router, error) { - r := mux.NewRouter() - if os.Getenv("DEBUG") != "" { - AttachProfiler(r) - } - m := map[string]map[string]HttpApiFunc{ - "GET": { - "/events": getEvents, - "/info": getInfo, - "/version": getVersion, - "/images/json": getImagesJSON, - "/images/viz": getImagesViz, - "/images/search": getImagesSearch, - "/images/{name:.*}/get": getImagesGet, - "/images/{name:.*}/history": getImagesHistory, - "/images/{name:.*}/json": getImagesByName, - "/containers/ps": getContainersJSON, - "/containers/json": getContainersJSON, - "/containers/{name:.*}/export": getContainersExport, - "/containers/{name:.*}/changes": getContainersChanges, - "/containers/{name:.*}/json": getContainersByName, - "/containers/{name:.*}/top": getContainersTop, - "/containers/{name:.*}/attach/ws": wsContainersAttach, - }, - "POST": { - "/auth": postAuth, - "/commit": postCommit, - "/build": postBuild, - "/images/create": postImagesCreate, - "/images/{name:.*}/insert": postImagesInsert, - "/images/load": postImagesLoad, - "/images/{name:.*}/push": postImagesPush, - "/images/{name:.*}/tag": postImagesTag, - "/containers/create": postContainersCreate, - "/containers/{name:.*}/kill": postContainersKill, - "/containers/{name:.*}/restart": postContainersRestart, - "/containers/{name:.*}/start": postContainersStart, - "/containers/{name:.*}/stop": postContainersStop, - "/containers/{name:.*}/wait": postContainersWait, - "/containers/{name:.*}/resize": postContainersResize, - "/containers/{name:.*}/attach": postContainersAttach, - "/containers/{name:.*}/copy": postContainersCopy, - }, - "DELETE": { - "/containers/{name:.*}": deleteContainers, - "/images/{name:.*}": deleteImages, - }, - "OPTIONS": { - "": optionsHandler, - }, - } - - for method, routes := range m { - for route, fct := range routes { - utils.Debugf("Registering %s, %s", method, route) - // NOTE: scope issue, make sure the variables are local and won't be changed - localRoute := route - localFct := fct - localMethod := method - - // build the handler function - f := makeHttpHandler(eng, logging, localMethod, localRoute, localFct, enableCors, version.Version(dockerVersion)) - - // add the new route - if localRoute == "" { - r.Methods(localMethod).HandlerFunc(f) - } else { - r.Path("/v{version:[0-9.]+}" + localRoute).Methods(localMethod).HandlerFunc(f) - r.Path(localRoute).Methods(localMethod).HandlerFunc(f) - } - } - } - - return r, nil -} - -// ServeRequest processes a single http request to the docker remote api. 
-// FIXME: refactor this to be part of Server and not require re-creating a new -// router each time. This requires first moving ListenAndServe into Server. -func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.ResponseWriter, req *http.Request) error { - router, err := createRouter(eng, false, true, "") - if err != nil { - return err - } - // Insert APIVERSION into the request as a convenience - req.URL.Path = fmt.Sprintf("/v%s%s", apiversion, req.URL.Path) - router.ServeHTTP(w, req) - return nil -} - -// ServeFD creates an http.Server and sets it up to serve given a socket activated -// argument. -func ServeFd(addr string, handle http.Handler) error { - ls, e := systemd.ListenFD(addr) - if e != nil { - return e - } - - chErrors := make(chan error, len(ls)) - - // We don't want to start serving on these sockets until the - // "initserver" job has completed. Otherwise required handlers - // won't be ready. - <-activationLock - - // Since ListenFD will return one or more sockets we have - // to create a go func to spawn off multiple serves - for i := range ls { - listener := ls[i] - go func() { - httpSrv := http.Server{Handler: handle} - chErrors <- httpSrv.Serve(listener) - }() - } - - for i := 0; i < len(ls); i += 1 { - err := <-chErrors - if err != nil { - return err - } - } - - return nil -} - -func lookupGidByName(nameOrGid string) (int, error) { - groups, err := user.ParseGroupFilter(func(g *user.Group) bool { - return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid - }) - if err != nil { - return -1, err - } - if groups != nil && len(groups) > 0 { - return groups[0].Gid, nil - } - return -1, fmt.Errorf("Group %s not found", nameOrGid) -} - -func changeGroup(addr string, nameOrGid string) error { - gid, err := lookupGidByName(nameOrGid) - if err != nil { - return err - } - - utils.Debugf("%s group found. gid: %d", nameOrGid, gid) - return os.Chown(addr, 0, gid) -} - -// ListenAndServe sets up the required http.Server and gets it listening for -// each addr passed in and does protocol specific checking. -func ListenAndServe(proto, addr string, eng *engine.Engine, logging, enableCors bool, dockerVersion, socketGroup string) error { - r, err := createRouter(eng, logging, enableCors, dockerVersion) - - if err != nil { - return err - } - - if proto == "fd" { - return ServeFd(addr, r) - } - - if proto == "unix" { - if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) { - return err - } - } - - l, err := listenbuffer.NewListenBuffer(proto, addr, activationLock) - if err != nil { - return err - } - - // Basic error and sanity checking - switch proto { - case "tcp": - if !strings.HasPrefix(addr, "127.0.0.1") { - log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") - } - case "unix": - if err := os.Chmod(addr, 0660); err != nil { - return err - } - - if socketGroup != "" { - if err := changeGroup(addr, socketGroup); err != nil { - if socketGroup == "docker" { - // if the user hasn't explicitly specified the group ownership, don't fail on errors. - utils.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error()) - } else { - return err - } - } - } - default: - return fmt.Errorf("Invalid protocol format.") - } - - httpSrv := http.Server{Addr: addr, Handler: r} - return httpSrv.Serve(l) -} - -// ServeApi loops through all of the protocols sent in to docker and spawns -// off a go routine to setup a serving http.Server for each. 
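ListenAndServe above shows the daemon's unix-socket sequence: unlink any stale socket, listen, tighten permissions to 0660, and optionally hand group ownership to a socket group; ServeApi, directly below, spawns one such listener per requested protocol. A stdlib-only sketch of the socket setup (the socket path and handler are illustrative):

package main

import (
	"net"
	"net/http"
	"os"
)

func main() {
	const addr = "/tmp/demo.sock"
	// Remove a stale socket left behind by a previous run.
	if err := os.Remove(addr); err != nil && !os.IsNotExist(err) {
		panic(err)
	}
	l, err := net.Listen("unix", addr)
	if err != nil {
		panic(err)
	}
	// Restrict access to owner and group, as the daemon does with 0660.
	if err := os.Chmod(addr, 0660); err != nil {
		panic(err)
	}
	http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello over a unix socket\n"))
	}))
}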
-func ServeApi(job *engine.Job) engine.Status { - var ( - protoAddrs = job.Args - chErrors = make(chan error, len(protoAddrs)) - ) - activationLock = make(chan struct{}) - - if err := job.Eng.Register("acceptconnections", AcceptConnections); err != nil { - return job.Error(err) - } - - for _, protoAddr := range protoAddrs { - protoAddrParts := strings.SplitN(protoAddr, "://", 2) - go func() { - log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1]) - chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"), job.Getenv("SocketGroup")) - }() - } - - for i := 0; i < len(protoAddrs); i += 1 { - err := <-chErrors - if err != nil { - return job.Error(err) - } - } - - return engine.StatusOK -} - -func AcceptConnections(job *engine.Job) engine.Status { - // Tell the init daemon we are accepting requests - go systemd.SdNotify("READY=1") - - // close the lock so the listeners start accepting connections - close(activationLock) - - return engine.StatusOK -} diff -Nru docker.io-0.9.1~dfsg1/archive/archive.go docker.io-1.3.2~dfsg1/archive/archive.go --- docker.io-0.9.1~dfsg1/archive/archive.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/archive/archive.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,642 +0,0 @@ -package archive - -import ( - "bytes" - "compress/bzip2" - "compress/gzip" - "errors" - "fmt" - "github.com/dotcloud/docker/pkg/system" - "github.com/dotcloud/docker/utils" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "strings" - "syscall" -) - -type ( - Archive io.ReadCloser - ArchiveReader io.Reader - Compression int - TarOptions struct { - Includes []string - Compression Compression - } -) - -var ( - ErrNotImplemented = errors.New("Function not implemented") -) - -const ( - Uncompressed Compression = iota - Bzip2 - Gzip - Xz -) - -func DetectCompression(source []byte) Compression { - sourceLen := len(source) - for compression, m := range map[Compression][]byte{ - Bzip2: {0x42, 0x5A, 0x68}, - Gzip: {0x1F, 0x8B, 0x08}, - Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, - } { - fail := false - if len(m) > sourceLen { - utils.Debugf("Len too short") - continue - } - i := 0 - for _, b := range m { - if b != source[i] { - fail = true - break - } - i++ - } - if !fail { - return compression - } - } - return Uncompressed -} - -func xzDecompress(archive io.Reader) (io.ReadCloser, error) { - args := []string{"xz", "-d", "-c", "-q"} - - return CmdStream(exec.Command(args[0], args[1:]...), archive) -} - -func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - buf := make([]byte, 10) - totalN := 0 - for totalN < 10 { - n, err := archive.Read(buf[totalN:]) - if err != nil { - if err == io.EOF { - return nil, fmt.Errorf("Tarball too short") - } - return nil, err - } - totalN += n - utils.Debugf("[tar autodetect] n: %d", n) - } - compression := DetectCompression(buf) - wrap := io.MultiReader(bytes.NewReader(buf), archive) - - switch compression { - case Uncompressed: - return ioutil.NopCloser(wrap), nil - case Gzip: - return gzip.NewReader(wrap) - case Bzip2: - return ioutil.NopCloser(bzip2.NewReader(wrap)), nil - case Xz: - return xzDecompress(wrap) - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { - - switch 
compression {
-	case Uncompressed:
-		return utils.NopWriteCloser(dest), nil
-	case Gzip:
-		return gzip.NewWriter(dest), nil
-	case Bzip2, Xz:
-		// archive/bzip2 does not support writing, and there is no xz support at all
-		// However, this is not a problem as docker only currently generates gzipped tars
-		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
-	default:
-		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
-	}
-}
-
-func (compression *Compression) Extension() string {
-	switch *compression {
-	case Uncompressed:
-		return "tar"
-	case Bzip2:
-		return "tar.bz2"
-	case Gzip:
-		return "tar.gz"
-	case Xz:
-		return "tar.xz"
-	}
-	return ""
-}
-
-func addTarFile(path, name string, tw *tar.Writer) error {
-	fi, err := os.Lstat(path)
-	if err != nil {
-		return err
-	}
-
-	link := ""
-	if fi.Mode()&os.ModeSymlink != 0 {
-		if link, err = os.Readlink(path); err != nil {
-			return err
-		}
-	}
-
-	hdr, err := tar.FileInfoHeader(fi, link)
-	if err != nil {
-		return err
-	}
-
-	if fi.IsDir() && !strings.HasSuffix(name, "/") {
-		name = name + "/"
-	}
-
-	hdr.Name = name
-
-	stat, ok := fi.Sys().(*syscall.Stat_t)
-	if ok {
-		// Currently go does not fill in the major/minors
-		if stat.Mode&syscall.S_IFBLK == syscall.S_IFBLK ||
-			stat.Mode&syscall.S_IFCHR == syscall.S_IFCHR {
-			hdr.Devmajor = int64(major(uint64(stat.Rdev)))
-			hdr.Devminor = int64(minor(uint64(stat.Rdev)))
-		}
-
-	}
-
-	capability, _ := system.Lgetxattr(path, "security.capability")
-	if capability != nil {
-		hdr.Xattrs = make(map[string]string)
-		hdr.Xattrs["security.capability"] = string(capability)
-	}
-
-	if err := tw.WriteHeader(hdr); err != nil {
-		return err
-	}
-
-	if hdr.Typeflag == tar.TypeReg {
-		if file, err := os.Open(path); err != nil {
-			return err
-		} else {
-			_, err := io.Copy(tw, file)
-			if err != nil {
-				return err
-			}
-			file.Close()
-		}
-	}
-
-	return nil
-}
-
-func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader) error {
-	// hdr.Mode is in linux format, which we can use for syscalls,
-	// but for os.Foo() calls we need the mode converted to os.FileMode,
-	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
-	hdrInfo := hdr.FileInfo()
-
-	switch hdr.Typeflag {
-	case tar.TypeDir:
-		// Create directory unless it exists as a directory already.
- // In that case we just want to merge the two - if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { - if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { - return err - } - } - - case tar.TypeReg, tar.TypeRegA: - // Source is regular file - file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) - if err != nil { - return err - } - if _, err := io.Copy(file, reader); err != nil { - file.Close() - return err - } - file.Close() - - case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: - mode := uint32(hdr.Mode & 07777) - switch hdr.Typeflag { - case tar.TypeBlock: - mode |= syscall.S_IFBLK - case tar.TypeChar: - mode |= syscall.S_IFCHR - case tar.TypeFifo: - mode |= syscall.S_IFIFO - } - - if err := syscall.Mknod(path, mode, int(mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { - return err - } - - case tar.TypeLink: - if err := os.Link(filepath.Join(extractDir, hdr.Linkname), path); err != nil { - return err - } - - case tar.TypeSymlink: - if err := os.Symlink(hdr.Linkname, path); err != nil { - return err - } - - case tar.TypeXGlobalHeader: - utils.Debugf("PAX Global Extended Headers found and ignored") - return nil - - default: - return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) - } - - if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil { - return err - } - - for key, value := range hdr.Xattrs { - if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - return err - } - } - - // There is no LChmod, so ignore mode for symlink. Also, this - // must happen after chown, as that can modify the file mode - if hdr.Typeflag != tar.TypeSymlink { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - - ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} - // syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and - if hdr.Typeflag != tar.TypeSymlink { - if err := system.UtimesNano(path, ts); err != nil { - return err - } - } else { - if err := system.LUtimesNano(path, ts); err != nil { - return err - } - } - return nil -} - -// Tar creates an archive from the directory at `path`, and returns it as a -// stream of bytes. -func Tar(path string, compression Compression) (io.ReadCloser, error) { - return TarFilter(path, &TarOptions{Compression: compression}) -} - -func escapeName(name string) string { - escaped := make([]byte, 0) - for i, c := range []byte(name) { - if i == 0 && c == '/' { - continue - } - // all printable chars except "-" which is 0x2d - if (0x20 <= c && c <= 0x7E) && c != 0x2d { - escaped = append(escaped, c) - } else { - escaped = append(escaped, fmt.Sprintf("\\%03o", c)...) - } - } - return string(escaped) -} - -// Tar creates an archive from the directory at `path`, only including files whose relative -// paths are included in `filter`. If `filter` is nil, then all files are included. -func TarFilter(srcPath string, options *TarOptions) (io.ReadCloser, error) { - pipeReader, pipeWriter := io.Pipe() - - compressWriter, err := CompressStream(pipeWriter, options.Compression) - if err != nil { - return nil, err - } - - tw := tar.NewWriter(compressWriter) - - go func() { - // In general we log errors here but ignore them because - // during e.g. 
a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - - if options.Includes == nil { - options.Includes = []string{"."} - } - - for _, include := range options.Includes { - filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error { - if err != nil { - utils.Debugf("Tar: Can't stat file %s to tar: %s\n", srcPath, err) - return nil - } - - relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil { - return nil - } - - if err := addTarFile(filePath, relFilePath, tw); err != nil { - utils.Debugf("Can't add file %s to tar: %s\n", srcPath, err) - } - return nil - }) - } - - // Make sure to check the error on Close. - if err := tw.Close(); err != nil { - utils.Debugf("Can't close tar writer: %s\n", err) - } - if err := compressWriter.Close(); err != nil { - utils.Debugf("Can't close compress writer: %s\n", err) - } - if err := pipeWriter.Close(); err != nil { - utils.Debugf("Can't close pipe writer: %s\n", err) - } - }() - - return pipeReader, nil -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `path`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -// FIXME: specify behavior when target path exists vs. doesn't exist. -func Untar(archive io.Reader, dest string, options *TarOptions) error { - if archive == nil { - return fmt.Errorf("Empty archive") - } - - decompressedArchive, err := DecompressStream(archive) - if err != nil { - return err - } - defer decompressedArchive.Close() - - tr := tar.NewReader(decompressedArchive) - - var dirs []*tar.Header - - // Iterate through the files in the archive. - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return err - } - - // Normalize name, for safety and for a simple is-root check - hdr.Name = filepath.Clean(hdr.Name) - - if !strings.HasSuffix(hdr.Name, "/") { - // Not the root directory, ensure that the parent directory exists - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = os.MkdirAll(parentPath, 600) - if err != nil { - return err - } - } - } - - path := filepath.Join(dest, hdr.Name) - - // If path exits we almost always just want to remove and replace it - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return err - } - } - } - - if err := createTarFile(path, dest, hdr, tr); err != nil { - return err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} - if err := syscall.UtimesNano(path, ts); err != nil { - return err - } - } - - return nil -} - -// TarUntar is a convenience function which calls Tar and Untar, with -// the output of one piped into the other. If either Tar or Untar fails, -// TarUntar aborts and returns the error. 
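TarFilter and Untar above are thin orchestration around the standard archive/tar reader and writer; TarUntar, directly below, simply pipes one into the other. A minimal round trip with those primitives, kept in memory for brevity (the file name and contents are illustrative):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	// Write a one-file archive into a buffer, as TarFilter's goroutine
	// does into its pipe.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	body := []byte("hello world")
	if err := tw.WriteHeader(&tar.Header{Name: "1", Mode: 0600, Size: int64(len(body))}); err != nil {
		panic(err)
	}
	if _, err := tw.Write(body); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}

	// Read it back with the same Next/EOF loop Untar uses.
	tr := tar.NewReader(&buf)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		data, _ := ioutil.ReadAll(tr)
		fmt.Printf("%s: %s\n", hdr.Name, data)
	}
}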
-func TarUntar(src string, dst string) error { - utils.Debugf("TarUntar(%s %s)", src, dst) - archive, err := TarFilter(src, &TarOptions{Compression: Uncompressed}) - if err != nil { - return err - } - defer archive.Close() - return Untar(archive, dst, nil) -} - -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. -func UntarPath(src, dst string) error { - archive, err := os.Open(src) - if err != nil { - return err - } - defer archive.Close() - if err := Untar(archive, dst, nil); err != nil { - return err - } - return nil -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -// -func CopyWithTar(src, dst string) error { - srcSt, err := os.Stat(src) - if err != nil { - return err - } - if !srcSt.IsDir() { - return CopyFileWithTar(src, dst) - } - // Create dst, copy src's content into it - utils.Debugf("Creating dest directory: %s", dst) - if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) { - return err - } - utils.Debugf("Calling TarUntar(%s, %s)", src, dst) - return TarUntar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -// -// If `dst` ends with a trailing slash '/', the final destination path -// will be `dst/base(src)`. -func CopyFileWithTar(src, dst string) (err error) { - utils.Debugf("CopyFileWithTar(%s, %s)", src, dst) - srcSt, err := os.Stat(src) - if err != nil { - return err - } - if srcSt.IsDir() { - return fmt.Errorf("Can't copy a directory") - } - // Clean up the trailing / - if dst[len(dst)-1] == '/' { - dst = path.Join(dst, filepath.Base(src)) - } - // Create the holding directory if necessary - if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) { - return err - } - - r, w := io.Pipe() - errC := utils.Go(func() error { - defer w.Close() - - srcF, err := os.Open(src) - if err != nil { - return err - } - defer srcF.Close() - - tw := tar.NewWriter(w) - hdr, err := tar.FileInfoHeader(srcSt, "") - if err != nil { - return err - } - hdr.Name = filepath.Base(dst) - if err := tw.WriteHeader(hdr); err != nil { - return err - } - if _, err := io.Copy(tw, srcF); err != nil { - return err - } - tw.Close() - return nil - }) - defer func() { - if er := <-errC; err != nil { - err = er - } - }() - return Untar(r, filepath.Dir(dst), nil) -} - -// CmdStream executes a command, and returns its stdout as a stream. -// If the command fails to run or doesn't complete successfully, an error -// will be returned, including anything written on stderr. 
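The contract documented above — stream stdout while folding stderr into the returned error on failure — is what the implementation below provides. It can be sketched with os/exec alone (the shell command is illustrative):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os/exec"
)

func main() {
	cmd := exec.Command("/bin/sh", "-c", "echo hello; echo oops >&2; exit 1")
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		panic(err)
	}
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	out, _ := ioutil.ReadAll(stdout) // consume the stream before Wait
	if err := cmd.Wait(); err != nil {
		// Same shape as CmdStream's failure path: "exit status 1: oops".
		fmt.Printf("%v: %s", err, stderr.String())
	}
	fmt.Printf("stdout: %s", out)
}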
-func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { - if input != nil { - stdin, err := cmd.StdinPipe() - if err != nil { - return nil, err - } - // Write stdin if any - go func() { - io.Copy(stdin, input) - stdin.Close() - }() - } - stdout, err := cmd.StdoutPipe() - if err != nil { - return nil, err - } - stderr, err := cmd.StderrPipe() - if err != nil { - return nil, err - } - pipeR, pipeW := io.Pipe() - errChan := make(chan []byte) - // Collect stderr, we will use it in case of an error - go func() { - errText, e := ioutil.ReadAll(stderr) - if e != nil { - errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")") - } - errChan <- errText - }() - // Copy stdout to the returned pipe - go func() { - _, err := io.Copy(pipeW, stdout) - if err != nil { - pipeW.CloseWithError(err) - } - errText := <-errChan - if err := cmd.Wait(); err != nil { - pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText)) - } else { - pipeW.Close() - } - }() - // Run the command and return the pipe - if err := cmd.Start(); err != nil { - return nil, err - } - return pipeR, nil -} - -// NewTempArchive reads the content of src into a temporary file, and returns the contents -// of that file as an archive. The archive can only be read once - as soon as reading completes, -// the file will be deleted. -func NewTempArchive(src Archive, dir string) (*TempArchive, error) { - f, err := ioutil.TempFile(dir, "") - if err != nil { - return nil, err - } - if _, err := io.Copy(f, src); err != nil { - return nil, err - } - if _, err := f.Seek(0, 0); err != nil { - return nil, err - } - st, err := f.Stat() - if err != nil { - return nil, err - } - size := st.Size() - return &TempArchive{f, size}, nil -} - -type TempArchive struct { - *os.File - Size int64 // Pre-computed from Stat().Size() as a convenience -} - -func (archive *TempArchive) Read(data []byte) (int, error) { - n, err := archive.File.Read(data) - if err != nil { - os.Remove(archive.File.Name()) - } - return n, err -} diff -Nru docker.io-0.9.1~dfsg1/archive/archive_test.go docker.io-1.3.2~dfsg1/archive/archive_test.go --- docker.io-0.9.1~dfsg1/archive/archive_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/archive/archive_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,139 +0,0 @@ -package archive - -import ( - "bytes" - "fmt" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "testing" - "time" -) - -func TestCmdStreamLargeStderr(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") - out, err := CmdStream(cmd, nil) - if err != nil { - t.Fatalf("Failed to start command: %s", err) - } - errCh := make(chan error) - go func() { - _, err := io.Copy(ioutil.Discard, out) - errCh <- err - }() - select { - case err := <-errCh: - if err != nil { - t.Fatalf("Command should not have failed (err=%.100s...)", err) - } - case <-time.After(5 * time.Second): - t.Fatalf("Command did not complete in 5 seconds; probable deadlock") - } -} - -func TestCmdStreamBad(t *testing.T) { - badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") - out, err := CmdStream(badCmd, nil) - if err != nil { - t.Fatalf("Failed to start command: %s", err) - } - if output, err := ioutil.ReadAll(out); err == nil { - t.Fatalf("Command should have failed") - } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { - 
t.Fatalf("Wrong error value (%s)", err) - } else if s := string(output); s != "hello\n" { - t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) - } -} - -func TestCmdStreamGood(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0") - out, err := CmdStream(cmd, nil) - if err != nil { - t.Fatal(err) - } - if output, err := ioutil.ReadAll(out); err != nil { - t.Fatalf("Command should not have failed (err=%s)", err) - } else if s := string(output); s != "hello\n" { - t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) - } -} - -func tarUntar(t *testing.T, origin string, compression Compression) error { - archive, err := Tar(origin, compression) - if err != nil { - t.Fatal(err) - } - defer archive.Close() - - buf := make([]byte, 10) - if _, err := archive.Read(buf); err != nil { - return err - } - wrap := io.MultiReader(bytes.NewReader(buf), archive) - - detectedCompression := DetectCompression(buf) - if detectedCompression.Extension() != compression.Extension() { - return fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) - } - - tmp, err := ioutil.TempDir("", "docker-test-untar") - if err != nil { - return err - } - defer os.RemoveAll(tmp) - if err := Untar(wrap, tmp, nil); err != nil { - return err - } - if _, err := os.Stat(tmp); err != nil { - return err - } - - changes, err := ChangesDirs(origin, tmp) - if err != nil { - return err - } - - if len(changes) != 0 { - t.Fatalf("Unexpected differences after tarUntar: %v", changes) - } - - return nil -} - -func TestTarUntar(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { - t.Fatal(err) - } - - for _, c := range []Compression{ - Uncompressed, - Gzip, - } { - if err := tarUntar(t, origin, c); err != nil { - t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) - } - } -} - -// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz -// use PAX Global Extended Headers. 
-// Failing prevents the archives from being uncompressed during ADD -func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { - hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} - err := createTarFile("pax_global_header", "some_dir", &hdr, nil) - if err != nil { - t.Fatal(err) - } -} diff -Nru docker.io-0.9.1~dfsg1/archive/changes.go docker.io-1.3.2~dfsg1/archive/changes.go --- docker.io-0.9.1~dfsg1/archive/changes.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/archive/changes.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,369 +0,0 @@ -package archive - -import ( - "bytes" - "fmt" - "github.com/dotcloud/docker/pkg/system" - "github.com/dotcloud/docker/utils" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "io" - "os" - "path/filepath" - "strings" - "syscall" - "time" -) - -type ChangeType int - -const ( - ChangeModify = iota - ChangeAdd - ChangeDelete -) - -type Change struct { - Path string - Kind ChangeType -} - -func (change *Change) String() string { - var kind string - switch change.Kind { - case ChangeModify: - kind = "C" - case ChangeAdd: - kind = "A" - case ChangeDelete: - kind = "D" - } - return fmt.Sprintf("%s %s", kind, change.Path) -} - -// Gnu tar and the go tar writer don't have sub-second mtime -// precision, which is problematic when we apply changes via tar -// files, we handle this by comparing for exact times, *or* same -// second count and either a or b having exactly 0 nanoseconds -func sameFsTime(a, b time.Time) bool { - return a == b || - (a.Unix() == b.Unix() && - (a.Nanosecond() == 0 || b.Nanosecond() == 0)) -} - -func sameFsTimeSpec(a, b syscall.Timespec) bool { - return a.Sec == b.Sec && - (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) -} - -func Changes(layers []string, rw string) ([]Change, error) { - var changes []Change - err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - path, err = filepath.Rel(rw, path) - if err != nil { - return err - } - path = filepath.Join("/", path) - - // Skip root - if path == "/" { - return nil - } - - // Skip AUFS metadata - if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched { - return err - } - - change := Change{ - Path: path, - } - - // Find out what kind of modification happened - file := filepath.Base(path) - // If there is a whiteout, then the file was removed - if strings.HasPrefix(file, ".wh.") { - originalFile := file[len(".wh."):] - change.Path = filepath.Join(filepath.Dir(path), originalFile) - change.Kind = ChangeDelete - } else { - // Otherwise, the file was added - change.Kind = ChangeAdd - - // ...Unless it already existed in a top layer, in which case, it's a modification - for _, layer := range layers { - stat, err := os.Stat(filepath.Join(layer, path)) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - // The file existed in the top layer, so that's a modification - - // However, if it's a directory, maybe it wasn't actually modified. 
- // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar - if stat.IsDir() && f.IsDir() { - if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { - // Both directories are the same, don't record the change - return nil - } - } - change.Kind = ChangeModify - break - } - } - } - - // Record change - changes = append(changes, change) - return nil - }) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - return changes, nil -} - -type FileInfo struct { - parent *FileInfo - name string - stat syscall.Stat_t - children map[string]*FileInfo - capability []byte -} - -func (root *FileInfo) LookUp(path string) *FileInfo { - parent := root - if path == "/" { - return root - } - - pathElements := strings.Split(path, "/") - for _, elem := range pathElements { - if elem != "" { - child := parent.children[elem] - if child == nil { - return nil - } - parent = child - } - } - return parent -} - -func (info *FileInfo) path() string { - if info.parent == nil { - return "/" - } - return filepath.Join(info.parent.path(), info.name) -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode&syscall.S_IFDIR == syscall.S_IFDIR -} - -func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - if oldInfo == nil { - // add - change := Change{ - Path: info.path(), - Kind: ChangeAdd, - } - *changes = append(*changes, change) - } - - // We make a copy so we can modify it to detect additions - // also, we only recurse on the old dir if the new info is a directory - // otherwise any previous delete/change is considered recursive - oldChildren := make(map[string]*FileInfo) - if oldInfo != nil && info.isDir() { - for k, v := range oldInfo.children { - oldChildren[k] = v - } - } - - for name, newChild := range info.children { - oldChild, _ := oldChildren[name] - if oldChild != nil { - // change? - oldStat := &oldChild.stat - newStat := &newChild.stat - // Note: We can't compare inode or ctime or blocksize here, because these change - // when copying a file into a container. However, that is not generally a problem - // because any content change will change mtime, and any status change should - // be visible when actually comparing the stat fields. 
The only time this
-			// breaks down is if some code intentionally hides a change by setting
-			// back mtime
-			if oldStat.Mode != newStat.Mode ||
-				oldStat.Uid != newStat.Uid ||
-				oldStat.Gid != newStat.Gid ||
-				oldStat.Rdev != newStat.Rdev ||
-				// Don't look at size for dirs, it's not a good measure of change
-				(oldStat.Size != newStat.Size && oldStat.Mode&syscall.S_IFDIR != syscall.S_IFDIR) ||
-				!sameFsTimeSpec(system.GetLastModification(oldStat), system.GetLastModification(newStat)) ||
-				bytes.Compare(oldChild.capability, newChild.capability) != 0 {
-				change := Change{
-					Path: newChild.path(),
-					Kind: ChangeModify,
-				}
-				*changes = append(*changes, change)
-			}
-
-			// Remove from copy so we can detect deletions
-			delete(oldChildren, name)
-		}
-
-		newChild.addChanges(oldChild, changes)
-	}
-	for _, oldChild := range oldChildren {
-		// delete
-		change := Change{
-			Path: oldChild.path(),
-			Kind: ChangeDelete,
-		}
-		*changes = append(*changes, change)
-	}
-
-}
-
-func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
-	var changes []Change
-
-	info.addChanges(oldInfo, &changes)
-
-	return changes
-}
-
-func newRootFileInfo() *FileInfo {
-	root := &FileInfo{
-		name:     "/",
-		children: make(map[string]*FileInfo),
-	}
-	return root
-}
-
-func collectFileInfo(sourceDir string) (*FileInfo, error) {
-	root := newRootFileInfo()
-
-	err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-
-		// Rebase path
-		relPath, err := filepath.Rel(sourceDir, path)
-		if err != nil {
-			return err
-		}
-		relPath = filepath.Join("/", relPath)
-
-		if relPath == "/" {
-			return nil
-		}
-
-		parent := root.LookUp(filepath.Dir(relPath))
-		if parent == nil {
-			return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
-		}
-
-		info := &FileInfo{
-			name:     filepath.Base(relPath),
-			children: make(map[string]*FileInfo),
-			parent:   parent,
-		}
-
-		if err := syscall.Lstat(path, &info.stat); err != nil {
-			return err
-		}
-
-		info.capability, _ = system.Lgetxattr(path, "security.capability")
-
-		parent.children[info.name] = info
-
-		return nil
-	})
-	if err != nil {
-		return nil, err
-	}
-	return root, nil
-}
-
-// Compare two directories and generate an array of Change objects describing the changes
-func ChangesDirs(newDir, oldDir string) ([]Change, error) {
-	oldRoot, err := collectFileInfo(oldDir)
-	if err != nil {
-		return nil, err
-	}
-	newRoot, err := collectFileInfo(newDir)
-	if err != nil {
-		return nil, err
-	}
-
-	return newRoot.Changes(oldRoot), nil
-}
-
-func ChangesSize(newDir string, changes []Change) int64 {
-	var size int64
-	for _, change := range changes {
-		if change.Kind == ChangeModify || change.Kind == ChangeAdd {
-			file := filepath.Join(newDir, change.Path)
-			fileInfo, _ := os.Lstat(file)
-			if fileInfo != nil && !fileInfo.IsDir() {
-				size += fileInfo.Size()
-			}
-		}
-	}
-	return size
-}
-
-func major(device uint64) uint64 {
-	return (device >> 8) & 0xfff
-}
-
-func minor(device uint64) uint64 {
-	return (device & 0xff) | ((device >> 12) & 0xfff00)
-}
-
-func ExportChanges(dir string, changes []Change) (Archive, error) {
-	reader, writer := io.Pipe()
-	tw := tar.NewWriter(writer)
-
-	go func() {
-		// In general we log errors here but ignore them because
-		// during e.g. 
a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - for _, change := range changes { - if change.Kind == ChangeDelete { - whiteOutDir := filepath.Dir(change.Path) - whiteOutBase := filepath.Base(change.Path) - whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase) - hdr := &tar.Header{ - Name: whiteOut[1:], - Size: 0, - ModTime: time.Now(), - AccessTime: time.Now(), - ChangeTime: time.Now(), - } - if err := tw.WriteHeader(hdr); err != nil { - utils.Debugf("Can't write whiteout header: %s\n", err) - } - } else { - path := filepath.Join(dir, change.Path) - if err := addTarFile(path, change.Path[1:], tw); err != nil { - utils.Debugf("Can't add file %s to tar: %s\n", path, err) - } - } - } - - // Make sure to check the error on Close. - if err := tw.Close(); err != nil { - utils.Debugf("Can't close layer: %s\n", err) - } - writer.Close() - }() - return reader, nil -} diff -Nru docker.io-0.9.1~dfsg1/archive/changes_test.go docker.io-1.3.2~dfsg1/archive/changes_test.go --- docker.io-0.9.1~dfsg1/archive/changes_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/archive/changes_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,301 +0,0 @@ -package archive - -import ( - "io/ioutil" - "os" - "os/exec" - "path" - "sort" - "testing" - "time" -) - -func max(x, y int) int { - if x >= y { - return x - } - return y -} - -func copyDir(src, dst string) error { - cmd := exec.Command("cp", "-a", src, dst) - if err := cmd.Run(); err != nil { - return err - } - return nil -} - -// Helper to sort []Change by path -type byPath struct{ changes []Change } - -func (b byPath) Less(i, j int) bool { return b.changes[i].Path < b.changes[j].Path } -func (b byPath) Len() int { return len(b.changes) } -func (b byPath) Swap(i, j int) { b.changes[i], b.changes[j] = b.changes[j], b.changes[i] } - -type FileType uint32 - -const ( - Regular FileType = iota - Dir - Symlink -) - -type FileData struct { - filetype FileType - path string - contents string - permissions os.FileMode -} - -func createSampleDir(t *testing.T, root string) { - files := []FileData{ - {Regular, "file1", "file1\n", 0600}, - {Regular, "file2", "file2\n", 0666}, - {Regular, "file3", "file3\n", 0404}, - {Regular, "file4", "file4\n", 0600}, - {Regular, "file5", "file5\n", 0600}, - {Regular, "file6", "file6\n", 0600}, - {Regular, "file7", "file7\n", 0600}, - {Dir, "dir1", "", 0740}, - {Regular, "dir1/file1-1", "file1-1\n", 01444}, - {Regular, "dir1/file1-2", "file1-2\n", 0666}, - {Dir, "dir2", "", 0700}, - {Regular, "dir2/file2-1", "file2-1\n", 0666}, - {Regular, "dir2/file2-2", "file2-2\n", 0666}, - {Dir, "dir3", "", 0700}, - {Regular, "dir3/file3-1", "file3-1\n", 0666}, - {Regular, "dir3/file3-2", "file3-2\n", 0666}, - {Dir, "dir4", "", 0700}, - {Regular, "dir4/file3-1", "file4-1\n", 0666}, - {Regular, "dir4/file3-2", "file4-2\n", 0666}, - {Symlink, "symlink1", "target1", 0666}, - {Symlink, "symlink2", "target2", 0666}, - } - - now := time.Now() - for _, info := range files { - p := path.Join(root, info.path) - if info.filetype == Dir { - if err := os.MkdirAll(p, info.permissions); err != nil { - t.Fatal(err) - } - } else if info.filetype == Regular { - if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { - t.Fatal(err) - } - } else if info.filetype == Symlink { - if err := os.Symlink(info.contents, p); err != nil { - t.Fatal(err) - } - } - - if info.filetype != Symlink { - // Set a consistent ctime, atime for all files and dirs - if 
err := os.Chtimes(p, now, now); err != nil {
-				t.Fatal(err)
-			}
-		}
-	}
-}
-
-// Create a directory, copy it, make sure we report no changes between the two
-func TestChangesDirsEmpty(t *testing.T) {
-	src, err := ioutil.TempDir("", "docker-changes-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	createSampleDir(t, src)
-	dst := src + "-copy"
-	if err := copyDir(src, dst); err != nil {
-		t.Fatal(err)
-	}
-	changes, err := ChangesDirs(dst, src)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if len(changes) != 0 {
-		t.Fatalf("Reported changes for identical dirs: %v", changes)
-	}
-	os.RemoveAll(src)
-	os.RemoveAll(dst)
-}
-
-func mutateSampleDir(t *testing.T, root string) {
-	// Remove a regular file
-	if err := os.RemoveAll(path.Join(root, "file1")); err != nil {
-		t.Fatal(err)
-	}
-
-	// Remove a directory
-	if err := os.RemoveAll(path.Join(root, "dir1")); err != nil {
-		t.Fatal(err)
-	}
-
-	// Remove a symlink
-	if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil {
-		t.Fatal(err)
-	}
-
-	// Rewrite a file
-	if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileN\n"), 0777); err != nil {
-		t.Fatal(err)
-	}
-
-	// Replace a file
-	if err := os.RemoveAll(path.Join(root, "file3")); err != nil {
-		t.Fatal(err)
-	}
-	if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileM\n"), 0404); err != nil {
-		t.Fatal(err)
-	}
-
-	// Touch file
-	if err := os.Chtimes(path.Join(root, "file4"), time.Now(), time.Now()); err != nil {
-		t.Fatal(err)
-	}
-
-	// Replace file with dir
-	if err := os.RemoveAll(path.Join(root, "file5")); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil {
-		t.Fatal(err)
-	}
-
-	// Create new file
-	if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil {
-		t.Fatal(err)
-	}
-
-	// Create new dir
-	if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil {
-		t.Fatal(err)
-	}
-
-	// Create a new symlink
-	if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil {
-		t.Fatal(err)
-	}
-
-	// Change a symlink
-	if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil {
-		t.Fatal(err)
-	}
-
-	// Replace dir with file
-	if err := os.RemoveAll(path.Join(root, "dir2")); err != nil {
-		t.Fatal(err)
-	}
-	if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil {
-		t.Fatal(err)
-	}
-
-	// Touch dir
-	if err := os.Chtimes(path.Join(root, "dir3"), time.Now(), time.Now()); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestChangesDirsMutated(t *testing.T) {
-	src, err := ioutil.TempDir("", "docker-changes-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	createSampleDir(t, src)
-	dst := src + "-copy"
-	if err := copyDir(src, dst); err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(src)
-	defer os.RemoveAll(dst)
-
-	mutateSampleDir(t, dst)
-
-	changes, err := ChangesDirs(dst, src)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	sort.Sort(byPath{changes})
-
-	expectedChanges := []Change{
-		{"/dir1", ChangeDelete},
-		{"/dir2", ChangeModify},
-		{"/dir3", ChangeModify},
-		{"/dirnew", ChangeAdd},
-		{"/file1", ChangeDelete},
-		{"/file2", ChangeModify},
-		{"/file3", ChangeModify},
-		{"/file4", ChangeModify},
-		{"/file5", ChangeModify},
-		{"/filenew", ChangeAdd},
-		{"/symlink1", ChangeDelete},
-		{"/symlink2", ChangeModify},
-		{"/symlinknew", ChangeAdd},
-	}
-
-	for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
-		if i >= 
len(expectedChanges) { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } - if i >= len(changes) { - t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) - } - if changes[i].Path == expectedChanges[i].Path { - if changes[i] != expectedChanges[i] { - t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) - } - } else if changes[i].Path < expectedChanges[i].Path { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } else { - t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) - } - } -} - -func TestApplyLayer(t *testing.T) { - src, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - createSampleDir(t, src) - defer os.RemoveAll(src) - dst := src + "-copy" - if err := copyDir(src, dst); err != nil { - t.Fatal(err) - } - mutateSampleDir(t, dst) - defer os.RemoveAll(dst) - - changes, err := ChangesDirs(dst, src) - if err != nil { - t.Fatal(err) - } - - layer, err := ExportChanges(dst, changes) - if err != nil { - t.Fatal(err) - } - - layerCopy, err := NewTempArchive(layer, "") - if err != nil { - t.Fatal(err) - } - - if err := ApplyLayer(src, layerCopy); err != nil { - t.Fatal(err) - } - - changes2, err := ChangesDirs(src, dst) - if err != nil { - t.Fatal(err) - } - - if len(changes2) != 0 { - t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) - } -} diff -Nru docker.io-0.9.1~dfsg1/archive/diff.go docker.io-1.3.2~dfsg1/archive/diff.go --- docker.io-0.9.1~dfsg1/archive/diff.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/archive/diff.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,160 +0,0 @@ -package archive - -import ( - "fmt" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "syscall" - "time" -) - -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor -func mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - if time.IsZero() { - // Return UTIME_OMIT special value - ts.Sec = 0 - ts.Nsec = ((1 << 30) - 2) - return - } - return syscall.NsecToTimespec(time.UnixNano()) -} - -// ApplyLayer parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. -func ApplyLayer(dest string, layer ArchiveReader) error { - // We need to be able to set any perms - oldmask := syscall.Umask(0) - defer syscall.Umask(oldmask) - - layer, err := DecompressStream(layer) - if err != nil { - return err - } - - tr := tar.NewReader(layer) - - var dirs []*tar.Header - - aufsTempdir := "" - aufsHardlinks := make(map[string]*tar.Header) - - // Iterate through the files in the archive. - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return err - } - - // Normalize name, for safety and for a simple is-root check - hdr.Name = filepath.Clean(hdr.Name) - - if !strings.HasSuffix(hdr.Name, "/") { - // Not the root directory, ensure that the parent directory exists. - // This happened in some tests where an image had a tarfile without any - // parent directories. 
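// The ".wh." handling just below pairs with the ExportChanges whiteout
// headers earlier in this diff: a deletion is shipped as an empty
// ".wh.<name>" entry, and ApplyLayer strips the prefix to recover the
// path to remove. A minimal, self-contained sketch of that naming scheme
// (helper names here are illustrative, not Docker's API):
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// whiteoutFor returns the archive entry name that marks path as deleted.
func whiteoutFor(path string) string {
	return filepath.Join(filepath.Dir(path), ".wh."+filepath.Base(path))
}

// deletedPath reports whether entry is a whiteout and, if so, which path it deletes.
func deletedPath(entry string) (string, bool) {
	base := filepath.Base(entry)
	if !strings.HasPrefix(base, ".wh.") {
		return "", false
	}
	return filepath.Join(filepath.Dir(entry), strings.TrimPrefix(base, ".wh.")), true
}

func main() {
	w := whiteoutFor("/etc/old.conf")
	p, ok := deletedPath(w)
	fmt.Println(w, p, ok) // /etc/.wh.old.conf /etc/old.conf true
}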
- parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = os.MkdirAll(parentPath, 600) - if err != nil { - return err - } - } - } - - // Skip AUFS metadata dirs - if strings.HasPrefix(hdr.Name, ".wh..wh.") { - // Regular files inside /.wh..wh.plnk can be used as hardlink targets - // We don't want this directory, but we need the files in them so that - // such hardlinks can be resolved. - if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg { - basename := filepath.Base(hdr.Name) - aufsHardlinks[basename] = hdr - if aufsTempdir == "" { - if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { - return err - } - defer os.RemoveAll(aufsTempdir) - } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr); err != nil { - return err - } - } - continue - } - - path := filepath.Join(dest, hdr.Name) - base := filepath.Base(path) - if strings.HasPrefix(base, ".wh.") { - originalBase := base[len(".wh."):] - originalPath := filepath.Join(filepath.Dir(path), originalBase) - if err := os.RemoveAll(originalPath); err != nil { - return err - } - } else { - // If path exits we almost always just want to remove and replace it. - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return err - } - } - } - - srcData := io.Reader(tr) - srcHdr := hdr - - // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so - // we manually retarget these into the temporary files we extracted them into - if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") { - linkBasename := filepath.Base(hdr.Linkname) - srcHdr = aufsHardlinks[linkBasename] - if srcHdr == nil { - return fmt.Errorf("Invalid aufs hardlink") - } - tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) - if err != nil { - return err - } - defer tmpFile.Close() - srcData = tmpFile - } - - if err := createTarFile(path, dest, srcHdr, srcData); err != nil { - return err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} - if err := syscall.UtimesNano(path, ts); err != nil { - return err - } - } - - return nil -} diff -Nru docker.io-0.9.1~dfsg1/archive/MAINTAINERS docker.io-1.3.2~dfsg1/archive/MAINTAINERS --- docker.io-0.9.1~dfsg1/archive/MAINTAINERS 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/archive/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -Michael Crosby (@crosbymichael) diff -Nru docker.io-0.9.1~dfsg1/archive/wrap.go docker.io-1.3.2~dfsg1/archive/wrap.go --- docker.io-0.9.1~dfsg1/archive/wrap.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/archive/wrap.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,59 +0,0 @@ -package archive - -import ( - "bytes" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "io/ioutil" -) - -// Generate generates a new archive from the 
content provided -// as input. -// -// `files` is a sequence of path/content pairs. A new file is -// added to the archive for each pair. -// If the last pair is incomplete, the file is created with an -// empty content. For example: -// -// Generate("foo.txt", "hello world", "emptyfile") -// -// The above call will return an archive with 2 files: -// * ./foo.txt with content "hello world" -// * ./empty with empty content -// -// FIXME: stream content instead of buffering -// FIXME: specify permissions and other archive metadata -func Generate(input ...string) (Archive, error) { - files := parseStringPairs(input...) - buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - for _, file := range files { - name, content := file[0], file[1] - hdr := &tar.Header{ - Name: name, - Size: int64(len(content)), - } - if err := tw.WriteHeader(hdr); err != nil { - return nil, err - } - if _, err := tw.Write([]byte(content)); err != nil { - return nil, err - } - } - if err := tw.Close(); err != nil { - return nil, err - } - return ioutil.NopCloser(buf), nil -} - -func parseStringPairs(input ...string) (output [][2]string) { - output = make([][2]string, 0, len(input)/2+1) - for i := 0; i < len(input); i += 2 { - var pair [2]string - pair[0] = input[i] - if i+1 < len(input) { - pair[1] = input[i+1] - } - output = append(output, pair) - } - return -} diff -Nru docker.io-0.9.1~dfsg1/auth/auth.go docker.io-1.3.2~dfsg1/auth/auth.go --- docker.io-0.9.1~dfsg1/auth/auth.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/auth/auth.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,290 +0,0 @@ -package auth - -import ( - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "github.com/dotcloud/docker/utils" - "io/ioutil" - "net/http" - "os" - "path" - "strings" -) - -// Where we store the config file -const CONFIGFILE = ".dockercfg" - -// Only used for user auth + account creation -const INDEXSERVER = "https://index.docker.io/v1/" - -//const INDEXSERVER = "https://indexstaging-docker.dotcloud.com/v1/" - -var ( - ErrConfigFileMissing = errors.New("The Auth config file is missing") -) - -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth"` - Email string `json:"email"` - ServerAddress string `json:"serveraddress,omitempty"` -} - -type ConfigFile struct { - Configs map[string]AuthConfig `json:"configs,omitempty"` - rootPath string -} - -func IndexServerAddress() string { - return INDEXSERVER -} - -// create a base64 encoded auth string to store in config -func encodeAuth(authConfig *AuthConfig) string { - authStr := authConfig.Username + ":" + authConfig.Password - msg := []byte(authStr) - encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) - base64.StdEncoding.Encode(encoded, msg) - return string(encoded) -} - -// decode the auth string -func decodeAuth(authStr string) (string, string, error) { - decLen := base64.StdEncoding.DecodedLen(len(authStr)) - decoded := make([]byte, decLen) - authByte := []byte(authStr) - n, err := base64.StdEncoding.Decode(decoded, authByte) - if err != nil { - return "", "", err - } - if n > decLen { - return "", "", fmt.Errorf("Something went wrong decoding auth config") - } - arr := strings.SplitN(string(decoded), ":", 2) - if len(arr) != 2 { - return "", "", fmt.Errorf("Invalid auth configuration file") - } - password := strings.Trim(arr[1], "\x00") - return arr[0], password, nil -} - -// load up the auth config information and return values -// FIXME: use the internal 
golang config parser -func LoadConfig(rootPath string) (*ConfigFile, error) { - configFile := ConfigFile{Configs: make(map[string]AuthConfig), rootPath: rootPath} - confFile := path.Join(rootPath, CONFIGFILE) - if _, err := os.Stat(confFile); err != nil { - return &configFile, nil //missing file is not an error - } - b, err := ioutil.ReadFile(confFile) - if err != nil { - return &configFile, err - } - - if err := json.Unmarshal(b, &configFile.Configs); err != nil { - arr := strings.Split(string(b), "\n") - if len(arr) < 2 { - return &configFile, fmt.Errorf("The Auth config file is empty") - } - authConfig := AuthConfig{} - origAuth := strings.Split(arr[0], " = ") - if len(origAuth) != 2 { - return &configFile, fmt.Errorf("Invalid Auth config file") - } - authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) - if err != nil { - return &configFile, err - } - origEmail := strings.Split(arr[1], " = ") - if len(origEmail) != 2 { - return &configFile, fmt.Errorf("Invalid Auth config file") - } - authConfig.Email = origEmail[1] - authConfig.ServerAddress = IndexServerAddress() - configFile.Configs[IndexServerAddress()] = authConfig - } else { - for k, authConfig := range configFile.Configs { - authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) - if err != nil { - return &configFile, err - } - authConfig.Auth = "" - configFile.Configs[k] = authConfig - authConfig.ServerAddress = k - } - } - return &configFile, nil -} - -// save the auth config -func SaveConfig(configFile *ConfigFile) error { - confFile := path.Join(configFile.rootPath, CONFIGFILE) - if len(configFile.Configs) == 0 { - os.Remove(confFile) - return nil - } - - configs := make(map[string]AuthConfig, len(configFile.Configs)) - for k, authConfig := range configFile.Configs { - authCopy := authConfig - - authCopy.Auth = encodeAuth(&authCopy) - authCopy.Username = "" - authCopy.Password = "" - authCopy.ServerAddress = "" - configs[k] = authCopy - } - - b, err := json.Marshal(configs) - if err != nil { - return err - } - err = ioutil.WriteFile(confFile, b, 0600) - if err != nil { - return err - } - return nil -} - -// try to register/login to the registry server -func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, error) { - var ( - status string - reqBody []byte - err error - client = &http.Client{} - reqStatusCode = 0 - serverAddress = authConfig.ServerAddress - ) - - if serverAddress == "" { - serverAddress = IndexServerAddress() - } - - loginAgainstOfficialIndex := serverAddress == IndexServerAddress() - - // to avoid sending the server address to the server it should be removed before being marshalled - authCopy := *authConfig - authCopy.ServerAddress = "" - - jsonBody, err := json.Marshal(authCopy) - if err != nil { - return "", fmt.Errorf("Config Error: %s", err) - } - - // using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status. - b := strings.NewReader(string(jsonBody)) - req1, err := http.Post(serverAddress+"users/", "application/json; charset=utf-8", b) - if err != nil { - return "", fmt.Errorf("Server Error: %s", err) - } - reqStatusCode = req1.StatusCode - defer req1.Body.Close() - reqBody, err = ioutil.ReadAll(req1.Body) - if err != nil { - return "", fmt.Errorf("Server Error: [%#v] %s", reqStatusCode, err) - } - - if reqStatusCode == 201 { - if loginAgainstOfficialIndex { - status = "Account created. Please use the confirmation link we sent" + - " to your e-mail to activate it." - } else { - status = "Account created. 
Please see the documentation of the registry " + serverAddress + " for instructions how to activate it." - } - } else if reqStatusCode == 400 { - if string(reqBody) == "\"Username or email already exists\"" { - req, err := factory.NewRequest("GET", serverAddress+"users/", nil) - req.SetBasicAuth(authConfig.Username, authConfig.Password) - resp, err := client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - if resp.StatusCode == 200 { - status = "Login Succeeded" - } else if resp.StatusCode == 401 { - return "", fmt.Errorf("Wrong login/password, please try again") - } else if resp.StatusCode == 403 { - if loginAgainstOfficialIndex { - return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.") - } - return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) - } else { - return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header) - } - } else { - return "", fmt.Errorf("Registration: %s", reqBody) - } - } else if reqStatusCode == 401 { - // This case would happen with private registries where /v1/users is - // protected, so people can use `docker login` as an auth check. - req, err := factory.NewRequest("GET", serverAddress+"users/", nil) - req.SetBasicAuth(authConfig.Username, authConfig.Password) - resp, err := client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - if resp.StatusCode == 200 { - status = "Login Succeeded" - } else if resp.StatusCode == 401 { - return "", fmt.Errorf("Wrong login/password, please try again") - } else { - return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, - resp.StatusCode, resp.Header) - } - } else { - return "", fmt.Errorf("Unexpected status code [%d] : %s", reqStatusCode, reqBody) - } - return status, nil -} - -// this method matches a auth configuration to a server address or a url -func (config *ConfigFile) ResolveAuthConfig(hostname string) AuthConfig { - if hostname == IndexServerAddress() || len(hostname) == 0 { - // default to the index server - return config.Configs[IndexServerAddress()] - } - - // First try the happy case - if c, found := config.Configs[hostname]; found { - return c - } - - convertToHostname := func(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.Replace(url, "http://", "", 1) - } else if strings.HasPrefix(url, "https://") { - stripped = strings.Replace(url, "https://", "", 1) - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] - } - - // Maybe they have a legacy config file, we will iterate the keys converting - // them to the new format and testing - normalizedHostename := convertToHostname(hostname) - for registry, config := range config.Configs { - if registryHostname := convertToHostname(registry); registryHostname == normalizedHostename { - return config - } - } - - // When all else fails, return an empty auth config - return AuthConfig{} -} diff -Nru docker.io-0.9.1~dfsg1/auth/auth_test.go docker.io-1.3.2~dfsg1/auth/auth_test.go --- docker.io-0.9.1~dfsg1/auth/auth_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/auth/auth_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,149 +0,0 @@ -package auth - -import ( - "io/ioutil" - "os" - 
"testing" -) - -func TestEncodeAuth(t *testing.T) { - newAuthConfig := &AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"} - authStr := encodeAuth(newAuthConfig) - decAuthConfig := &AuthConfig{} - var err error - decAuthConfig.Username, decAuthConfig.Password, err = decodeAuth(authStr) - if err != nil { - t.Fatal(err) - } - if newAuthConfig.Username != decAuthConfig.Username { - t.Fatal("Encode Username doesn't match decoded Username") - } - if newAuthConfig.Password != decAuthConfig.Password { - t.Fatal("Encode Password doesn't match decoded Password") - } - if authStr != "a2VuOnRlc3Q=" { - t.Fatal("AuthString encoding isn't correct.") - } -} - -func setupTempConfigFile() (*ConfigFile, error) { - root, err := ioutil.TempDir("", "docker-test-auth") - if err != nil { - return nil, err - } - configFile := &ConfigFile{ - rootPath: root, - Configs: make(map[string]AuthConfig), - } - - for _, registry := range []string{"testIndex", IndexServerAddress()} { - configFile.Configs[registry] = AuthConfig{ - Username: "docker-user", - Password: "docker-pass", - Email: "docker@docker.io", - } - } - - return configFile, nil -} - -func TestSameAuthDataPostSave(t *testing.T) { - configFile, err := setupTempConfigFile() - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(configFile.rootPath) - - err = SaveConfig(configFile) - if err != nil { - t.Fatal(err) - } - - authConfig := configFile.Configs["testIndex"] - if authConfig.Username != "docker-user" { - t.Fail() - } - if authConfig.Password != "docker-pass" { - t.Fail() - } - if authConfig.Email != "docker@docker.io" { - t.Fail() - } - if authConfig.Auth != "" { - t.Fail() - } -} - -func TestResolveAuthConfigIndexServer(t *testing.T) { - configFile, err := setupTempConfigFile() - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(configFile.rootPath) - - for _, registry := range []string{"", IndexServerAddress()} { - resolved := configFile.ResolveAuthConfig(registry) - if resolved != configFile.Configs[IndexServerAddress()] { - t.Fail() - } - } -} - -func TestResolveAuthConfigFullURL(t *testing.T) { - configFile, err := setupTempConfigFile() - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(configFile.rootPath) - - registryAuth := AuthConfig{ - Username: "foo-user", - Password: "foo-pass", - Email: "foo@example.com", - } - localAuth := AuthConfig{ - Username: "bar-user", - Password: "bar-pass", - Email: "bar@example.com", - } - configFile.Configs["https://registry.example.com/v1/"] = registryAuth - configFile.Configs["http://localhost:8000/v1/"] = localAuth - configFile.Configs["registry.com"] = registryAuth - - validRegistries := map[string][]string{ - "https://registry.example.com/v1/": { - "https://registry.example.com/v1/", - "http://registry.example.com/v1/", - "registry.example.com", - "registry.example.com/v1/", - }, - "http://localhost:8000/v1/": { - "https://localhost:8000/v1/", - "http://localhost:8000/v1/", - "localhost:8000", - "localhost:8000/v1/", - }, - "registry.com": { - "https://registry.com/v1/", - "http://registry.com/v1/", - "registry.com", - "registry.com/v1/", - }, - } - - for configKey, registries := range validRegistries { - for _, registry := range registries { - var ( - configured AuthConfig - ok bool - ) - resolved := configFile.ResolveAuthConfig(registry) - if configured, ok = configFile.Configs[configKey]; !ok { - t.Fail() - } - if resolved.Email != configured.Email { - t.Errorf("%s -> %q != %q\n", registry, resolved.Email, configured.Email) - } - } - } -} diff -Nru 
docker.io-0.9.1~dfsg1/auth/MAINTAINERS docker.io-1.3.2~dfsg1/auth/MAINTAINERS --- docker.io-0.9.1~dfsg1/auth/MAINTAINERS 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/auth/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -Sam Alba (@samalba) -Joffrey Fuhrer (@shin-) -Ken Cochrane (@kencochrane) diff -Nru docker.io-0.9.1~dfsg1/AUTHORS docker.io-1.3.2~dfsg1/AUTHORS --- docker.io-0.9.1~dfsg1/AUTHORS 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/AUTHORS 2014-11-24 17:38:01.000000000 +0000 @@ -1,66 +1,126 @@ # This file lists all individuals having contributed content to the repository. -# If you're submitting a patch, please add your name here in alphabetical order as part of the patch. -# -# For a list of active project maintainers, see the MAINTAINERS file. -# +# For how it is generated, see `hack/generate-authors.sh`. + Aanand Prasad Aaron Feng +Aaron Huslage Abel Muiño +Adam Miller +Adam Singer +Aditya +Adrian Mouat +Adrien Folie +AJ Bowen +Al Tobey +alambike +Albert Zhang +Aleksa Sarai +Alex Gaynor +Alex Warhawk Alexander Larsson +Alexander Shopov +Alexandr Morozov +Alexey Kotlyarov Alexey Shamrin -Alex Gaynor Alexis THOMAS -Al Tobey +almoehi +amangoel +AnandkumarPatel +Andre Dublin <81dublin@gmail.com> Andrea Luzzardi +Andrea Turli Andreas Savvides Andreas Tiefenthaler Andrew Duckworth +Andrew France Andrew Macgregor Andrew Munsell +Andrew Weiss +Andrew Williams Andrews Medina Andy Chambers andy diller -Andy Rothfusz +Andy Goldstein +Andy Kipp +Andy Rothfusz Andy Smith Anthony Bishopric +Anton Löfgren Anton Nikitin Antony Messerli apocas +Arnaud Porterie Asbjørn Enge +Barnaby Gray Barry Allard Bartłomiej Piotrowski -Benoit Chesneau +bdevloed +Ben Firshman Ben Sargent Ben Toews Ben Wiklund +Benjamin Atkin +Benoit Chesneau +Bernerd Schaefer Bhiraj Butala +bin liu Bouke Haarsma +Boyd Hemphill Brandon Liu Brandon Philips +Brandon Rhodes +Brett Kochendorfer +Brian (bex) Exelbierd Brian Dorsey +Brian Flad Brian Goff Brian McCallister Brian Olsen Brian Shumate +Brice Jaglin Briehan Lombaard Bruno Bigras +Bruno Renié +Bryan Bess +Bryan Matsuo +Bryan Murphy Caleb Spare Calen Pennington +Cameron Boehmer Carl X. Su Charles Hooper Charles Lindsay +Charles Merriam +Charlie Lewis +Chewey Chia-liang Kao +Chris Alfonso +Chris Snow Chris St. Pierre +chrismckinnel +Christian Berendt +ChristoperBiscardi +Christophe Troestler Christopher Currie Christopher Rigor -Christophe Troestler +Ciro S. 
Costa Clayton Coleman Colin Dunklau Colin Rice +Colin Walters Cory Forsyth +cpuguy83 cressie176 +Cruceru Calin-Cristian +Daan van Berkel +Dafydd Crosby Dan Buch Dan Hirsch +Dan Keder +Dan McPherson +Dan Stine +Dan Walsh +Dan Williams Daniel Exner Daniel Garcia Daniel Gasienica @@ -70,33 +130,53 @@ Daniel Robinson Daniel Von Fange Daniel YC Lin +Daniel, Dao Quang Minh +Danny Berger Danny Yates Darren Coxall +Darren Shepherd David Anderson David Calavera +David Corking +David Gageot David Mcanulty +David Röthlisberger David Sissitka Deni Bertovic +Derek +Deric Crago Dinesh Subhraveti +Djibril Koné dkumor Dmitry Demeshchuk +Dolph Mathews Dominik Honnef Don Spaulding -Dražen Lučanin +Doug Davis +doug tangren Dr Nic Williams +Dražen Lučanin Dustin Sallings Edmund Wagner +Eiichi Tsukata +Eivind Uggedal Elias Probst Emil Hernvall Emily Rose Eric Hanchrow Eric Lee Eric Myhre +Eric Windisch +Eric Windisch +Erik Hollensbe +Erik Inge Bolsø Erno Hopearuoho eugenkrizo +Evan Hazlett Evan Krall Evan Phoenix Evan Wies +evanderkoogh Eystein Måløy Stenberg ezbercih Fabio Falci @@ -104,126 +184,222 @@ Fabrizio Regini Faiz Khan Fareed Dudhia +Felix Rabe Fernando Flavio Castelli +FLGMwt +Francisco Carriedo Francisco Souza Frank Macreery +Fred Lifton Frederick F. Kautz IV Frederik Loeffert Freek Kalter Gabe Rosenhouse +Gabor Nagy Gabriel Monroy Galen Sampson Gareth Rushgrove +Geoffrey Bachelet Gereon Frey +German DZ Gert van Valkenhoef +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Glyn Normington +Goffert van Gool Graydon Hoare Greg Thornton grunny +Guilherme Salgado Guillaume J. Charmes Gurjeet Singh Guruprasad +Harald Albers Harley Laue Hector Castro +Henning Sprang +Hobofan +Hollie Teal +Hollie Teal +hollietealok Hunter Blanks +hyeongkyu.lee +Ian Babrou +Ian Bull +Ian Main +Ian Truslove +ILYA Khlopotov inglesp Isaac Dupree +Isabel Jimenez Isao Jonas +Ivan Fraixedes +Jack Danger Canty Jake Moshenko +jakedt James Allen James Carr +James DeFelice +James Harrison Fisher +James Kyle James Mills James Turnbull +Jan Pazdziora +Jan Toebes +Jaroslaw Zabiello jaseg +Jason Giedymin +Jason Hall +Jason Livesay Jason McVetta +Jason Plum Jean-Baptiste Barth Jean-Baptiste Dalido Jeff Lindsay +Jeff Welch +Jeffrey Bolle Jeremy Grosser -Jérôme Petazzoni Jesse Dubay +Jezeniel Zapanta +Jilles Oldenbeuving Jim Alateras +Jim Perrin Jimmy Cuadra +Jiří Župka Joe Beda +Joe Shaw Joe Van Dyk -Joffrey F +Joel Handwell +Joffrey F Johan Euphrosine -Johannes 'fish' Ziemke Johan Rydberg +Johannes 'fish' Ziemke John Costa John Feminella John Gardiner Myers +John OBrien III John Warwick +Jon Wedaman Jonas Pfenniger +Jonathan Boulle +Jonathan Camp +Jonathan McCrohan Jonathan Mueller +Jonathan Pares Jonathan Rudenberg -Jon Wedaman Joost Cassee Jordan Arentsen Jordan Sissel Joseph Anthony Pasquale Holsten Joseph Hager +Josh Josh Hawn Josh Poimboeuf JP Julien Barbier +Julien Bordellier Julien Dubois Justin Force Justin Plock +Justin Simonelis +Jérôme Petazzoni Karan Lyons Karl Grzeszczak +Kato Kazuyoshi Kawsar Saiyeed Keli Hu Ken Cochrane +Ken ICHIKAWA +Kevin "qwazerty" Houdebert Kevin Clark Kevin J. Lynagh +Kevin Menard +Kevin Wallace Keyvan Fatehi -kim0 +kies Kim BKC Carlbacker +kim0 Kimbro Staken Kiran Gangadharan +knappe +Kohei Tsuruta Konstantin Pelykh Kyle Conroy +kyu +Lachlan Coote +lalyos +Lance Chen +Lars R. 
Damerow Laurie Voss +leeplay +Len Weincier +Levi Gross +Lewis Peckover Liang-Chi Hsieh -Lokesh Mandvekar +Lokesh Mandvekar Louis Opter lukaspustina +lukemarsden Mahesh Tiyyagura +Manfred Zabarauskas Manuel Meurer Manuel Woelker +Marc Abramowitz Marc Kuo +Marc Tamsky Marco Hennings Marcus Farkas Marcus Ramberg +marcuslinke Marek Goldmann +Marius Voila Mark Allen Mark McGranaghan Marko Mikulicic Markus Fix Martijn van Oosterhout Martin Redmond +Mason Malone +Mateusz Sulima Mathieu Le Marec - Pasquet Matt Apperson Matt Bachmann Matt Haggard +Matthew Heon Matthew Mueller +Matthias Klumpp +Matthias Kühnle mattymo -Maxime Petazzoni +mattyw +Max Shytikov Maxim Treskin +Maxime Petazzoni meejah -Michael Crosby +Michael Brown +Michael Crosby Michael Gorsuch +Michael Neale +Michael Prokop Michael Stapelberg +Michaël Pailloncy +Michiel@unhosted Miguel Angel Fernández +Mike Chelen Mike Gaffney +Mike MacCana Mike Naberezny +Mike Snitzer Mikhail Sobolev Mohit Soni +Morgante Pell Morten Siebuhr +Mrunal Patel Nan Monnand Deng +Naoki Orii Nate Jones Nathan Kleyn +Nathan LeClaire Nelson Chen Niall O'Higgins Nick Payne @@ -231,87 +407,147 @@ Nick Stinemates Nicolas Dudebout Nicolas Kaiser +NikolaMandic +noducks Nolan Darilek +O.S. Tezer +OddBloke odk- Oguz Bilgic Ole Reifschneider -O.S.Tezer +Olivier Gambier pandrew Pascal Borreli +Patrick Hemmer pattichen +Paul +Paul Annesley Paul Bowsher Paul Hammond +Paul Jimenez Paul Lietar Paul Morie Paul Nasrat -Paul +Paul Weaver +Peter Bourgon Peter Braden -Peter Waller +Peter Waller +Phil Phil Spitler +Phillip Alexander Piergiuliano Bossi Pierre-Alain RIVIERE Piotr Bogdan pysqz Quentin Brossard +r0n22 Rafal Jeczalik +Rajat Pandit +Rajdeep Dua +Ralph Bean Ramkumar Ramachandra Ramon van Alteren Renato Riccieri Santos Zannon rgstephens Rhys Hiltner +Richard Harvey Richo Healey Rick Bradley +Rick van de Loo +Robert Bachmann Robert Obryk Roberto G. Hashioka -Roberto Hashioka +Robin Speekenbrink +robpc Rodrigo Vaz Roel Van Nyen Roger Peppe +Rohit Jnagal +Roland Huß +Roland Moriz +Ron Smits +Rovanion Luckey +Rudolph Gottesheim +Ryan Anderson +Ryan Aslett Ryan Fowler Ryan O'Donnell Ryan Seto +Ryan Thomas +s-ko Sam Alba +Sam Bailey Sam J Sharpe +Sam Reis +Sam Rijs Samuel Andaya +satoru +Satoshi Amemiya Scott Bessler +Scott Collier Sean Cronin Sean P. 
Kane +Sebastiaan van Stijn +Sebastiaan van Stijn +Senthil Kumar Selvaraj +SeongJae Park +Shane Canon +shaunol Shawn Landden Shawn Siefkas Shih-Yuan Lee -shin- Silas Sewell Simon Taranto +Sindhu S Sjoerd Langkemper -Solomon Hykes +Solomon Hykes Song Gao +Soulou +soulshake Sridatta Thatipamala Sridhar Ratnakumar Steeve Morin Stefan Praszalowicz +Stephen Crosby +Steven Burgess sudosurootdev Sven Dowideit Sylvain Bellemare +Sébastien +Sébastien Luttringer +Sébastien Stormacq tang0th Tatsuki Sugiura Tehmasp Chaudhri -Thatcher Peskens +Thatcher Peskens Thermionix Thijs Terlouw Thomas Bikeev Thomas Frössman Thomas Hansen Thomas LEVEIL +Thomas Schroeter Tianon Gravi -Tim Bosse +Tibor Vass +Tim Bosse +Tim Ruffles +Tim Ruffles Tim Terhorst +Timothy Hobbs +tjmehta Tobias Bieniek +Tobias Gesellchen Tobias Schmidt Tobias Schwab Todd Lunter +Tom Fotherby Tom Hulihan +Tom Maaswinkel Tommaso Visconti +Tony Daws +tpng Travis Cline +Trent Ogren Tyler Brock Tzu-Jung Lee Ulysse Carion @@ -319,26 +555,43 @@ vgeta Victor Coisne Victor Lyuboslavsky +Victor Marmol Victor Vieux +Viktor Vojnovski Vincent Batts Vincent Bernat +Vincent Mayers Vincent Woo Vinod Kulkarni +Vishnu Kannan Vitor Monteiro Vivek Agarwal +Vladimir Bulyga Vladimir Kirillov -Vladimir Rutsky +Vladimir Rutsky +waitingkuo +Walter Leibbrandt Walter Stanish WarheadsSE Wes Morgan Will Dietz -William Delanoue Will Rouesnel Will Weaver +William Delanoue +William Henry +William Riancho +William Thurston +wyc Xiuming Chen Yang Bai +Yasunori Mahata Yurii Rashkovskii +Zac Dover Zain Memon Zaiste! +Zane DeGraffenried Zilin Du zimbatm +Zoltan Tombol +zqh +Álvaro Lázaro diff -Nru docker.io-0.9.1~dfsg1/builder/dispatchers.go docker.io-1.3.2~dfsg1/builder/dispatchers.go --- docker.io-0.9.1~dfsg1/builder/dispatchers.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/dispatchers.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,353 @@ +package builder + +// This file contains the dispatchers for each command. Note that +// `nullDispatch` is not actually a command, but support for commands we parse +// but do nothing with. +// +// See evaluator.go for a higher level discussion of the whole evaluator +// package. + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/docker/nat" + "github.com/docker/docker/pkg/log" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/runconfig" +) + +// dispatch with no layer / parsing. This is effectively not a command. +func nullDispatch(b *Builder, args []string, attributes map[string]bool, original string) error { + return nil +} + +// ENV foo bar +// +// Sets the environment variable foo to bar, also makes interpolation +// in the dockerfile available from the next statement on via ${foo}. +// +func env(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 2 { + return fmt.Errorf("ENV accepts two arguments") + } + + fullEnv := fmt.Sprintf("%s=%s", args[0], args[1]) + + for i, envVar := range b.Config.Env { + envParts := strings.SplitN(envVar, "=", 2) + if args[0] == envParts[0] { + b.Config.Env[i] = fullEnv + return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv)) + } + } + b.Config.Env = append(b.Config.Env, fullEnv) + return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv)) +} + +// MAINTAINER some text +// +// Sets the maintainer metadata. 
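// Every instruction handler in this file shares the signature
// func(*Builder, []string, map[string]bool, string) error, which is what
// lets the evaluator keep a flat name -> handler table. A self-contained
// sketch of the pattern with the types pared down (the real table is
// evaluateTable in evaluator.go):
package main

import (
	"fmt"
	"strings"
)

type builder struct{ maintainer string }

type dispatcher func(b *builder, args []string) error

var table = map[string]dispatcher{
	"maintainer": func(b *builder, args []string) error {
		if len(args) != 1 {
			return fmt.Errorf("MAINTAINER requires only one argument")
		}
		b.maintainer = args[0]
		return nil
	},
}

func dispatch(b *builder, cmd string, args ...string) error {
	f, ok := table[strings.ToLower(cmd)]
	if !ok {
		return fmt.Errorf("unknown instruction %s", strings.ToUpper(cmd))
	}
	return f(b, args)
}

func main() {
	b := &builder{}
	if err := dispatch(b, "MAINTAINER", "Jane Doe <jane@example.com>"); err != nil {
		panic(err)
	}
	fmt.Println(b.maintainer)
}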
+func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return fmt.Errorf("MAINTAINER requires only one argument") + } + + b.maintainer = args[0] + return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) +} + +// ADD foo /path +// +// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling +// exist here. If you do not wish to have this automatic handling, use COPY. +// +func add(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) < 2 { + return fmt.Errorf("ADD requires at least two arguments") + } + + return b.runContextCommand(args, true, true, "ADD") +} + +// COPY foo /path +// +// Same as 'ADD' but without the tar and remote url handling. +// +func dispatchCopy(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) < 2 { + return fmt.Errorf("COPY requires at least two arguments") + } + + return b.runContextCommand(args, false, false, "COPY") +} + +// FROM imagename +// +// This sets the image the dockerfile will build on top of. +// +func from(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return fmt.Errorf("FROM requires one argument") + } + + name := args[0] + + image, err := b.Daemon.Repositories().LookupImage(name) + if err != nil { + if b.Daemon.Graph().IsNotExist(err) { + image, err = b.pullImage(name) + } + + // note that the top level err will still be !nil here if IsNotExist is + // not the error. This approach just simplifies hte logic a bit. + if err != nil { + return err + } + } + + return b.processImageFrom(image) +} + +// ONBUILD RUN echo yo +// +// ONBUILD triggers run when the image is used in a FROM statement. +// +// ONBUILD handling has a lot of special-case functionality, the heading in +// evaluator.go and comments around dispatch() in the same file explain the +// special cases. search for 'OnBuild' in internals.go for additional special +// cases. +// +func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error { + triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) + switch triggerInstruction { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) + } + + original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "") + + b.Config.OnBuild = append(b.Config.OnBuild, original) + return b.commit("", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", original)) +} + +// WORKDIR /tmp +// +// Set the working directory for future RUN/CMD/etc statements. +// +func workdir(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return fmt.Errorf("WORKDIR requires exactly one argument") + } + + workdir := args[0] + + if workdir[0] == '/' { + b.Config.WorkingDir = workdir + } else { + if b.Config.WorkingDir == "" { + b.Config.WorkingDir = "/" + } + b.Config.WorkingDir = filepath.Join(b.Config.WorkingDir, workdir) + } + + return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) +} + +// RUN some command yo +// +// run a command and commit the image. Args are automatically prepended with +// 'sh -c' in the event there is only one argument. 
The difference in +// processing: +// +// RUN echo hi # sh -c echo hi +// RUN [ "echo", "hi" ] # echo hi +// +func run(b *Builder, args []string, attributes map[string]bool, original string) error { + if b.image == "" { + return fmt.Errorf("Please provide a source image with `from` prior to run") + } + + args = handleJsonArgs(args, attributes) + + if len(args) == 1 { + args = append([]string{"/bin/sh", "-c"}, args[0]) + } + + runCmd := flag.NewFlagSet("run", flag.ContinueOnError) + runCmd.SetOutput(ioutil.Discard) + runCmd.Usage = nil + + config, _, _, err := runconfig.Parse(runCmd, append([]string{b.image}, args...), nil) + if err != nil { + return err + } + + cmd := b.Config.Cmd + // set Cmd manually, this is special case only for Dockerfiles + b.Config.Cmd = config.Cmd + runconfig.Merge(b.Config, config) + + defer func(cmd []string) { b.Config.Cmd = cmd }(cmd) + + log.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd) + + hit, err := b.probeCache() + if err != nil { + return err + } + if hit { + return nil + } + + c, err := b.create() + if err != nil { + return err + } + + // Ensure that we keep the container mounted until the commit + // to avoid unmounting and then mounting directly again + c.Mount() + defer c.Unmount() + + err = b.run(c) + if err != nil { + return err + } + if err := b.commit(c.ID, cmd, "run"); err != nil { + return err + } + + return nil +} + +// CMD foo +// +// Set the default command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +func cmd(b *Builder, args []string, attributes map[string]bool, original string) error { + b.Config.Cmd = handleJsonArgs(args, attributes) + + if !attributes["json"] { + b.Config.Cmd = append([]string{"/bin/sh", "-c"}, b.Config.Cmd...) + } + + if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %v", b.Config.Cmd)); err != nil { + return err + } + + if len(args) != 0 { + b.cmdSet = true + } + + return nil +} + +// ENTRYPOINT /usr/sbin/nginx +// +// Set the entrypoint (which defaults to sh -c) to /usr/sbin/nginx. Will +// accept the CMD as the arguments to /usr/sbin/nginx. +// +// Handles command processing similar to CMD and RUN, only b.Config.Entrypoint +// is initialized at NewBuilder time instead of through argument parsing. +// +func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error { + parsed := handleJsonArgs(args, attributes) + + switch { + case attributes["json"]: + // ENTRYPOINT ["echo", "hi"] + b.Config.Entrypoint = parsed + case len(parsed) == 0: + // ENTRYPOINT [] + b.Config.Entrypoint = nil + default: + // ENTRYPOINT echo hi + b.Config.Entrypoint = []string{"/bin/sh", "-c", parsed[0]} + } + + // when setting the entrypoint if a CMD was not explicitly set then + // set the command to nil + if !b.cmdSet { + b.Config.Cmd = nil + } + + if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %v", b.Config.Entrypoint)); err != nil { + return err + } + + return nil +} + +// EXPOSE 6667/tcp 7000/tcp +// +// Expose ports for links and port mappings. This all ends up in +// b.Config.ExposedPorts for runconfig. 
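// expose below merges newly declared ports into b.Config.ExposedPorts,
// which is a set keyed by port spec, so repeated EXPOSE lines are
// idempotent. A reduced sketch of that merge (the real nat.Port type also
// normalizes the protocol suffix):
package main

import "fmt"

type portSet map[string]struct{}

// mergePorts adds each spec to the set, ignoring duplicates.
func mergePorts(existing portSet, specs ...string) portSet {
	if existing == nil {
		existing = make(portSet)
	}
	for _, p := range specs {
		if _, ok := existing[p]; !ok {
			existing[p] = struct{}{}
		}
	}
	return existing
}

func main() {
	ports := mergePorts(nil, "6667/tcp", "7000/tcp")
	ports = mergePorts(ports, "6667/tcp") // duplicate is a no-op
	fmt.Println(len(ports))               // 2
}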
+// +func expose(b *Builder, args []string, attributes map[string]bool, original string) error { + portsTab := args + + if b.Config.ExposedPorts == nil { + b.Config.ExposedPorts = make(nat.PortSet) + } + + ports, _, err := nat.ParsePortSpecs(append(portsTab, b.Config.PortSpecs...)) + if err != nil { + return err + } + + for port := range ports { + if _, exists := b.Config.ExposedPorts[port]; !exists { + b.Config.ExposedPorts[port] = struct{}{} + } + } + b.Config.PortSpecs = nil + + return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %v", ports)) +} + +// USER foo +// +// Set the user to 'foo' for future commands and when running the +// ENTRYPOINT/CMD at container run time. +// +func user(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return fmt.Errorf("USER requires exactly one argument") + } + + b.Config.User = args[0] + return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args)) +} + +// VOLUME /foo +// +// Expose the volume /foo for use. Will also accept the JSON array form. +// +func volume(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return fmt.Errorf("Volume cannot be empty") + } + + if b.Config.Volumes == nil { + b.Config.Volumes = map[string]struct{}{} + } + for _, v := range args { + b.Config.Volumes[v] = struct{}{} + } + if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil { + return err + } + return nil +} + +// INSERT is no longer accepted, but we still parse it. +func insert(b *Builder, args []string, attributes map[string]bool, original string) error { + return fmt.Errorf("INSERT has been deprecated. Please use ADD instead") +} diff -Nru docker.io-0.9.1~dfsg1/builder/evaluator.go docker.io-1.3.2~dfsg1/builder/evaluator.go --- docker.io-0.9.1~dfsg1/builder/evaluator.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/evaluator.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,236 @@ +// builder is the evaluation step in the Dockerfile parse/evaluate pipeline. +// +// It incorporates a dispatch table based on the parser.Node values (see the +// parser package for more information) that are yielded from the parser itself. +// Calling NewBuilder with the BuildOpts struct can be used to customize the +// experience for execution purposes only. Parsing is controlled in the parser +// package, and this division of resposibility should be respected. +// +// Please see the jump table targets for the actual invocations, most of which +// will call out to the functions in internals.go to deal with their tasks. +// +// ONBUILD is a special case, which is covered in the onbuild() func in +// dispatchers.go. +// +// The evaluator uses the concept of "steps", which are usually each processable +// line in the Dockerfile. Each step is numbered and certain actions are taken +// before and after each step, such as creating an image ID and removing temporary +// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which +// includes its own set of steps (usually only one of them). 
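// Interpolation of ${name} references is deliberately limited: only the
// statements listed in replaceEnvAllowed below are expanded before
// dispatch. A rough, standalone sketch of that gate (the real expansion
// lives on Builder.replaceEnv; os.Expand stands in for it here):
package main

import (
	"fmt"
	"os"
)

var replaceEnvAllowed = map[string]struct{}{
	"env": {}, "add": {}, "copy": {}, "workdir": {},
	"expose": {}, "volume": {}, "user": {},
}

// maybeExpand expands ${k} from env only for statements that allow it.
func maybeExpand(cmd, value string, env map[string]string) string {
	if _, ok := replaceEnvAllowed[cmd]; !ok {
		return value
	}
	return os.Expand(value, func(k string) string { return env[k] })
}

func main() {
	env := map[string]string{"prefix": "/opt/app"}
	fmt.Println(maybeExpand("workdir", "${prefix}/bin", env)) // /opt/app/bin
	fmt.Println(maybeExpand("run", "${prefix}/bin", env))     // left as-is
}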
+package builder + +import ( + "errors" + "fmt" + "io" + "os" + "path" + "strings" + + "github.com/docker/docker/builder/parser" + "github.com/docker/docker/daemon" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" +) + +var ( + ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty") +) + +// Environment variable interpolation will happen on these statements only. +var replaceEnvAllowed = map[string]struct{}{ + "env": {}, + "add": {}, + "copy": {}, + "workdir": {}, + "expose": {}, + "volume": {}, + "user": {}, +} + +var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error + +func init() { + evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{ + "env": env, + "maintainer": maintainer, + "add": add, + "copy": dispatchCopy, // copy() is a go builtin + "from": from, + "onbuild": onbuild, + "workdir": workdir, + "run": run, + "cmd": cmd, + "entrypoint": entrypoint, + "expose": expose, + "volume": volume, + "user": user, + "insert": insert, + } +} + +// internal struct, used to maintain configuration of the Dockerfile's +// processing as it evaluates the parsing result. +type Builder struct { + Daemon *daemon.Daemon + Engine *engine.Engine + + // effectively stdio for the run. Because it is not stdio, I said + // "Effectively". Do not use stdio anywhere in this package for any reason. + OutStream io.Writer + ErrStream io.Writer + + Verbose bool + UtilizeCache bool + + // controls how images and containers are handled between steps. + Remove bool + ForceRemove bool + + AuthConfig *registry.AuthConfig + AuthConfigFile *registry.ConfigFile + + // Deprecated, original writer used for ImagePull. To be removed. + OutOld io.Writer + StreamFormatter *utils.StreamFormatter + + Config *runconfig.Config // runconfig for cmd, run, entrypoint etc. + + // both of these are controlled by the Remove and ForceRemove options in BuildOpts + TmpContainers map[string]struct{} // a map of containers used for removes + + dockerfile *parser.Node // the syntax tree of the dockerfile + image string // image name for commit processing + maintainer string // maintainer name. could probably be removed. + cmdSet bool // indicates is CMD was set in current Dockerfile + context tarsum.TarSum // the context is a tarball that is uploaded by the client + contextPath string // the path of the temporary directory the local context is unpacked to (server side) + +} + +// Run the builder with the context. This is the lynchpin of this package. This +// will (barring errors): +// +// * call readContext() which will set up the temporary directory and unpack +// the context into it. +// * read the dockerfile +// * parse the dockerfile +// * walk the parse tree and execute it by dispatching to handlers. If Remove +// or ForceRemove is set, additional cleanup around containers happens after +// processing. +// * Print a happy message and return the image ID. 
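// Condensed, the pipeline Run documents above is: parse once, then walk
// the top-level children of the tree and dispatch each as a numbered
// step, failing the build on the first handler error. A schematic sketch
// with the node and builder types reduced to the minimum:
package main

import "fmt"

type node struct{ value string }

type builder struct{ image string }

func (b *builder) dispatch(step int, n *node) error {
	fmt.Printf("Step %d : %s\n", step, n.value)
	b.image = fmt.Sprintf("img-%d", step) // stand-in for the per-step commit
	return nil
}

func run(b *builder, children []*node) (string, error) {
	for i, n := range children {
		if err := b.dispatch(i, n); err != nil {
			return "", err
		}
	}
	if b.image == "" {
		return "", fmt.Errorf("no image was generated; is the Dockerfile empty?")
	}
	return b.image, nil
}

func main() {
	id, err := run(&builder{}, []*node{{"FROM debian"}, {"RUN echo hi"}})
	if err != nil {
		panic(err)
	}
	fmt.Println("Successfully built", id)
}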
+// +func (b *Builder) Run(context io.Reader) (string, error) { + if err := b.readContext(context); err != nil { + return "", err + } + + defer func() { + if err := os.RemoveAll(b.contextPath); err != nil { + log.Debugf("[BUILDER] failed to remove temporary context: %s", err) + } + }() + + filename := path.Join(b.contextPath, "Dockerfile") + + fi, err := os.Stat(filename) + if os.IsNotExist(err) { + return "", fmt.Errorf("Cannot build a directory without a Dockerfile") + } + if fi.Size() == 0 { + return "", ErrDockerfileEmpty + } + + f, err := os.Open(filename) + if err != nil { + return "", err + } + + defer f.Close() + + ast, err := parser.Parse(f) + if err != nil { + return "", err + } + + b.dockerfile = ast + + // some initializations that would not have been supplied by the caller. + b.Config = &runconfig.Config{} + b.TmpContainers = map[string]struct{}{} + + for i, n := range b.dockerfile.Children { + if err := b.dispatch(i, n); err != nil { + if b.ForceRemove { + b.clearTmp() + } + return "", err + } + fmt.Fprintf(b.OutStream, " ---> %s\n", utils.TruncateID(b.image)) + if b.Remove { + b.clearTmp() + } + } + + if b.image == "" { + return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?\n") + } + + fmt.Fprintf(b.OutStream, "Successfully built %s\n", utils.TruncateID(b.image)) + return b.image, nil +} + +// This method is the entrypoint to all statement handling routines. +// +// Almost all nodes will have this structure: +// Child[Node, Node, Node] where Child is from parser.Node.Children and each +// node comes from parser.Node.Next. This forms a "line" with a statement and +// arguments and we process them in this normalized form by hitting +// evaluateTable with the leaf nodes of the command and the Builder object. +// +// ONBUILD is a special case; in this case the parser will emit: +// Child[Node, Child[Node, Node...]] where the first node is the literal +// "onbuild" and the child entrypoint is the command of the ONBUILD statmeent, +// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to +// deal with that, at least until it becomes more of a general concern with new +// features. +func (b *Builder) dispatch(stepN int, ast *parser.Node) error { + cmd := ast.Value + attrs := ast.Attributes + original := ast.Original + strs := []string{} + msg := fmt.Sprintf("Step %d : %s", stepN, strings.ToUpper(cmd)) + + if cmd == "onbuild" { + ast = ast.Next.Children[0] + strs = append(strs, ast.Value) + msg += " " + ast.Value + } + + for ast.Next != nil { + ast = ast.Next + var str string + str = ast.Value + if _, ok := replaceEnvAllowed[cmd]; ok { + str = b.replaceEnv(ast.Value) + } + strs = append(strs, str) + msg += " " + ast.Value + } + + fmt.Fprintln(b.OutStream, msg) + + // XXX yes, we skip any cmds that are not valid; the parser should have + // picked these out already. + if f, ok := evaluateTable[cmd]; ok { + return f(b, strs, attrs, original) + } + + fmt.Fprintf(b.ErrStream, "# Skipping unknown instruction %s\n", strings.ToUpper(cmd)) + + return nil +} diff -Nru docker.io-0.9.1~dfsg1/builder/internals.go docker.io-1.3.2~dfsg1/builder/internals.go --- docker.io-0.9.1~dfsg1/builder/internals.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/internals.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,689 @@ +package builder + +// internals for handling commands. Covers many areas and a lot of +// non-contiguous functionality. Please read the comments. 
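// readContext below decompresses the uploaded context, wraps it in a
// tarsum so ADD/COPY cache keys can be computed, and unpacks it into a
// temporary directory. A stdlib-only sketch of the unpack step, without
// the tarsum bookkeeping or the chrootarchive hardening the real code
// relies on (do not use this on untrusted archives):
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

func untar(r io.Reader, dest string) error {
	tr := tar.NewReader(r)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil // end of archive
		}
		if err != nil {
			return err
		}
		target := filepath.Join(dest, filepath.Clean(hdr.Name))
		switch hdr.Typeflag {
		case tar.TypeDir:
			if err := os.MkdirAll(target, os.FileMode(hdr.Mode)); err != nil {
				return err
			}
		case tar.TypeReg:
			if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
				return err
			}
			f, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(hdr.Mode))
			if err != nil {
				return err
			}
			_, err = io.Copy(f, tr)
			f.Close()
			if err != nil {
				return err
			}
		default:
			fmt.Fprintf(os.Stderr, "skipping %s\n", hdr.Name)
		}
	}
}

func main() {
	tmp, err := ioutil.TempDir("", "docker-build")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmp)
	if err := untar(os.Stdin, tmp); err != nil { // e.g. ./untar < context.tar
		panic(err)
	}
}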
+ +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "path" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/docker/docker/builder/parser" + "github.com/docker/docker/daemon" + imagepkg "github.com/docker/docker/image" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +func (b *Builder) readContext(context io.Reader) error { + tmpdirPath, err := ioutil.TempDir("", "docker-build") + if err != nil { + return err + } + + decompressedStream, err := archive.DecompressStream(context) + if err != nil { + return err + } + + if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil { + return err + } + + if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil { + return err + } + + b.contextPath = tmpdirPath + return nil +} + +func (b *Builder) commit(id string, autoCmd []string, comment string) error { + if b.image == "" { + return fmt.Errorf("Please provide a source image with `from` prior to commit") + } + b.Config.Image = b.image + if id == "" { + cmd := b.Config.Cmd + b.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment} + defer func(cmd []string) { b.Config.Cmd = cmd }(cmd) + + hit, err := b.probeCache() + if err != nil { + return err + } + if hit { + return nil + } + + container, err := b.create() + if err != nil { + return err + } + id = container.ID + + if err := container.Mount(); err != nil { + return err + } + defer container.Unmount() + } + container := b.Daemon.Get(id) + if container == nil { + return fmt.Errorf("An error occured while creating the container") + } + + // Note: Actually copy the struct + autoConfig := *b.Config + autoConfig.Cmd = autoCmd + + // Commit the container + image, err := b.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig) + if err != nil { + return err + } + b.image = image.ID + return nil +} + +type copyInfo struct { + origPath string + destPath string + hash string + decompress bool + tmpDir string +} + +func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error { + if b.context == nil { + return fmt.Errorf("No context given. Impossible to use %s", cmdName) + } + + if len(args) < 2 { + return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) + } + + dest := args[len(args)-1] // last one is always the dest + + copyInfos := []*copyInfo{} + + b.Config.Image = b.image + + defer func() { + for _, ci := range copyInfos { + if ci.tmpDir != "" { + os.RemoveAll(ci.tmpDir) + } + } + }() + + // Loop through each src file and calculate the info we need to + // do the copy (e.g. hash value if cached). 
Don't actually do + // the copy until we've looked at all src files + for _, orig := range args[0 : len(args)-1] { + err := calcCopyInfo(b, cmdName, ©Infos, orig, dest, allowRemote, allowDecompression) + if err != nil { + return err + } + } + + if len(copyInfos) == 0 { + return fmt.Errorf("No source files were specified") + } + + if len(copyInfos) > 1 && !strings.HasSuffix(dest, "/") { + return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) + } + + // For backwards compat, if there's just one CI then use it as the + // cache look-up string, otherwise hash 'em all into one + var srcHash string + var origPaths string + + if len(copyInfos) == 1 { + srcHash = copyInfos[0].hash + origPaths = copyInfos[0].origPath + } else { + var hashs []string + var origs []string + for _, ci := range copyInfos { + hashs = append(hashs, ci.hash) + origs = append(origs, ci.origPath) + } + hasher := sha256.New() + hasher.Write([]byte(strings.Join(hashs, ","))) + srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) + origPaths = strings.Join(origs, " ") + } + + cmd := b.Config.Cmd + b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)} + defer func(cmd []string) { b.Config.Cmd = cmd }(cmd) + + hit, err := b.probeCache() + if err != nil { + return err + } + // If we do not have at least one hash, never use the cache + if hit && b.UtilizeCache { + return nil + } + + container, _, err := b.Daemon.Create(b.Config, nil, "") + if err != nil { + return err + } + b.TmpContainers[container.ID] = struct{}{} + + if err := container.Mount(); err != nil { + return err + } + defer container.Unmount() + + for _, ci := range copyInfos { + if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil { + return err + } + } + + if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil { + return err + } + return nil +} + +func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error { + + if origPath != "" && origPath[0] == '/' && len(origPath) > 1 { + origPath = origPath[1:] + } + origPath = strings.TrimPrefix(origPath, "./") + + // In the remote/URL case, download it and gen its hashcode + if utils.IsURL(origPath) { + if !allowRemote { + return fmt.Errorf("Source can't be a URL for %s", cmdName) + } + + ci := copyInfo{} + ci.origPath = origPath + ci.hash = origPath // default to this but can change + ci.destPath = destPath + ci.decompress = false + *cInfos = append(*cInfos, &ci) + + // Initiate the download + resp, err := utils.Download(ci.origPath) + if err != nil { + return err + } + + // Create a tmp dir + tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote") + if err != nil { + return err + } + ci.tmpDir = tmpDirName + + // Create a tmp file within our tmp dir + tmpFileName := path.Join(tmpDirName, "tmp") + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return err + } + + // Download and dump result to tmp file + if _, err := io.Copy(tmpFile, utils.ProgressReader(resp.Body, int(resp.ContentLength), b.OutOld, b.StreamFormatter, true, "", "Downloading")); err != nil { + tmpFile.Close() + return err + } + fmt.Fprintf(b.OutStream, "\n") + tmpFile.Close() + + // Remove the mtime of the newly created tmp file + if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != 
nil { + return err + } + + ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) + + // If the destination is a directory, figure out the filename. + if strings.HasSuffix(ci.destPath, "/") { + u, err := url.Parse(origPath) + if err != nil { + return err + } + path := u.Path + if strings.HasSuffix(path, "/") { + path = path[:len(path)-1] + } + parts := strings.Split(path, "/") + filename := parts[len(parts)-1] + if filename == "" { + return fmt.Errorf("cannot determine filename from url: %s", u) + } + ci.destPath = ci.destPath + filename + } + + // Calc the checksum, only if we're using the cache + if b.UtilizeCache { + r, err := archive.Tar(tmpFileName, archive.Uncompressed) + if err != nil { + return err + } + tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0) + if err != nil { + return err + } + if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { + return err + } + ci.hash = tarSum.Sum(nil) + r.Close() + } + + return nil + } + + // Deal with wildcards + if ContainsWildcards(origPath) { + for _, fileInfo := range b.context.GetSums() { + if fileInfo.Name() == "" { + continue + } + match, _ := path.Match(origPath, fileInfo.Name()) + if !match { + continue + } + + calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression) + } + return nil + } + + // Must be a dir or a file + + if err := b.checkPathForAddition(origPath); err != nil { + return err + } + fi, _ := os.Stat(path.Join(b.contextPath, origPath)) + + ci := copyInfo{} + ci.origPath = origPath + ci.hash = origPath + ci.destPath = destPath + ci.decompress = allowDecompression + *cInfos = append(*cInfos, &ci) + + // If not using cache don't need to do anything else. + // If we are using a cache then calc the hash for the src file/dir + if !b.UtilizeCache { + return nil + } + + // Deal with the single file case + if !fi.IsDir() { + // This will match first file in sums of the archive + fis := b.context.GetSums().GetFile(ci.origPath) + if fis != nil { + ci.hash = "file:" + fis.Sum() + } + return nil + } + + // Must be a dir + var subfiles []string + absOrigPath := path.Join(b.contextPath, ci.origPath) + + // Add a trailing / to make sure we only pick up nested files under + // the dir and not sibling files of the dir that just happen to + // start with the same chars + if !strings.HasSuffix(absOrigPath, "/") { + absOrigPath += "/" + } + + // Need path w/o / too to find matching dir w/o trailing / + absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1] + + for _, fileInfo := range b.context.GetSums() { + absFile := path.Join(b.contextPath, fileInfo.Name()) + if strings.HasPrefix(absFile, absOrigPath) || absFile == absOrigPathNoSlash { + subfiles = append(subfiles, fileInfo.Sum()) + } + } + sort.Strings(subfiles) + hasher := sha256.New() + hasher.Write([]byte(strings.Join(subfiles, ","))) + ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil)) + + return nil +} + +func ContainsWildcards(name string) bool { + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' { + i++ + } else if ch == '*' || ch == '?' 
|| ch == '[' { + return true + } + } + return false +} + +func (b *Builder) pullImage(name string) (*imagepkg.Image, error) { + remote, tag := parsers.ParseRepositoryTag(name) + if tag == "" { + tag = "latest" + } + pullRegistryAuth := b.AuthConfig + if len(b.AuthConfigFile.Configs) > 0 { + // The request came with a full auth config file, we prefer to use that + endpoint, _, err := registry.ResolveRepositoryName(remote) + if err != nil { + return nil, err + } + resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(endpoint) + pullRegistryAuth = &resolvedAuth + } + job := b.Engine.Job("pull", remote, tag) + job.SetenvBool("json", b.StreamFormatter.Json()) + job.SetenvBool("parallel", true) + job.SetenvJson("authConfig", pullRegistryAuth) + job.Stdout.Add(b.OutOld) + if err := job.Run(); err != nil { + return nil, err + } + image, err := b.Daemon.Repositories().LookupImage(name) + if err != nil { + return nil, err + } + + return image, nil +} + +func (b *Builder) processImageFrom(img *imagepkg.Image) error { + b.image = img.ID + + if img.Config != nil { + b.Config = img.Config + } + + if len(b.Config.Env) == 0 { + b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv) + } + + // Process ONBUILD triggers if they exist + if nTriggers := len(b.Config.OnBuild); nTriggers != 0 { + fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers) + } + + // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. + onBuildTriggers := b.Config.OnBuild + b.Config.OnBuild = []string{} + + // parse the ONBUILD triggers by invoking the parser + for stepN, step := range onBuildTriggers { + ast, err := parser.Parse(strings.NewReader(step)) + if err != nil { + return err + } + + for i, n := range ast.Children { + switch strings.ToUpper(n.Value) { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value) + } + + fmt.Fprintf(b.OutStream, "Trigger %d, %s\n", stepN, step) + + if err := b.dispatch(i, n); err != nil { + return err + } + } + } + + return nil +} + +// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`) +// and if so attempts to look up the current `b.image` and `b.Config` pair +// in the current server `b.Daemon`. If an image is found, probeCache returns +// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there +// is any error, it returns `(false, err)`. 
+func (b *Builder) probeCache() (bool, error) { + if b.UtilizeCache { + if cache, err := b.Daemon.ImageGetCached(b.image, b.Config); err != nil { + return false, err + } else if cache != nil { + fmt.Fprintf(b.OutStream, " ---> Using cache\n") + log.Debugf("[BUILDER] Use cached version") + b.image = cache.ID + return true, nil + } else { + log.Debugf("[BUILDER] Cache miss") + } + } + return false, nil +} + +func (b *Builder) create() (*daemon.Container, error) { + if b.image == "" { + return nil, fmt.Errorf("Please provide a source image with `from` prior to run") + } + b.Config.Image = b.image + + config := *b.Config + + // Create the container + c, warnings, err := b.Daemon.Create(b.Config, nil, "") + if err != nil { + return nil, err + } + for _, warning := range warnings { + fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning) + } + + b.TmpContainers[c.ID] = struct{}{} + fmt.Fprintf(b.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID)) + + // override the entry point that may have been picked up from the base image + c.Path = config.Cmd[0] + c.Args = config.Cmd[1:] + + return c, nil +} + +func (b *Builder) run(c *daemon.Container) error { + var errCh chan error + if b.Verbose { + errCh = promise.Go(func() error { + // FIXME: call the 'attach' job so that daemon.Attach can be made private + // + // FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach + // but without hijacking for stdin. Also, with attach there can be race + // condition because of some output already was printed before it. + return <-b.Daemon.Attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, nil, nil, b.OutStream, b.ErrStream) + }) + } + + //start the container + if err := c.Start(); err != nil { + return err + } + + if errCh != nil { + if err := <-errCh; err != nil { + return err + } + } + + // Wait for it to finish + if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 { + err := &utils.JSONError{ + Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret), + Code: ret, + } + return err + } + + return nil +} + +func (b *Builder) checkPathForAddition(orig string) error { + origPath := path.Join(b.contextPath, orig) + origPath, err := filepath.EvalSymlinks(origPath) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("%s: no such file or directory", orig) + } + return err + } + if !strings.HasPrefix(origPath, b.contextPath) { + return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath) + } + if _, err := os.Stat(origPath); err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("%s: no such file or directory", orig) + } + return err + } + return nil +} + +func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error { + var ( + err error + destExists = true + origPath = path.Join(b.contextPath, orig) + destPath = path.Join(container.RootfsPath(), dest) + ) + + if destPath != container.RootfsPath() { + destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath()) + if err != nil { + return err + } + } + + // Preserve the trailing '/' + if strings.HasSuffix(dest, "/") || dest == "." 
{ + destPath = destPath + "/" + } + + destStat, err := os.Stat(destPath) + if err != nil { + if !os.IsNotExist(err) { + return err + } + destExists = false + } + + fi, err := os.Stat(origPath) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("%s: no such file or directory", orig) + } + return err + } + + if fi.IsDir() { + return copyAsDirectory(origPath, destPath, destExists) + } + + // If we are adding a remote file (or we've been told not to decompress), do not try to untar it + if decompress { + // First try to unpack the source as an archive + // to support the untar feature we need to clean up the path a little bit + // because tar is very forgiving. First we need to strip off the archive's + // filename from the path but this is only added if it does not end in / . + tarDest := destPath + if strings.HasSuffix(tarDest, "/") { + tarDest = filepath.Dir(destPath) + } + + // try to successfully untar the orig + if err := chrootarchive.UntarPath(origPath, tarDest); err == nil { + return nil + } else if err != io.EOF { + log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err) + } + } + + if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil { + return err + } + if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil { + return err + } + + resPath := destPath + if destExists && destStat.IsDir() { + resPath = path.Join(destPath, path.Base(origPath)) + } + + return fixPermissions(resPath, 0, 0) +} + +func copyAsDirectory(source, destination string, destinationExists bool) error { + if err := chrootarchive.CopyWithTar(source, destination); err != nil { + return err + } + + if destinationExists { + files, err := ioutil.ReadDir(source) + if err != nil { + return err + } + + for _, file := range files { + if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil { + return err + } + } + return nil + } + + return fixPermissions(destination, 0, 0) +} + +func fixPermissions(destination string, uid, gid int) error { + return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error { + if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) { + return err + } + return nil + }) +} + +func (b *Builder) clearTmp() { + for c := range b.TmpContainers { + tmp := b.Daemon.Get(c) + if err := b.Daemon.Destroy(tmp); err != nil { + fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error()) + return + } + b.Daemon.DeleteVolumes(tmp.VolumePaths()) + delete(b.TmpContainers, c) + fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c)) + } +} diff -Nru docker.io-0.9.1~dfsg1/builder/job.go docker.io-1.3.2~dfsg1/builder/job.go --- docker.io-0.9.1~dfsg1/builder/job.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/job.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,130 @@ +package builder + +import ( + "io" + "io/ioutil" + "os" + "os/exec" + "strings" + + "github.com/docker/docker/daemon" + "github.com/docker/docker/engine" + "github.com/docker/docker/graph" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +type BuilderJob struct { + Engine *engine.Engine + Daemon *daemon.Daemon +} + +func (b *BuilderJob) Install() { + b.Engine.Register("build", b.CmdBuild) +} + +func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { + if len(job.Args) != 0 { + return job.Errorf("Usage: %s\n", job.Name) + } + var 
( + remoteURL = job.Getenv("remote") + repoName = job.Getenv("t") + suppressOutput = job.GetenvBool("q") + noCache = job.GetenvBool("nocache") + rm = job.GetenvBool("rm") + forceRm = job.GetenvBool("forcerm") + authConfig = &registry.AuthConfig{} + configFile = &registry.ConfigFile{} + tag string + context io.ReadCloser + ) + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("configFile", configFile) + + repoName, tag = parsers.ParseRepositoryTag(repoName) + if repoName != "" { + if _, _, err := registry.ResolveRepositoryName(repoName); err != nil { + return job.Error(err) + } + if len(tag) > 0 { + if err := graph.ValidateTagName(tag); err != nil { + return job.Error(err) + } + } + } + + if remoteURL == "" { + context = ioutil.NopCloser(job.Stdin) + } else if utils.IsGIT(remoteURL) { + if !strings.HasPrefix(remoteURL, "git://") { + remoteURL = "https://" + remoteURL + } + root, err := ioutil.TempDir("", "docker-build-git") + if err != nil { + return job.Error(err) + } + defer os.RemoveAll(root) + + if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { + return job.Errorf("Error trying to use git: %s (%s)", err, output) + } + + c, err := archive.Tar(root, archive.Uncompressed) + if err != nil { + return job.Error(err) + } + context = c + } else if utils.IsURL(remoteURL) { + f, err := utils.Download(remoteURL) + if err != nil { + return job.Error(err) + } + defer f.Body.Close() + dockerFile, err := ioutil.ReadAll(f.Body) + if err != nil { + return job.Error(err) + } + c, err := archive.Generate("Dockerfile", string(dockerFile)) + if err != nil { + return job.Error(err) + } + context = c + } + defer context.Close() + + sf := utils.NewStreamFormatter(job.GetenvBool("json")) + + builder := &Builder{ + Daemon: b.Daemon, + Engine: b.Engine, + OutStream: &utils.StdoutFormater{ + Writer: job.Stdout, + StreamFormatter: sf, + }, + ErrStream: &utils.StderrFormater{ + Writer: job.Stdout, + StreamFormatter: sf, + }, + Verbose: !suppressOutput, + UtilizeCache: !noCache, + Remove: rm, + ForceRemove: forceRm, + OutOld: job.Stdout, + StreamFormatter: sf, + AuthConfig: authConfig, + AuthConfigFile: configFile, + } + + id, err := builder.Run(context) + if err != nil { + return job.Error(err) + } + + if repoName != "" { + b.Daemon.Repositories().Set(repoName, tag, id, false) + } + return engine.StatusOK +} diff -Nru docker.io-0.9.1~dfsg1/builder/MAINTAINERS docker.io-1.3.2~dfsg1/builder/MAINTAINERS --- docker.io-0.9.1~dfsg1/builder/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,2 @@ +Tibor Vass (@tiborvass) +Erik Hollensbe (@erikh) diff -Nru docker.io-0.9.1~dfsg1/builder/parser/dumper/main.go docker.io-1.3.2~dfsg1/builder/parser/dumper/main.go --- docker.io-0.9.1~dfsg1/builder/parser/dumper/main.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/dumper/main.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,32 @@ +package main + +import ( + "fmt" + "os" + + "github.com/docker/docker/builder/parser" +) + +func main() { + var f *os.File + var err error + + if len(os.Args) < 2 { + fmt.Println("please supply filename(s)") + os.Exit(1) + } + + for _, fn := range os.Args[1:] { + f, err = os.Open(fn) + if err != nil { + panic(err) + } + + ast, err := parser.Parse(f) + if err != nil { + panic(err) + } else { + fmt.Println(ast.Dump()) + } + } +} diff -Nru docker.io-0.9.1~dfsg1/builder/parser/line_parsers.go 
docker.io-1.3.2~dfsg1/builder/parser/line_parsers.go --- docker.io-0.9.1~dfsg1/builder/parser/line_parsers.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/line_parsers.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,155 @@ +package parser + +// line parsers are dispatch calls that parse a single unit of text into a +// Node object which contains the whole statement. Dockerfiles have varied +// (but not usually unique, see ONBUILD for a unique example) parsing rules +// per-command, and these unify the processing in a way that makes it +// manageable. + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" +) + +var ( + errDockerfileJSONNesting = errors.New("You may not nest arrays in Dockerfile statements.") +) + +// ignore the current argument. This will still leave a command parsed, but +// will not incorporate the arguments into the ast. +func parseIgnore(rest string) (*Node, map[string]bool, error) { + return &Node{}, nil, nil +} + +// used for onbuild. Could potentially be used for anything that represents a +// statement with sub-statements. +// +// ONBUILD RUN foo bar -> (onbuild (run foo bar)) +// +func parseSubCommand(rest string) (*Node, map[string]bool, error) { + _, child, err := parseLine(rest) + if err != nil { + return nil, nil, err + } + + return &Node{Children: []*Node{child}}, nil, nil +} + +// parse environment like statements. Note that this does *not* handle +// variable interpolation, which will be handled in the evaluator. +func parseEnv(rest string) (*Node, map[string]bool, error) { + node := &Node{} + rootnode := node + strs := TOKEN_WHITESPACE.Split(rest, 2) + + if len(strs) < 2 { + return nil, nil, fmt.Errorf("ENV must have two arguments") + } + + node.Value = strs[0] + node.Next = &Node{} + node.Next.Value = strs[1] + + return rootnode, nil, nil +} + +// parses a whitespace-delimited set of arguments. The result is effectively a +// linked list of string arguments. +func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error) { + node := &Node{} + rootnode := node + prevnode := node + for _, str := range TOKEN_WHITESPACE.Split(rest, -1) { // use regexp + prevnode = node + node.Value = str + node.Next = &Node{} + node = node.Next + } + + // XXX to get around regexp.Split *always* providing an empty string at the + // end due to how our loop is constructed, nil out the last node in the + // chain. + prevnode.Next = nil + + return rootnode, nil, nil +} + +// parsestring just wraps the string in quotes and returns a working node. +func parseString(rest string) (*Node, map[string]bool, error) { + n := &Node{} + n.Value = rest + return n, nil, nil +} + +// parseJSON converts JSON arrays to an AST. +func parseJSON(rest string) (*Node, map[string]bool, error) { + var ( + myJson []interface{} + next = &Node{} + orignext = next + prevnode = next + ) + + if err := json.Unmarshal([]byte(rest), &myJson); err != nil { + return nil, nil, err + } + + for _, str := range myJson { + switch str.(type) { + case string: + case float64: + str = strconv.FormatFloat(str.(float64), 'G', -1, 64) + default: + return nil, nil, errDockerfileJSONNesting + } + next.Value = str.(string) + next.Next = &Node{} + prevnode = next + next = next.Next + } + + prevnode.Next = nil + + return orignext, map[string]bool{"json": true}, nil +} + +// parseMaybeJSON determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, quotes the result and returns a single +// node. 
+func parseMaybeJSON(rest string) (*Node, map[string]bool, error) { + rest = strings.TrimSpace(rest) + + node, attrs, err := parseJSON(rest) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileJSONNesting { + return nil, nil, err + } + + node = &Node{} + node.Value = rest + return node, nil, nil +} + +// parseMaybeJSONToList determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, attempts to parse it as a whitespace +// delimited string. +func parseMaybeJSONToList(rest string) (*Node, map[string]bool, error) { + rest = strings.TrimSpace(rest) + + node, attrs, err := parseJSON(rest) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileJSONNesting { + return nil, nil, err + } + + return parseStringsWhitespaceDelimited(rest) +} diff -Nru docker.io-0.9.1~dfsg1/builder/parser/parser.go docker.io-1.3.2~dfsg1/builder/parser/parser.go --- docker.io-0.9.1~dfsg1/builder/parser/parser.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/parser.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,140 @@ +// This package implements a parser and parse tree dumper for Dockerfiles. +package parser + +import ( + "bufio" + "io" + "regexp" + "strings" + "unicode" +) + +// Node is a structure used to represent a parse tree. +// +// In the node there are three fields, Value, Next, and Children. Value is the +// current token's string value. Next is always the next non-child token, and +// children contains all the children. Here's an example: +// +// (value next (child child-next child-next-next) next-next) +// +// This data structure is frankly pretty lousy for handling complex languages, +// but lucky for us the Dockerfile isn't very complicated. This structure +// works a little more effectively than a "proper" parse tree for our needs. +// +type Node struct { + Value string // actual content + Next *Node // the next item in the current sexp + Children []*Node // the children of this sexp + Attributes map[string]bool // special attributes for this node + Original string // original line used before parsing +} + +var ( + dispatch map[string]func(string) (*Node, map[string]bool, error) + TOKEN_WHITESPACE = regexp.MustCompile(`[\t\v\f\r ]+`) + TOKEN_LINE_CONTINUATION = regexp.MustCompile(`\\\s*$`) + TOKEN_COMMENT = regexp.MustCompile(`^#.*$`) +) + +func init() { + // Dispatch Table. see line_parsers.go for the parse functions. + // The command is parsed and mapped to the line parser. The line parser + // receives the arguments but not the command, and returns an AST after + // reformulating the arguments according to the rules in the parser + // functions. Errors are propagated up by Parse() and the resulting AST can + // be incorporated directly into the existing AST as a next. + dispatch = map[string]func(string) (*Node, map[string]bool, error){ + "user": parseString, + "onbuild": parseSubCommand, + "workdir": parseString, + "env": parseEnv, + "maintainer": parseString, + "from": parseString, + "add": parseStringsWhitespaceDelimited, + "copy": parseStringsWhitespaceDelimited, + "run": parseMaybeJSON, + "cmd": parseMaybeJSON, + "entrypoint": parseMaybeJSON, + "expose": parseStringsWhitespaceDelimited, + "volume": parseMaybeJSONToList, + "insert": parseIgnore, + } +} + +// parse a line and return the remainder. 
+func parseLine(line string) (string, *Node, error) { + if line = stripComments(line); line == "" { + return "", nil, nil + } + + if TOKEN_LINE_CONTINUATION.MatchString(line) { + line = TOKEN_LINE_CONTINUATION.ReplaceAllString(line, "") + return line, nil, nil + } + + cmd, args, err := splitCommand(line) + if err != nil { + return "", nil, err + } + + node := &Node{} + node.Value = cmd + + sexp, attrs, err := fullDispatch(cmd, args) + if err != nil { + return "", nil, err + } + + if sexp.Value != "" || sexp.Next != nil || sexp.Children != nil { + node.Next = sexp + } + + node.Attributes = attrs + node.Original = line + + return "", node, nil +} + +// The main parse routine. Handles an io.ReadWriteCloser and returns the root +// of the AST. +func Parse(rwc io.Reader) (*Node, error) { + root := &Node{} + scanner := bufio.NewScanner(rwc) + + for scanner.Scan() { + scannedLine := strings.TrimLeftFunc(scanner.Text(), unicode.IsSpace) + if stripComments(scannedLine) == "" { + continue + } + + line, child, err := parseLine(scannedLine) + if err != nil { + return nil, err + } + + if line != "" && child == nil { + for scanner.Scan() { + newline := scanner.Text() + + if stripComments(strings.TrimSpace(newline)) == "" { + continue + } + + line, child, err = parseLine(line + newline) + if err != nil { + return nil, err + } + + if child != nil { + break + } + } + } + + if child != nil { + root.Children = append(root.Children, child) + } + } + + return root, nil +} diff -Nru docker.io-0.9.1~dfsg1/builder/parser/parser_test.go docker.io-1.3.2~dfsg1/builder/parser/parser_test.go --- docker.io-0.9.1~dfsg1/builder/parser/parser_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/parser_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,82 @@ +package parser + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +const testDir = "testfiles" +const negativeTestDir = "testfiles-negative" + +func getDirs(t *testing.T, dir string) []os.FileInfo { + f, err := os.Open(dir) + if err != nil { + t.Fatal(err) + } + + defer f.Close() + + dirs, err := f.Readdir(0) + if err != nil { + t.Fatal(err) + } + + return dirs +} + +func TestTestNegative(t *testing.T) { + for _, dir := range getDirs(t, negativeTestDir) { + dockerfile := filepath.Join(negativeTestDir, dir.Name(), "Dockerfile") + + df, err := os.Open(dockerfile) + if err != nil { + t.Fatalf("Dockerfile missing for %s: %s", dir.Name(), err.Error()) + } + + _, err = Parse(df) + if err == nil { + t.Fatalf("No error parsing broken dockerfile for %s", dir.Name()) + } + + df.Close() + } +} + +func TestTestData(t *testing.T) { + for _, dir := range getDirs(t, testDir) { + dockerfile := filepath.Join(testDir, dir.Name(), "Dockerfile") + resultfile := filepath.Join(testDir, dir.Name(), "result") + + df, err := os.Open(dockerfile) + if err != nil { + t.Fatalf("Dockerfile missing for %s: %s", dir.Name(), err.Error()) + } + + rf, err := os.Open(resultfile) + if err != nil { + t.Fatalf("Result file missing for %s: %s", dir.Name(), err.Error()) + } + + ast, err := Parse(df) + if err != nil { + t.Fatalf("Error parsing %s's dockerfile: %s", dir.Name(), err.Error()) + } + + content, err := ioutil.ReadAll(rf) + if err != nil { + t.Fatalf("Error reading %s's result file: %s", dir.Name(), err.Error()) + } + + if ast.Dump()+"\n" != string(content) { + fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump()) + fmt.Fprintln(os.Stderr, "Expected:\n"+string(content)) + t.Fatalf("%s: AST dump of dockerfile does not match result", dir.Name()) + 
} + + df.Close() + rf.Close() + } +} diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/brimstone-consuldock/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/brimstone-consuldock/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/brimstone-consuldock/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/brimstone-consuldock/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,25 @@ +FROM brimstone/ubuntu:14.04 + +MAINTAINER brimstone@the.narro.ws + +# TORUN -v /var/run/docker.sock:/var/run/docker.sock + +ENV GOPATH /go + +# Set our command +ENTRYPOINT ["/usr/local/bin/consuldock"] + +# Install the packages we need, clean up after them and us +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/brimstone/consuldock \ + && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/brimstone-consuldock/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/brimstone-consuldock/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/brimstone-consuldock/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/brimstone-consuldock/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,5 @@ +(from "brimstone/ubuntu:14.04") +(maintainer "brimstone@the.narro.ws") +(env "GOPATH" "/go") +(entrypoint "/usr/local/bin/consuldock") +(run "apt-get update && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists && go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') && rm /tmp/dpkg.* && rm -rf $GOPATH") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/brimstone-docker-consul/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/brimstone-docker-consul/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/brimstone-docker-consul/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/brimstone-docker-consul/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,52 @@ +FROM brimstone/ubuntu:14.04 + +CMD [] + +ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"] + +EXPOSE 8500 8600 8400 8301 8302 + +RUN apt-get update \ + && apt-get install -y unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists + +RUN cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip \ + + && dpkg -l | 
awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* + +ENV GOPATH /go + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/hashicorp/consul \ + && mv $GOPATH/bin/consul /usr/bin/consul \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/brimstone-docker-consul/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/brimstone-docker-consul/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/brimstone-docker-consul/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/brimstone-docker-consul/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,9 @@ +(from "brimstone/ubuntu:14.04") +(cmd) +(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") +(expose "8500" "8600" "8400" "8301" "8302") +(run "apt-get update && apt-get install -y unzip wget && apt-get clean && rm -rf /var/lib/apt/lists") +(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip") +(run "apt-get update && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') && rm /tmp/dpkg.*") +(env "GOPATH" "/go") +(run "apt-get update && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists && go get -v github.com/hashicorp/consul && mv $GOPATH/bin/consul /usr/bin/consul && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') && rm /tmp/dpkg.* && rm -rf $GOPATH") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/continueIndent/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/continueIndent/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/continueIndent/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/continueIndent/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,36 @@ +FROM ubuntu:14.04 + +RUN echo hello\ + world\ + goodnight \ + moon\ + light\ +ning +RUN echo hello \ + world +RUN echo hello \ +world +RUN echo hello \ +goodbye\ +frog +RUN echo hello \ +world +RUN echo hi \ + \ + world \ +\ + good\ +\ +night +RUN echo goodbye\ +frog +RUN echo good\ +bye\ +frog + +RUN echo hello \ +# this is a comment + +# this is a comment with a blank line surrounding it + +this is some more useful stuff diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/continueIndent/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/continueIndent/result --- 
docker.io-0.9.1~dfsg1/builder/parser/testfiles/continueIndent/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/continueIndent/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,10 @@ +(from "ubuntu:14.04") +(run "echo hello world goodnight moon lightning") +(run "echo hello world") +(run "echo hello world") +(run "echo hello goodbyefrog") +(run "echo hello world") +(run "echo hi world goodnight") +(run "echo goodbyefrog") +(run "echo goodbyefrog") +(run "echo hello this is some more useful stuff") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/cpuguy83-nagios/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/cpuguy83-nagios/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/cpuguy83-nagios/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/cpuguy83-nagios/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,54 @@ +FROM cpuguy83/ubuntu +ENV NAGIOS_HOME /opt/nagios +ENV NAGIOS_USER nagios +ENV NAGIOS_GROUP nagios +ENV NAGIOS_CMDUSER nagios +ENV NAGIOS_CMDGROUP nagios +ENV NAGIOSADMIN_USER nagiosadmin +ENV NAGIOSADMIN_PASS nagios +ENV APACHE_RUN_USER nagios +ENV APACHE_RUN_GROUP nagios +ENV NAGIOS_TIMEZONE UTC + +RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list +RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx +RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP ) +RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER ) + +ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz +RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf +ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/ +RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install + +RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars +RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default + +RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo + +RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf + +RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN 
download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf + +RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \ + sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg +RUN cp /etc/services /var/spool/postfix/etc/ + +RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix +ADD nagios.init /etc/sv/nagios/run +ADD apache.init /etc/sv/apache/run +ADD postfix.init /etc/sv/postfix/run +ADD postfix.stop /etc/sv/postfix/finish + +ADD start.sh /usr/local/bin/start_nagios + +ENV APACHE_LOCK_DIR /var/run +ENV APACHE_LOG_DIR /var/log/apache2 + +EXPOSE 80 + +VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"] + +CMD ["/usr/local/bin/start_nagios"] diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/cpuguy83-nagios/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/cpuguy83-nagios/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/cpuguy83-nagios/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/cpuguy83-nagios/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,40 @@ +(from "cpuguy83/ubuntu") +(env "NAGIOS_HOME" "/opt/nagios") +(env "NAGIOS_USER" "nagios") +(env "NAGIOS_GROUP" "nagios") +(env "NAGIOS_CMDUSER" "nagios") +(env "NAGIOS_CMDGROUP" "nagios") +(env "NAGIOSADMIN_USER" "nagiosadmin") +(env "NAGIOSADMIN_PASS" "nagios") +(env "APACHE_RUN_USER" "nagios") +(env "APACHE_RUN_GROUP" "nagios") +(env "NAGIOS_TIMEZONE" "UTC") +(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list") +(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx") +(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )") +(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )") +(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz") +(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf") +(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/") +(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install") +(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars") +(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default") +(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo") +(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ 
\\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf") +(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf") +(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg") +(run "cp /etc/services /var/spool/postfix/etc/") +(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix") +(add "nagios.init" "/etc/sv/nagios/run") +(add "apache.init" "/etc/sv/apache/run") +(add "postfix.init" "/etc/sv/postfix/run") +(add "postfix.stop" "/etc/sv/postfix/finish") +(add "start.sh" "/usr/local/bin/start_nagios") +(env "APACHE_LOCK_DIR" "/var/run") +(env "APACHE_LOG_DIR" "/var/log/apache2") +(expose "80") +(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") +(cmd "/usr/local/bin/start_nagios") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/docker/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/docker/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/docker/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/docker/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,105 @@ +# This file describes the standard way to build Docker, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test +# +# # Publish a release: +# docker run --privileged \ +# -e AWS_S3_BUCKET=baz \ +# -e AWS_ACCESS_KEY=foo \ +# -e AWS_SECRET_KEY=bar \ +# -e GPG_PASSPHRASE=gloubiboulga \ +# docker hack/release.sh +# +# Note: Apparmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. 
+# + +docker-version 0.6.1 +FROM ubuntu:14.04 +MAINTAINER Tianon Gravi (@tianon) + +# Packaged dependencies +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ + apt-utils \ + aufs-tools \ + automake \ + btrfs-tools \ + build-essential \ + curl \ + dpkg-sig \ + git \ + iptables \ + libapparmor-dev \ + libcap-dev \ + libsqlite3-dev \ + lxc=1.0* \ + mercurial \ + pandoc \ + parallel \ + reprepro \ + ruby1.9.1 \ + ruby1.9.1-dev \ + s3cmd=1.1.0* \ + --no-install-recommends + +# Get lvm2 source for compiling statically +RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 +# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags +# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper +# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz +ENV PATH /usr/local/go/bin:$PATH +ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 + +# Compile Go for cross compilation +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 darwin/386 \ + freebsd/amd64 freebsd/386 freebsd/arm +# (set an explicit GOARM of 5 for maximum compatibility) +ENV GOARM 5 +RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' + +# Grab Go's cover tool for dead-simple code coverage testing +RUN go get code.google.com/p/go.tools/cmd/cover + +# TODO replace FPM with some very minimal debhelper stuff +RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 + +# Get the "busybox" image source so we can build locally instead of pulling +RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox + +# Setup s3cmd config +RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . 
/go/src/github.com/docker/docker diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/docker/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/docker/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/docker/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/docker/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,25 @@ +(docker-version) +(from "ubuntu:14.04") +(maintainer "Tianon Gravi (@tianon)") +(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq apt-utils aufs-tools automake btrfs-tools build-essential curl dpkg-sig git iptables libapparmor-dev libcap-dev libsqlite3-dev lxc=1.0* mercurial pandoc parallel reprepro ruby1.9.1 ruby1.9.1-dev s3cmd=1.1.0* --no-install-recommends") +(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") +(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") +(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") +(env "PATH" "/usr/local/go/bin:$PATH") +(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor") +(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") +(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm darwin/amd64 darwin/386 freebsd/amd64 freebsd/386 freebsd/arm") +(env "GOARM" "5") +(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'") +(run "go get code.google.com/p/go.tools/cmd/cover") +(run "gem install --no-rdoc --no-ri fpm --version 1.0.2") +(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") +(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") +(run "git config --global user.email 'docker-dummy@example.com'") +(run "groupadd -r docker") +(run "useradd --create-home --gid docker unprivilegeduser") +(volume "/var/lib/docker") +(workdir "/go/src/github.com/docker/docker") +(env "DOCKER_BUILDTAGS" "apparmor selinux") +(entrypoint "hack/dind") +(copy "." 
"/go/src/github.com/docker/docker") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/escapes/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/escapes/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/escapes/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/escapes/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +MAINTAINER Erik \\Hollensbe \" + +RUN apt-get \update && \ + apt-get \"install znc -y +ADD \conf\\" /.znc + +RUN foo \ + +bar \ + +baz + +CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/escapes/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/escapes/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/escapes/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/escapes/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(maintainer "Erik \\\\Hollensbe \\\"") +(run "apt-get \\update && apt-get \\\"install znc -y") +(add "\\conf\\\\\"" "/.znc") +(run "foo bar baz") +(cmd "/usr\\\"/bin/znc" "-f" "-r") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/influxdb/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/influxdb/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/influxdb/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/influxdb/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,15 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install wget -y +RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb +RUN dpkg -i influxdb_latest_amd64.deb +RUN rm -r /opt/influxdb/shared + +VOLUME /opt/influxdb/shared + +CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml + +EXPOSE 8083 +EXPOSE 8086 +EXPOSE 8090 +EXPOSE 8099 diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/influxdb/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/influxdb/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/influxdb/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/influxdb/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install wget -y") +(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") +(run "dpkg -i influxdb_latest_amd64.deb") +(run "rm -r /opt/influxdb/shared") +(volume "/opt/influxdb/shared") +(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") +(expose "8083") +(expose "8086") +(expose "8090") +(expose "8099") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +CMD '["echo", "Well, JSON in a string is JSON too?"]' diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result --- 
docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]" diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +CMD ['echo','single quotes are invalid JSON'] diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-single-quotes/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-single-quotes/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-single-quotes/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-single-quotes/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +(cmd "['echo','single quotes are invalid JSON']") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +CMD ["echo", "Please, close the brackets when you're done" diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result 
2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +(cmd "[\"echo\", \"Please, close the brackets when you're done\"") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +CMD ["echo", "look ma, no quote!] diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +(cmd "[\"echo\", \"look ma, no quote!]") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +MAINTAINER James Turnbull "james@example.com" +ENV REFRESHED_AT 2014-06-01 +RUN apt-get update +RUN apt-get -y install redis-server redis-tools +EXPOSE 6379 +ENTRYPOINT [ "/usr/bin/redis-server" ] diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/kartar-entrypoint-oddities/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/kartar-entrypoint-oddities/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/kartar-entrypoint-oddities/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/kartar-entrypoint-oddities/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,7 @@ +(from "ubuntu:14.04") +(maintainer "James Turnbull \"james@example.com\"") +(env "REFRESHED_AT" "2014-06-01") +(run "apt-get update") +(run "apt-get -y install redis-server redis-tools") +(expose "6379") +(entrypoint "/usr/bin/redis-server") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,48 @@ +FROM busybox:buildroot-2014.02 + +MAINTAINER docker + +ONBUILD RUN ["echo", "test"] +ONBUILD RUN echo test +ONBUILD COPY . 
/ + + +# RUN Commands \ +# linebreak in comment \ +RUN ["ls", "-la"] +RUN ["echo", "'1234'"] +RUN echo "1234" +RUN echo 1234 +RUN echo '1234' && \ + echo "456" && \ + echo 789 +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /test /test2 /test3/test + +# ENV \ +ENV SCUBA 1 DUBA 3 +ENV SCUBA "1 DUBA 3" + +# CMD \ +CMD ["echo", "test"] +CMD echo test +CMD echo "test" +CMD echo 'test' +CMD echo 'test' | wc - + +#EXPOSE\ +EXPOSE 3000 +EXPOSE 9000 5000 6000 + +USER docker +USER docker:root + +VOLUME ["/test"] +VOLUME ["/test", "/test2"] +VOLUME /test3 + +WORKDIR /test + +ADD . / +COPY . copy diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/lk4d4-the-edge-case-generator/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/lk4d4-the-edge-case-generator/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/lk4d4-the-edge-case-generator/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/lk4d4-the-edge-case-generator/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,29 @@ +(from "busybox:buildroot-2014.02") +(maintainer "docker ") +(onbuild (run "echo" "test")) +(onbuild (run "echo test")) +(onbuild (copy "." "/")) +(run "ls" "-la") +(run "echo" "'1234'") +(run "echo \"1234\"") +(run "echo 1234") +(run "echo '1234' && echo \"456\" && echo 789") +(run "sh -c 'echo root:testpass > /tmp/passwd'") +(run "mkdir -p /test /test2 /test3/test") +(env "SCUBA" "1 DUBA 3") +(env "SCUBA" "\"1 DUBA 3\"") +(cmd "echo" "test") +(cmd "echo test") +(cmd "echo \"test\"") +(cmd "echo 'test'") +(cmd "echo 'test' | wc -") +(expose "3000") +(expose "9000" "5000" "6000") +(user "docker") +(user "docker:root") +(volume "/test") +(volume "/test" "/test2") +(volume "/test3") +(workdir "/test") +(add "." "/") +(copy "." 
"copy") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/mail/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/mail/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/mail/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/mail/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,16 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y +ADD .muttrc / +ADD .offlineimaprc / +ADD .tmux.conf / +ADD mutt /.mutt +ADD vim /.vim +ADD vimrc /.vimrc +ADD crontab /etc/crontab +RUN chmod 644 /etc/crontab +RUN mkdir /Mail +RUN mkdir /.offlineimap +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD setsid cron; tmux -2 diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/mail/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/mail/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/mail/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/mail/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,14 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") +(add ".muttrc" "/") +(add ".offlineimaprc" "/") +(add ".tmux.conf" "/") +(add "mutt" "/.mutt") +(add "vim" "/.vim") +(add "vimrc" "/.vimrc") +(add "crontab" "/etc/crontab") +(run "chmod 644 /etc/crontab") +(run "mkdir /Mail") +(run "mkdir /.offlineimap") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "setsid cron; tmux -2") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/multiple-volumes/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/multiple-volumes/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/multiple-volumes/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/multiple-volumes/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,3 @@ +FROM foo + +VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/multiple-volumes/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/multiple-volumes/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/multiple-volumes/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/multiple-volumes/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,2 @@ +(from "foo") +(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/mumble/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/mumble/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/mumble/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/mumble/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install libcap2-bin mumble-server -y + +ADD ./mumble-server.ini /etc/mumble-server.ini + +CMD /usr/sbin/murmurd diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/mumble/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/mumble/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/mumble/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/mumble/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,4 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get 
install libcap2-bin mumble-server -y") +(add "./mumble-server.ini" "/etc/mumble-server.ini") +(cmd "/usr/sbin/murmurd") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/nginx/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/nginx/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/nginx/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/nginx/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +MAINTAINER Erik Hollensbe + +RUN apt-get update && apt-get install nginx-full -y +RUN rm -rf /etc/nginx +ADD etc /etc/nginx +RUN chown -R root:root /etc/nginx +RUN /usr/sbin/nginx -qt +RUN mkdir /www + +CMD ["/usr/sbin/nginx"] + +VOLUME /www +EXPOSE 80 diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/nginx/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/nginx/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/nginx/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/nginx/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(maintainer "Erik Hollensbe ") +(run "apt-get update && apt-get install nginx-full -y") +(run "rm -rf /etc/nginx") +(add "etc" "/etc/nginx") +(run "chown -R root:root /etc/nginx") +(run "/usr/sbin/nginx -qt") +(run "mkdir /www") +(cmd "/usr/sbin/nginx") +(volume "/www") +(expose "80") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/tf2/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/tf2/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/tf2/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/tf2/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,23 @@ +FROM ubuntu:12.04 + +EXPOSE 27015 +EXPOSE 27005 +EXPOSE 26901 +EXPOSE 27020 + +RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y +RUN mkdir -p /steam +RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam +ADD ./script /steam/script +RUN /steam/steamcmd.sh +runscript /steam/script +RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf +RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf +ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg +ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg +ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg +RUN rm -r /steam/tf2/tf/addons/sourcemod/configs +ADD ./configs /steam/tf2/tf/addons/sourcemod/configs +RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en +RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en + +CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/tf2/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/tf2/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/tf2/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/tf2/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,20 @@ +(from "ubuntu:12.04") +(expose "27015") +(expose "27005") +(expose "26901") +(expose "27020") +(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") +(run "mkdir -p /steam") +(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C 
/steam") +(add "./script" "/steam/script") +(run "/steam/steamcmd.sh +runscript /steam/script") +(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg") +(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg") +(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg") +(run "rm -r /steam/tf2/tf/addons/sourcemod/configs") +(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs") +(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en") +(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en") +(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/weechat/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/weechat/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/weechat/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/weechat/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,9 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y + +ADD .weechat /.weechat +ADD .tmux.conf / +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD zsh -c weechat diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/weechat/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/weechat/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/weechat/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/weechat/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y") +(add ".weechat" "/.weechat") +(add ".tmux.conf" "/") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "zsh -c weechat") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/znc/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles/znc/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/znc/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/znc/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +MAINTAINER Erik Hollensbe + +RUN apt-get update && apt-get install znc -y +ADD conf /.znc + +CMD [ "/usr/bin/znc", "-f", "-r" ] diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles/znc/result docker.io-1.3.2~dfsg1/builder/parser/testfiles/znc/result --- docker.io-0.9.1~dfsg1/builder/parser/testfiles/znc/result 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles/znc/result 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,5 @@ +(from "ubuntu:14.04") +(maintainer "Erik Hollensbe ") +(run "apt-get update && apt-get install znc -y") +(add "conf" "/.znc") +(cmd "/usr/bin/znc" "-f" "-r") diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles-negative/env_equals_env/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles-negative/env_equals_env/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles-negative/env_equals_env/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles-negative/env_equals_env/Dockerfile 2014-11-24 17:38:01.000000000 
+0000 @@ -0,0 +1,3 @@ +FROM busybox + +ENV PATH=PATH diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles-negative/html-page-yes-really-thanks-lk4d4/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles-negative/html-page-yes-really-thanks-lk4d4/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles-negative/html-page-yes-really-thanks-lk4d4/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles-negative/html-page-yes-really-thanks-lk4d4/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,2 @@ + + diff -Nru docker.io-0.9.1~dfsg1/builder/parser/testfiles-negative/shykes-nested-json/Dockerfile docker.io-1.3.2~dfsg1/builder/parser/testfiles-negative/shykes-nested-json/Dockerfile --- docker.io-0.9.1~dfsg1/builder/parser/testfiles-negative/shykes-nested-json/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/testfiles-negative/shykes-nested-json/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +CMD [ "echo", [ "nested json" ] ] diff -Nru docker.io-0.9.1~dfsg1/builder/parser/utils.go docker.io-1.3.2~dfsg1/builder/parser/utils.go --- docker.io-0.9.1~dfsg1/builder/parser/utils.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/parser/utils.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,94 @@ +package parser + +import ( + "fmt" + "strings" +) + +// QuoteString walks characters (after trimming), escapes any quotes and +// escapes, then wraps the whole thing in quotes. Very useful for generating +// argument output in nodes. +func QuoteString(str string) string { + result := "" + chars := strings.Split(strings.TrimSpace(str), "") + + for _, char := range chars { + switch char { + case `"`: + result += `\"` + case `\`: + result += `\\` + default: + result += char + } + } + + return `"` + result + `"` +} + +// dumps the AST defined by `node` as a list of sexps. Returns a string +// suitable for printing. +func (node *Node) Dump() string { + str := "" + str += node.Value + + for _, n := range node.Children { + str += "(" + n.Dump() + ")\n" + } + + if node.Next != nil { + for n := node.Next; n != nil; n = n.Next { + if len(n.Children) > 0 { + str += " " + n.Dump() + } else { + str += " " + QuoteString(n.Value) + } + } + } + + return strings.TrimSpace(str) +} + +// performs the dispatch based on the two primal strings, cmd and args. Please +// look at the dispatch table in parser.go to see how these dispatchers work. +func fullDispatch(cmd, args string) (*Node, map[string]bool, error) { + fn := dispatch[cmd] + + // Ignore invalid Dockerfile instructions + if fn == nil { + fn = parseIgnore + } + + sexp, attrs, err := fn(args) + if err != nil { + return nil, nil, err + } + + return sexp, attrs, nil +} + +// splitCommand takes a single line of text and parses out the cmd and args, +// which are used for dispatching to more exact parsing functions. +func splitCommand(line string) (string, string, error) { + cmdline := TOKEN_WHITESPACE.Split(line, 2) + + if len(cmdline) != 2 { + return "", "", fmt.Errorf("We do not understand this file. Please ensure it is a valid Dockerfile. Parser error at %q", line) + } + + cmd := strings.ToLower(cmdline[0]) + // the cmd should never have whitespace, but it's possible for the args to + // have trailing whitespace. + return cmd, strings.TrimSpace(cmdline[1]), nil +} + +// covers comments and empty lines. Lines should be trimmed before passing to +// this function. 
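+// A minimal sketch of the intended behavior (assuming TOKEN_COMMENT, which is
+// defined in parser.go and not shown in this hunk, matches a whole-line "#"
+// comment):
+//
+//	stripComments("# install deps")  // => ""
+//	stripComments("RUN make install") // => "RUN make install" (no comment token)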
+func stripComments(line string) string { + // string is already trimmed at this point + if TOKEN_COMMENT.MatchString(line) { + return TOKEN_COMMENT.ReplaceAllString(line, "") + } + + return line +} diff -Nru docker.io-0.9.1~dfsg1/builder/support.go docker.io-1.3.2~dfsg1/builder/support.go --- docker.io-0.9.1~dfsg1/builder/support.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builder/support.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,59 @@ +package builder + +import ( + "regexp" + "strings" +) + +var ( + // `\\\\+|[^\\]|\b|\A` - match any number of "\\" (ie, properly-escaped backslashes), or a single non-backslash character, or a word boundary, or beginning-of-line + // `\$` - match literal $ + // `[[:alnum:]_]+` - match things like `$SOME_VAR` + // `{[[:alnum:]_]+}` - match things like `${SOME_VAR}` + tokenEnvInterpolation = regexp.MustCompile(`(\\|\\\\+|[^\\]|\b|\A)\$([[:alnum:]_]+|{[[:alnum:]_]+})`) + // this intentionally punts on more exotic interpolations like ${SOME_VAR%suffix} and lets the shell handle those directly +) + +// handle environment replacement. Used in dispatcher. +func (b *Builder) replaceEnv(str string) string { + for _, match := range tokenEnvInterpolation.FindAllString(str, -1) { + idx := strings.Index(match, "\\$") + if idx != -1 { + if idx+2 >= len(match) { + str = strings.Replace(str, match, "\\$", -1) + continue + } + + prefix := match[:idx] + stripped := match[idx+2:] + str = strings.Replace(str, match, prefix+"$"+stripped, -1) + continue + } + + match = match[strings.Index(match, "$"):] + matchKey := strings.Trim(match, "${}") + + for _, keyval := range b.Config.Env { + tmp := strings.SplitN(keyval, "=", 2) + if tmp[0] == matchKey { + str = strings.Replace(str, match, tmp[1], -1) + break + } + } + } + + return str +} + +func handleJsonArgs(args []string, attributes map[string]bool) []string { + if len(args) == 0 { + return []string{} + } + + if attributes != nil && attributes["json"] { + return args + } + + // literal string command, not an exec array + return []string{strings.Join(args, " ")} +} diff -Nru docker.io-0.9.1~dfsg1/buildfile.go docker.io-1.3.2~dfsg1/buildfile.go --- docker.io-0.9.1~dfsg1/buildfile.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/buildfile.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,812 +0,0 @@ -package docker - -import ( - "crypto/sha256" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/auth" - "github.com/dotcloud/docker/registry" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "net/url" - "os" - "path" - "path/filepath" - "reflect" - "regexp" - "sort" - "strings" -) - -var ( - ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty") -) - -type BuildFile interface { - Build(io.Reader) (string, error) - CmdFrom(string) error - CmdRun(string) error -} - -type buildFile struct { - runtime *Runtime - srv *Server - - image string - maintainer string - config *runconfig.Config - - contextPath string - context *utils.TarSum - - verbose bool - utilizeCache bool - rm bool - - authConfig *auth.AuthConfig - configFile *auth.ConfigFile - - tmpContainers map[string]struct{} - tmpImages map[string]struct{} - - outStream io.Writer - errStream io.Writer - - // Deprecated, original writer used for ImagePull. To be removed. 
- outOld io.Writer - sf *utils.StreamFormatter -} - -func (b *buildFile) clearTmp(containers map[string]struct{}) { - for c := range containers { - tmp := b.runtime.Get(c) - if err := b.runtime.Destroy(tmp); err != nil { - fmt.Fprintf(b.outStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error()) - } else { - fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c)) - } - } -} - -func (b *buildFile) CmdFrom(name string) error { - image, err := b.runtime.repositories.LookupImage(name) - if err != nil { - if b.runtime.graph.IsNotExist(err) { - remote, tag := utils.ParseRepositoryTag(name) - pullRegistryAuth := b.authConfig - if len(b.configFile.Configs) > 0 { - // The request came with a full auth config file, we prefer to use that - endpoint, _, err := registry.ResolveRepositoryName(remote) - if err != nil { - return err - } - resolvedAuth := b.configFile.ResolveAuthConfig(endpoint) - pullRegistryAuth = &resolvedAuth - } - job := b.srv.Eng.Job("pull", remote, tag) - job.SetenvBool("json", b.sf.Json()) - job.SetenvBool("parallel", true) - job.SetenvJson("authConfig", pullRegistryAuth) - job.Stdout.Add(b.outOld) - if err := job.Run(); err != nil { - return err - } - image, err = b.runtime.repositories.LookupImage(name) - if err != nil { - return err - } - } else { - return err - } - } - b.image = image.ID - b.config = &runconfig.Config{} - if image.Config != nil { - b.config = image.Config - } - if b.config.Env == nil || len(b.config.Env) == 0 { - b.config.Env = append(b.config.Env, "HOME=/", "PATH="+defaultPathEnv) - } - // Process ONBUILD triggers if they exist - if nTriggers := len(b.config.OnBuild); nTriggers != 0 { - fmt.Fprintf(b.errStream, "# Executing %d build triggers\n", nTriggers) - } - for n, step := range b.config.OnBuild { - splitStep := strings.Split(step, " ") - stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " ")) - switch stepInstruction { - case "ONBUILD": - return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step) - case "MAINTAINER", "FROM": - return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step) - } - if err := b.BuildStep(fmt.Sprintf("onbuild-%d", n), step); err != nil { - return err - } - } - b.config.OnBuild = []string{} - return nil -} - -// The ONBUILD command declares a build instruction to be executed in any future build -// using the current image as a base. -func (b *buildFile) CmdOnbuild(trigger string) error { - splitTrigger := strings.Split(trigger, " ") - triggerInstruction := strings.ToUpper(strings.Trim(splitTrigger[0], " ")) - switch triggerInstruction { - case "ONBUILD": - return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") - case "MAINTAINER", "FROM": - return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) - } - b.config.OnBuild = append(b.config.OnBuild, trigger) - return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger)) -} - -func (b *buildFile) CmdMaintainer(name string) error { - b.maintainer = name - return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name)) -} - -// probeCache checks to see if image-caching is enabled (`b.utilizeCache`) -// and if so attempts to look up the current `b.image` and `b.config` pair -// in the current server `b.srv`. If an image is found, probeCache returns -// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there -// is any error, it returns `(false, err)`. 
-func (b *buildFile) probeCache() (bool, error) { - if b.utilizeCache { - if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil { - return false, err - } else if cache != nil { - fmt.Fprintf(b.outStream, " ---> Using cache\n") - utils.Debugf("[BUILDER] Use cached version") - b.image = cache.ID - return true, nil - } else { - utils.Debugf("[BUILDER] Cache miss") - } - } - return false, nil -} - -func (b *buildFile) CmdRun(args string) error { - if b.image == "" { - return fmt.Errorf("Please provide a source image with `from` prior to run") - } - config, _, _, err := runconfig.Parse(append([]string{b.image}, b.buildCmdFromJson(args)...), nil) - if err != nil { - return err - } - - cmd := b.config.Cmd - b.config.Cmd = nil - runconfig.Merge(b.config, config) - - defer func(cmd []string) { b.config.Cmd = cmd }(cmd) - - utils.Debugf("Command to be executed: %v", b.config.Cmd) - - hit, err := b.probeCache() - if err != nil { - return err - } - if hit { - return nil - } - - c, err := b.create() - if err != nil { - return err - } - // Ensure that we keep the container mounted until the commit - // to avoid unmounting and then mounting directly again - c.Mount() - defer c.Unmount() - - err = b.run(c) - if err != nil { - return err - } - if err := b.commit(c.ID, cmd, "run"); err != nil { - return err - } - - return nil -} - -func (b *buildFile) FindEnvKey(key string) int { - for k, envVar := range b.config.Env { - envParts := strings.SplitN(envVar, "=", 2) - if key == envParts[0] { - return k - } - } - return -1 -} - -func (b *buildFile) ReplaceEnvMatches(value string) (string, error) { - exp, err := regexp.Compile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)") - if err != nil { - return value, err - } - matches := exp.FindAllString(value, -1) - for _, match := range matches { - match = match[strings.Index(match, "$"):] - matchKey := strings.Trim(match, "${}") - - for _, envVar := range b.config.Env { - envParts := strings.SplitN(envVar, "=", 2) - envKey := envParts[0] - envValue := envParts[1] - - if envKey == matchKey { - value = strings.Replace(value, match, envValue, -1) - break - } - } - } - return value, nil -} - -func (b *buildFile) CmdEnv(args string) error { - tmp := strings.SplitN(args, " ", 2) - if len(tmp) != 2 { - return fmt.Errorf("Invalid ENV format") - } - key := strings.Trim(tmp[0], " \t") - value := strings.Trim(tmp[1], " \t") - - envKey := b.FindEnvKey(key) - replacedValue, err := b.ReplaceEnvMatches(value) - if err != nil { - return err - } - replacedVar := fmt.Sprintf("%s=%s", key, replacedValue) - - if envKey >= 0 { - b.config.Env[envKey] = replacedVar - } else { - b.config.Env = append(b.config.Env, replacedVar) - } - return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", replacedVar)) -} - -func (b *buildFile) buildCmdFromJson(args string) []string { - var cmd []string - if err := json.Unmarshal([]byte(args), &cmd); err != nil { - utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err) - cmd = []string{"/bin/sh", "-c", args} - } - return cmd -} - -func (b *buildFile) CmdCmd(args string) error { - cmd := b.buildCmdFromJson(args) - b.config.Cmd = cmd - if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil { - return err - } - return nil -} - -func (b *buildFile) CmdEntrypoint(args string) error { - entrypoint := b.buildCmdFromJson(args) - b.config.Entrypoint = entrypoint - if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil { - return err - } - return nil -} - -func (b *buildFile) 
CmdExpose(args string) error { - ports := strings.Split(args, " ") - b.config.PortSpecs = append(ports, b.config.PortSpecs...) - return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports)) -} - -func (b *buildFile) CmdUser(args string) error { - b.config.User = args - return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args)) -} - -func (b *buildFile) CmdInsert(args string) error { - return fmt.Errorf("INSERT has been deprecated. Please use ADD instead") -} - -func (b *buildFile) CmdCopy(args string) error { - return fmt.Errorf("COPY has been deprecated. Please use ADD instead") -} - -func (b *buildFile) CmdWorkdir(workdir string) error { - b.config.WorkingDir = workdir - return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) -} - -func (b *buildFile) CmdVolume(args string) error { - if args == "" { - return fmt.Errorf("Volume cannot be empty") - } - - var volume []string - if err := json.Unmarshal([]byte(args), &volume); err != nil { - volume = []string{args} - } - if b.config.Volumes == nil { - b.config.Volumes = map[string]struct{}{} - } - for _, v := range volume { - b.config.Volumes[v] = struct{}{} - } - if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil { - return err - } - return nil -} - -func (b *buildFile) checkPathForAddition(orig string) error { - origPath := path.Join(b.contextPath, orig) - if p, err := filepath.EvalSymlinks(origPath); err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("%s: no such file or directory", orig) - } - return err - } else { - origPath = p - } - if !strings.HasPrefix(origPath, b.contextPath) { - return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath) - } - _, err := os.Stat(origPath) - if err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("%s: no such file or directory", orig) - } - return err - } - return nil -} - -func (b *buildFile) addContext(container *Container, orig, dest string, remote bool) error { - var ( - origPath = path.Join(b.contextPath, orig) - destPath = path.Join(container.BasefsPath(), dest) - ) - // Preserve the trailing '/' - if strings.HasSuffix(dest, "/") { - destPath = destPath + "/" - } - fi, err := os.Stat(origPath) - if err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("%s: no such file or directory", orig) - } - return err - } - - if fi.IsDir() { - if err := archive.CopyWithTar(origPath, destPath); err != nil { - return err - } - return nil - } - - // First try to unpack the source as an archive - // to support the untar feature we need to clean up the path a little bit - // because tar is very forgiving. First we need to strip off the archive's - // filename from the path but this is only added if it does not end in / . - tarDest := destPath - if strings.HasSuffix(tarDest, "/") { - tarDest = filepath.Dir(destPath) - } - - // If we are adding a remote file, do not try to untar it - if !remote { - // try to successfully untar the orig - if err := archive.UntarPath(origPath, tarDest); err == nil { - return nil - } - utils.Debugf("Couldn't untar %s to %s: %s", origPath, destPath, err) - } - - // If that fails, just copy it as a regular file - // but do not use all the magic path handling for the tar path - if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil { - return err - } - if err := archive.CopyWithTar(origPath, destPath); err != nil { - return err - } - return nil -} - -func (b *buildFile) CmdAdd(args string) error { - if b.context == nil { - return fmt.Errorf("No context given. 
Impossible to use ADD") - } - tmp := strings.SplitN(args, " ", 2) - if len(tmp) != 2 { - return fmt.Errorf("Invalid ADD format") - } - - orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t")) - if err != nil { - return err - } - - dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t")) - if err != nil { - return err - } - - cmd := b.config.Cmd - b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", orig, dest)} - b.config.Image = b.image - - var ( - origPath = orig - destPath = dest - remoteHash string - isRemote bool - ) - - if utils.IsURL(orig) { - isRemote = true - resp, err := utils.Download(orig) - if err != nil { - return err - } - tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote") - if err != nil { - return err - } - tmpFileName := path.Join(tmpDirName, "tmp") - tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if err != nil { - return err - } - defer os.RemoveAll(tmpDirName) - if _, err = io.Copy(tmpFile, resp.Body); err != nil { - tmpFile.Close() - return err - } - origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) - tmpFile.Close() - - // Process the checksum - r, err := archive.Tar(tmpFileName, archive.Uncompressed) - if err != nil { - return err - } - tarSum := utils.TarSum{Reader: r, DisableCompression: true} - remoteHash = tarSum.Sum(nil) - r.Close() - - // If the destination is a directory, figure out the filename. - if strings.HasSuffix(dest, "/") { - u, err := url.Parse(orig) - if err != nil { - return err - } - path := u.Path - if strings.HasSuffix(path, "/") { - path = path[:len(path)-1] - } - parts := strings.Split(path, "/") - filename := parts[len(parts)-1] - if filename == "" { - return fmt.Errorf("cannot determine filename from url: %s", u) - } - destPath = dest + filename - } - } - - if err := b.checkPathForAddition(origPath); err != nil { - return err - } - - // Hash path and check the cache - if b.utilizeCache { - var ( - hash string - sums = b.context.GetSums() - ) - - if remoteHash != "" { - hash = remoteHash - } else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil { - return err - } else if fi.IsDir() { - var subfiles []string - for file, sum := range sums { - absFile := path.Join(b.contextPath, file) - absOrigPath := path.Join(b.contextPath, origPath) - if strings.HasPrefix(absFile, absOrigPath) { - subfiles = append(subfiles, sum) - } - } - sort.Strings(subfiles) - hasher := sha256.New() - hasher.Write([]byte(strings.Join(subfiles, ","))) - hash = "dir:" + hex.EncodeToString(hasher.Sum(nil)) - } else { - if origPath[0] == '/' && len(origPath) > 1 { - origPath = origPath[1:] - } - origPath = strings.TrimPrefix(origPath, "./") - if h, ok := sums[origPath]; ok { - hash = "file:" + h - } - } - b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", hash, dest)} - hit, err := b.probeCache() - if err != nil { - return err - } - // If we do not have a hash, never use the cache - if hit && hash != "" { - return nil - } - } - - // Create the container and start it - container, _, err := b.runtime.Create(b.config, "") - if err != nil { - return err - } - b.tmpContainers[container.ID] = struct{}{} - - if err := container.Mount(); err != nil { - return err - } - defer container.Unmount() - - if err := b.addContext(container, origPath, destPath, isRemote); err != nil { - return err - } - - if err := b.commit(container.ID, cmd, fmt.Sprintf("ADD %s in %s", orig, dest)); err != nil { - return err - } - b.config.Cmd = cmd - 
return nil -} - -type StdoutFormater struct { - io.Writer - *utils.StreamFormatter -} - -func (sf *StdoutFormater) Write(buf []byte) (int, error) { - formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) - n, err := sf.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite - } - return len(buf), err -} - -type StderrFormater struct { - io.Writer - *utils.StreamFormatter -} - -func (sf *StderrFormater) Write(buf []byte) (int, error) { - formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") - n, err := sf.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite - } - return len(buf), err -} - -func (b *buildFile) create() (*Container, error) { - if b.image == "" { - return nil, fmt.Errorf("Please provide a source image with `from` prior to run") - } - b.config.Image = b.image - - // Create the container and start it - c, _, err := b.runtime.Create(b.config, "") - if err != nil { - return nil, err - } - b.tmpContainers[c.ID] = struct{}{} - fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID)) - - // override the entry point that may have been picked up from the base image - c.Path = b.config.Cmd[0] - c.Args = b.config.Cmd[1:] - - return c, nil -} - -func (b *buildFile) run(c *Container) error { - var errCh chan error - - if b.verbose { - errCh = utils.Go(func() error { - return <-c.Attach(nil, nil, b.outStream, b.errStream) - }) - } - - //start the container - if err := c.Start(); err != nil { - return err - } - - if errCh != nil { - if err := <-errCh; err != nil { - return err - } - } - - // Wait for it to finish - if ret := c.Wait(); ret != 0 { - err := &utils.JSONError{ - Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret), - Code: ret, - } - return err - } - - return nil -} - -// Commit the container with the autorun command -func (b *buildFile) commit(id string, autoCmd []string, comment string) error { - if b.image == "" { - return fmt.Errorf("Please provide a source image with `from` prior to commit") - } - b.config.Image = b.image - if id == "" { - cmd := b.config.Cmd - b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment} - defer func(cmd []string) { b.config.Cmd = cmd }(cmd) - - hit, err := b.probeCache() - if err != nil { - return err - } - if hit { - return nil - } - - container, warnings, err := b.runtime.Create(b.config, "") - if err != nil { - return err - } - for _, warning := range warnings { - fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning) - } - b.tmpContainers[container.ID] = struct{}{} - fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID)) - id = container.ID - if err := container.Mount(); err != nil { - return err - } - defer container.Unmount() - } - - container := b.runtime.Get(id) - if container == nil { - return fmt.Errorf("An error occured while creating the container") - } - - // Note: Actually copy the struct - autoConfig := *b.config - autoConfig.Cmd = autoCmd - // Commit the container - image, err := b.runtime.Commit(container, "", "", "", b.maintainer, &autoConfig) - if err != nil { - return err - } - b.tmpImages[image.ID] = struct{}{} - b.image = image.ID - return nil -} - -// Long lines can be split with a backslash -var lineContinuation = regexp.MustCompile(`\s*\\\s*\n`) - -func (b *buildFile) Build(context io.Reader) (string, error) { - tmpdirPath, err := ioutil.TempDir("", "docker-build") - if err != nil { - return "", err - } - - decompressedStream, err := 
archive.DecompressStream(context) - if err != nil { - return "", err - } - - b.context = &utils.TarSum{Reader: decompressedStream, DisableCompression: true} - if err := archive.Untar(b.context, tmpdirPath, nil); err != nil { - return "", err - } - defer os.RemoveAll(tmpdirPath) - - b.contextPath = tmpdirPath - filename := path.Join(tmpdirPath, "Dockerfile") - if _, err := os.Stat(filename); os.IsNotExist(err) { - return "", fmt.Errorf("Can't build a directory with no Dockerfile") - } - fileBytes, err := ioutil.ReadFile(filename) - if err != nil { - return "", err - } - if len(fileBytes) == 0 { - return "", ErrDockerfileEmpty - } - dockerfile := string(fileBytes) - dockerfile = lineContinuation.ReplaceAllString(dockerfile, "") - stepN := 0 - for _, line := range strings.Split(dockerfile, "\n") { - line = strings.Trim(strings.Replace(line, "\t", " ", -1), " \t\r\n") - // Skip comments and empty line - if len(line) == 0 || line[0] == '#' { - continue - } - if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil { - return "", err - } - stepN += 1 - - } - if b.image != "" { - fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image)) - if b.rm { - b.clearTmp(b.tmpContainers) - } - return b.image, nil - } - return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n") -} - -// BuildStep parses a single build step from `instruction` and executes it in the current context. -func (b *buildFile) BuildStep(name, expression string) error { - fmt.Fprintf(b.outStream, "Step %s : %s\n", name, expression) - tmp := strings.SplitN(expression, " ", 2) - if len(tmp) != 2 { - return fmt.Errorf("Invalid Dockerfile format") - } - instruction := strings.ToLower(strings.Trim(tmp[0], " ")) - arguments := strings.Trim(tmp[1], " ") - - method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:])) - if !exists { - fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction)) - return nil - } - - ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface() - if ret != nil { - return ret.(error) - } - - fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image)) - return nil -} - -func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *auth.AuthConfig, authConfigFile *auth.ConfigFile) BuildFile { - return &buildFile{ - runtime: srv.runtime, - srv: srv, - config: &runconfig.Config{}, - outStream: outStream, - errStream: errStream, - tmpContainers: make(map[string]struct{}), - tmpImages: make(map[string]struct{}), - verbose: verbose, - utilizeCache: utilizeCache, - rm: rm, - sf: sf, - authConfig: auth, - configFile: authConfigFile, - outOld: outOld, - } -} diff -Nru docker.io-0.9.1~dfsg1/builtins/builtins.go docker.io-1.3.2~dfsg1/builtins/builtins.go --- docker.io-0.9.1~dfsg1/builtins/builtins.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/builtins/builtins.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,21 +1,40 @@ package builtins import ( - "github.com/dotcloud/docker/engine" + "runtime" - "github.com/dotcloud/docker" - "github.com/dotcloud/docker/api" - "github.com/dotcloud/docker/networkdriver/lxc" + "github.com/docker/docker/api" + apiserver "github.com/docker/docker/api/server" + "github.com/docker/docker/daemon/networkdriver/bridge" + "github.com/docker/docker/dockerversion" + 
"github.com/docker/docker/engine" + "github.com/docker/docker/events" + "github.com/docker/docker/pkg/parsers/kernel" ) -func Register(eng *engine.Engine) { - daemon(eng) - remote(eng) +func Register(eng *engine.Engine) error { + if err := daemon(eng); err != nil { + return err + } + if err := remote(eng); err != nil { + return err + } + if err := events.New().Install(eng); err != nil { + return err + } + if err := eng.Register("version", dockerVersion); err != nil { + return err + } + + return nil } // remote: a RESTful api for cross-docker communication -func remote(eng *engine.Engine) { - eng.Register("serveapi", api.ServeApi) +func remote(eng *engine.Engine) error { + if err := eng.Register("serveapi", apiserver.ServeApi); err != nil { + return err + } + return eng.Register("acceptconnections", apiserver.AcceptConnections) } // daemon: a default execution and storage backend for Docker on Linux, @@ -33,8 +52,24 @@ // // These components should be broken off into plugins of their own. // -func daemon(eng *engine.Engine) { - eng.Register("initserver", docker.InitServer) - eng.Register("init_networkdriver", lxc.InitDriver) - eng.Register("version", docker.GetVersion) +func daemon(eng *engine.Engine) error { + return eng.Register("init_networkdriver", bridge.InitDriver) +} + +// builtins jobs independent of any subsystem +func dockerVersion(job *engine.Job) engine.Status { + v := &engine.Env{} + v.SetJson("Version", dockerversion.VERSION) + v.SetJson("ApiVersion", api.APIVERSION) + v.SetJson("GitCommit", dockerversion.GITCOMMIT) + v.Set("GoVersion", runtime.Version()) + v.Set("Os", runtime.GOOS) + v.Set("Arch", runtime.GOARCH) + if kernelVersion, err := kernel.GetKernelVersion(); err == nil { + v.Set("KernelVersion", kernelVersion.String()) + } + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK } diff -Nru docker.io-0.9.1~dfsg1/CHANGELOG.md docker.io-1.3.2~dfsg1/CHANGELOG.md --- docker.io-0.9.1~dfsg1/CHANGELOG.md 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/CHANGELOG.md 2014-11-24 17:38:01.000000000 +0000 @@ -1,5 +1,305 @@ # Changelog +## 1.3.2 (2014-11-20) + +#### Security +- Fix tar breakout vulnerability +* Extractions are now sandboxed chroot +- Security options are no longer committed to images + +#### Runtime +- Fix deadlock in `docker ps -f exited=1` +- Fix a bug when `--volumes-from` references a container that failed to start + +#### Registry ++ `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16 +* Private registries whose IPs fall in the 127.0.0.0/8 range do no need the `--insecure-registry` flag +- Skip the experimental registry v2 API when mirroring is enabled + +## 1.3.1 (2014-10-28) + +#### Security +* Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry ++ Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified + +#### Runtime +- Fix issue where volumes would not be shared + +#### Client +- Fix issue with `--iptables=false` not automatically setting `--ip-masq=false` +- Fix docker run output to non-TTY stdout + +#### Builder +- Fix escaping `$` for environment variables +- Fix issue with lowercase `onbuild` Dockerfile instruction +- Restrict envrionment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER` + +## 1.3.0 (2014-10-14) + +#### Notable features since 1.2.0 ++ Docker `exec` allows you to run additional processes inside existing containers ++ Docker 
`create` gives you the ability to create a container via the CLI without executing a process ++ `--security-opts` options to allow users to customize container labels and apparmor profiles ++ Docker `ps` filters +- Wildcard support for COPY/ADD ++ Move production URLs to get.docker.com from get.docker.io ++ Allocate IP address on the bridge inside a valid CIDR ++ Use drone.io for PR and CI testing ++ Ability to set up an official registry mirror ++ Ability to save multiple images with docker `save` + +## 1.2.0 (2014-08-20) + +#### Runtime ++ Make /etc/hosts, /etc/resolv.conf and /etc/hostname editable at runtime ++ Auto-restart containers using policies ++ Use /var/lib/docker/tmp for large temporary files ++ `--cap-add` and `--cap-drop` to tweak which Linux capabilities you want ++ `--device` to use devices in containers + +#### Client ++ `docker search` on private registries ++ Add `exited` filter to `docker ps --filter` +* `docker rm -f` now kills instead of stopping ++ Support for IPv6 addresses in `--dns` flag + +#### Proxy ++ Proxy instances in separate processes +* Small bug fix on UDP proxy + +## 1.1.2 (2014-07-23) + +#### Runtime ++ Fix port allocation for existing containers ++ Fix container restart on daemon restart + +#### Packaging ++ Fix /etc/init.d/docker issue on Debian + +## 1.1.1 (2014-07-09) + +#### Builder +* Fix issue with ADD + +## 1.1.0 (2014-07-03) + +#### Notable features since 1.0.1 ++ Add `.dockerignore` support ++ Pause containers during `docker commit` ++ Add `--tail` to `docker logs` + +#### Builder ++ Allow a tar file as context for `docker build` +* Fix issue with whitespace and multi-line instructions in `Dockerfiles` + +#### Runtime +* Overall performance improvements +* Allow `/` as source of `docker run -v` +* Fix port allocation +* Fix bug in `docker save` +* Add links information to `docker inspect` + +#### Client +* Improve command line parsing for `docker commit` + +#### Remote API +* Improve status code for the `start` and `stop` endpoints + +## 1.0.1 (2014-06-19) + +#### Notable features since 1.0.0 +* Enhance security for the LXC driver + +#### Builder +* Fix `ONBUILD` instruction passed to grandchildren + +#### Runtime +* Fix events subscription +* Fix /etc/hostname file with host networking +* Allow `-h` and `--net=none` +* Fix issue with hotplug devices in `--privileged` + +#### Client +* Fix artifacts with events +* Fix a panic with empty flags +* Fix `docker cp` on Mac OS X + +#### Miscellaneous +* Fix compilation on Mac OS X +* Fix several races + +## 1.0.0 (2014-06-09) + +#### Notable features since 0.12.0 +* Production support + +## 0.12.0 (2014-06-05) + +#### Notable features since 0.11.0 +* 40+ various improvements to stability, performance and usability +* New `COPY` Dockerfile instruction to allow copying a local file from the context into the container without ever extracting if the file is a tar file +* Inherit file permissions from the host on `ADD` +* New `pause` and `unpause` commands to allow pausing and unpausing of containers using the cgroup freezer +* The `images` command has a `-f`/`--filter` option to filter the list of images +* Add `--force-rm` to clean up after a failed build +* Standardize JSON keys in Remote API to CamelCase +* A pull from `docker run` now assumes the `latest` tag if not specified +* Enhance security on Linux capabilities and device nodes + +## 0.11.1 (2014-05-07) + +#### Registry +- Fix push and pull to private registry + +## 0.11.0 (2014-05-07) + +#### Notable features since 0.10.0 + +* SELinux support for mount and process labels 
+* Linked containers can be accessed by hostname +* Use the new `--net` flag to allow advanced network configuration such as host networking so that containers can use the host's network interfaces +* Add a ping endpoint to the Remote API to do healthchecks of your docker daemon +* Logs can now be returned with an optional timestamp +* Docker now works with registries that support SHA-512 +* Multiple registry endpoints are supported to allow registry mirrors + +## 0.10.0 (2014-04-08) + +#### Builder +- Fix printing multiple messages on a single line. Fixes broken output during builds. +- Follow symlinks inside container's root for ADD build instructions. +- Fix EXPOSE caching. + +#### Documentation +- Add the new options of `docker ps` to the documentation. +- Add the options of `docker restart` to the documentation. +- Update daemon docs and help messages for --iptables and --ip-forward. +- Updated apt-cacher-ng docs example. +- Remove duplicate description of --mtu from docs. +- Add missing -t and -v for `docker images` to the docs. +- Add fixes to the cli docs. +- Update libcontainer docs. +- Update images in docs to remove references to AUFS and LXC. +- Update the nodejs_web_app in the docs to use the new epel RPM address. +- Fix external link on security of containers. +- Update remote API docs. +- Add image size to history docs. +- Be explicit about binding to all interfaces in redis example. +- Document DisableNetwork flag in the 1.10 remote api. +- Document that `--lxc-conf` is lxc only. +- Add chef usage documentation. +- Add example for an image with multiple tags for `docker load`. +- Explain what `docker run -a` does in the docs. + +#### Contrib +- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. +- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. +- Remove inotifywait hack from the upstart host-integration example because it's not necessary any more. +- Add check-config script to contrib. +- Fix fish shell completion. + +#### Hack +* Clean up "go test" output from "make test" to be much more readable/scannable. +* Exclude more "definitely not unit tested Go source code" directories from hack/make/test. ++ Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. +- Include contributed completions in Ubuntu PPA. ++ Add cli integration tests. +* Add tweaks to the hack scripts to make them simpler. + +#### Remote API ++ Add TLS auth support for API. +* Move git clone from daemon to client. +- Fix content-type detection in docker cp. +* Split API into 2 go packages. + +#### Runtime +* Support hairpin NAT without going through Docker server. +- devicemapper: succeed immediately when removing non-existing devices. +- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time and unlock while sleeping). +- devicemapper: increase timeout in waitClose to 10 seconds. +- devicemapper: ensure we shut down thin pool cleanly. +- devicemapper: pass info, rather than hash to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice. +- devicemapper: avoid AB-BA deadlock. +- devicemapper: make shutdown better/faster. +- improve alpha sorting in mflag. +- Remove manual http cookie management because the cookiejar is being used. +- Use BSD raw mode on Darwin. Fixes nano, tmux and others. +- Add FreeBSD support for the client. +- Merge auth package into registry. +- Add deprecation warning for -t on `docker pull`. 
+- Remove goroutine leak on error. +- Update parseLxcInfo to comply with new lxc1.0 format. +- Fix attach exit on darwin. +- Improve deprecation message. +- Retry retrieving the layer metadata up to 5 times for `docker pull`. +- Only unshare the mount namespace for execin. +- Merge existing config when committing. +- Disable daemon startup timeout. +- Fix issue #4681: add loopback interface when networking is disabled. +- Add failing test case for issue #4681. +- Send SIGTERM to child, instead of SIGKILL. +- Show the driver and the kernel version in `docker info` even when not in debug mode. +- Always symlink /dev/ptmx for libcontainer. This fixes console-related problems. +- Fix issue caused by the absence of /etc/apparmor.d. +- Don't leave empty cidFile behind when failing to create the container. +- Mount cgroups automatically if they're not mounted already. +- Use mock for search tests. +- Update to double-dash everywhere. +- Move .dockerenv parsing to lxc driver. +- Move all bind-mounts in the container inside the namespace. +- Don't use separate bind mount for container. +- Always symlink /dev/ptmx for libcontainer. +- Don't kill by pid for other drivers. +- Add initial logging to libcontainer. +* Sort by port in `docker ps`. +- Move networking drivers into runtime top level package. ++ Add --no-prune to `docker rmi`. ++ Add time since exit in `docker ps`. +- graphdriver: add build tags. +- Prevent allocation of previously allocated ports & improve port allocation. +* Add support for --since/--before in `docker ps`. +- Clean up container stop. ++ Add support for configurable dns search domains. +- Add support for relative WORKDIR instructions. +- Add --output flag for docker save. +- Remove duplication of DNS entries in config merging. +- Add cpuset.cpus to cgroups and native driver options. +- Remove docker-ci. +- Promote btrfs. btrfs is no longer considered experimental. +- Add --input flag to `docker load`. +- Return error when existing bridge doesn't match IP address. +- Strip comments before parsing line continuations to avoid interpreting instructions as comments. +- Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces. +- Add systemd implementation of cgroups and make containers show up as systemd units. +- Fix commit and import when no repository is specified. +- Remount /var/lib/docker as --private to fix scaling issue. +- Use the environment's proxy when pinging the remote registry. +- Reduce error level from harmless errors. +* Allow --volumes-from to be individual files. +- Fix expanding buffer in StdCopy. +- Set error regardless of attach or stdin. This fixes #3364. +- Add support for --env-file to load environment variables from files. +- Symlink /etc/mtab and /proc/mounts. +- Allow pushing a single tag. +- Shut down containers cleanly at shutdown and wait forever for the containers to shut down. This makes container shutdown on daemon shutdown work properly via SIGTERM. +- Don't throw error when starting an already running container. +- Fix dynamic port allocation limit. +- remove setupDev from libcontainer. +- Add API version to `docker version`. +- Return correct exit code when receiving signal and make SIGQUIT quit without cleanup. +- Fix --volumes-from mount failure. +- Allow non-privileged containers to create device nodes. +- Skip login tests because of external dependency on a hosted service. +- Deprecate `docker images --tree` and `docker images --viz`. +- Deprecate `docker insert`. 
+- Include base abstraction for apparmor. This fixes some apparmor related problems on Ubuntu 14.04. +- Add specific error message when hitting 401 over HTTP on push. +- Fix absolute volume check. +- Remove volumes-from from the config. +- Move DNS options to hostconfig. +- Update the apparmor profile for libcontainer. +- Add deprecation notice for `docker commit -run`. + ## 0.9.1 (2014-03-24) #### Builder @@ -80,7 +380,7 @@ - Add newlines to the JSON stream functions. #### Runtime -* Do not ping the registry from the CLI. All requests to registres flow through the daemon. +* Do not ping the registry from the CLI. All requests to registries flow through the daemon. - Check for nil information return in the lxc driver. This fixes panics with older lxc versions. - Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently. - Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device. @@ -672,7 +972,7 @@ + Add domainname support + Implement image filtering with path.Match -* Remove unnecesasry warnings +* Remove unnecessary warnings * Remove os/user dependency * Only mount the hostname file when the config exists * Handle signals within the `docker login` command @@ -695,7 +995,7 @@ + Hack: Vendor all dependencies * Remote API: Bump to v1.5 * Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc. -* Documentation: General improvments +* Documentation: General improvements ## 0.6.1 (2013-08-23) @@ -965,7 +1265,7 @@ * Prevent rm of running containers * Use go1.1 cookiejar - Fix issue detaching from running TTY container -- Forbid parralel push/pull for a single image/repo. Fixes #311 +- Forbid parallel push/pull for a single image/repo. Fixes #311 - Fix race condition within Run command when attaching. 
 #### Client
@@ -1081,7 +1381,7 @@
 + Add caching to docker builder
 + Add support for docker builder with native API as top level command
 + Implement ENV within docker builder
-- Check the command existance prior create and add Unit tests for the case
+- Check the command existence prior create and add Unit tests for the case
 * use any whitespaces instead of tabs
 
 #### Runtime
@@ -1120,13 +1420,13 @@
 #### Runtime
 
-- Fix the command existance check
+- Fix the command existence check
 - strings.Split may return an empty string on no match
 - Fix an index out of range crash if cgroup memory is not
 
 #### Documentation
 
-* Various improvments
+* Various improvements
 + New example: sharing data between 2 couchdb databases
 
 #### Other
 
@@ -1156,7 +1456,7 @@
 ## 0.2.0 (2013-04-23)
 
 - Runtime: ghost containers can be killed and waited for
-* Documentation: update install intructions
+* Documentation: update install instructions
 - Packaging: fix Vagrantfile
 - Development: automate releasing binaries and ubuntu packages
 + Add a changelog
diff -Nru docker.io-0.9.1~dfsg1/commands_unit_test.go docker.io-1.3.2~dfsg1/commands_unit_test.go
--- docker.io-0.9.1~dfsg1/commands_unit_test.go	2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/commands_unit_test.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,160 +0,0 @@
-package docker
-
-import (
-	"github.com/dotcloud/docker/runconfig"
-	"strings"
-	"testing"
-)
-
-func parse(t *testing.T, args string) (*runconfig.Config, *runconfig.HostConfig, error) {
-	config, hostConfig, _, err := runconfig.Parse(strings.Split(args+" ubuntu bash", " "), nil)
-	return config, hostConfig, err
-}
-
-func mustParse(t *testing.T, args string) (*runconfig.Config, *runconfig.HostConfig) {
-	config, hostConfig, err := parse(t, args)
-	if err != nil {
-		t.Fatal(err)
-	}
-	return config, hostConfig
-}
-
-func TestParseRunLinks(t *testing.T) {
-	if _, hostConfig := mustParse(t, "-link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" {
-		t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links)
-	}
-	if _, hostConfig := mustParse(t, "-link a:b -link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" {
-		t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links)
-	}
-	if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 {
-		t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links)
-	}
-
-	if _, _, err := parse(t, "-link a"); err == nil {
-		t.Fatalf("Error parsing links. `-link a` should be an error but is not")
-	}
-	if _, _, err := parse(t, "-link"); err == nil {
-		t.Fatalf("Error parsing links. `-link` should be an error but is not")
-	}
-}
-
-func TestParseRunAttach(t *testing.T) {
-	if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr {
-		t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
-	}
-	if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr {
-		t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
-	}
-	if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
-		t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
-	}
-	if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
-		t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
-	}
-
-	if _, _, err := parse(t, "-a"); err == nil {
-		t.Fatalf("Error parsing attach flags, `-a` should be an error but is not")
-	}
-	if _, _, err := parse(t, "-a invalid"); err == nil {
-		t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not")
-	}
-	if _, _, err := parse(t, "-a invalid -a stdout"); err == nil {
-		t.Fatalf("Error parsing attach flags, `-a invalid -a stdout` should be an error but is not")
-	}
-	if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil {
-		t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not")
-	}
-	if _, _, err := parse(t, "-a stdin -d"); err == nil {
-		t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not")
-	}
-	if _, _, err := parse(t, "-a stdout -d"); err == nil {
-		t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not")
-	}
-	if _, _, err := parse(t, "-a stderr -d"); err == nil {
-		t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not")
-	}
-	if _, _, err := parse(t, "-d -rm"); err == nil {
-		t.Fatalf("Error parsing attach flags, `-d -rm` should be an error but is not")
-	}
-}
-
-func TestParseRunVolumes(t *testing.T) {
-	if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil {
-		t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds)
-	} else if _, exists := config.Volumes["/tmp"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
-	}
-
-	if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil {
-		t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds)
-	} else if _, exists := config.Volumes["/tmp"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
-	} else if _, exists := config.Volumes["/var"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes)
-	}
-
-	if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
-		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containerTmp. Received %v", hostConfig.Binds)
-	} else if _, exists := config.Volumes["/containerTmp"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
-	}
-
-	if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" || hostConfig.Binds[1] != "/hostVar:/containerVar" {
-		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containerTmp and /hostVar into /containerVar. Received %v", hostConfig.Binds)
-	} else if _, exists := config.Volumes["/containerTmp"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
-	} else if _, exists := config.Volumes["/containerVar"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
-	}
-
-	if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp:ro" || hostConfig.Binds[1] != "/hostVar:/containerVar:rw" {
-		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containerTmp and /hostVar into /containerVar. Received %v", hostConfig.Binds)
-	} else if _, exists := config.Volumes["/containerTmp"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
-	} else if _, exists := config.Volumes["/containerVar"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
-	}
-
-	if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
-		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containerTmp. Received %v", hostConfig.Binds)
-	} else if _, exists := config.Volumes["/containerTmp"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
-	} else if _, exists := config.Volumes["/containerVar"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
-	}
-
-	if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil {
-		t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-bound. Received %v", hostConfig.Binds)
-	} else if len(config.Volumes) != 0 {
-		t.Fatalf("Error parsing volume flags, without volume, no volume should be present. Received %v", config.Volumes)
-	}
-
-	if _, _, err := parse(t, "-v /"); err == nil {
-		t.Fatalf("Expected error, but got none")
-	}
-
-	if _, _, err := parse(t, "-v /:/"); err == nil {
-		t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't")
-	}
-	if _, _, err := parse(t, "-v"); err == nil {
-		t.Fatalf("Error parsing volume flags, `-v` should fail but didn't")
-	}
-	if _, _, err := parse(t, "-v /tmp:"); err == nil {
-		t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't")
-	}
-	if _, _, err := parse(t, "-v /tmp:ro"); err == nil {
-		t.Fatalf("Error parsing volume flags, `-v /tmp:ro` should fail but didn't")
-	}
-	if _, _, err := parse(t, "-v /tmp::"); err == nil {
-		t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't")
-	}
-	if _, _, err := parse(t, "-v :"); err == nil {
-		t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't")
-	}
-	if _, _, err := parse(t, "-v ::"); err == nil {
-		t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't")
-	}
-	if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil {
-		t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't")
-	}
-}
diff -Nru docker.io-0.9.1~dfsg1/config.go docker.io-1.3.2~dfsg1/config.go
--- docker.io-0.9.1~dfsg1/config.go	2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/config.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,67 +0,0 @@
-package docker
-
-import (
-	"net"
-
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/networkdriver"
-)
-
-const (
-	defaultNetworkMtu    = 1500
-	DisableNetworkBridge = "none"
-)
-
-// FIXME: separate runtime configuration from http api configuration
-type DaemonConfig struct {
-	Pidfile                     string
-	Root                        string
-	AutoRestart                 bool
-	Dns                         []string
-	EnableIptables              bool
-	EnableIpForward             bool
-	DefaultIp                   net.IP
-	BridgeIface                 string
-	BridgeIP                    string
-	InterContainerCommunication bool
-	GraphDriver                 string
-	ExecDriver                  string
-	Mtu                         int
-	DisableNetwork              bool
-}
-
-// DaemonConfigFromJob creates and returns a new DaemonConfig object
-// by parsing the contents of a job's environment.
-func DaemonConfigFromJob(job *engine.Job) *DaemonConfig {
-	config := &DaemonConfig{
-		Pidfile:                     job.Getenv("Pidfile"),
-		Root:                        job.Getenv("Root"),
-		AutoRestart:                 job.GetenvBool("AutoRestart"),
-		EnableIptables:              job.GetenvBool("EnableIptables"),
-		EnableIpForward:             job.GetenvBool("EnableIpForward"),
-		BridgeIP:                    job.Getenv("BridgeIP"),
-		BridgeIface:                 job.Getenv("BridgeIface"),
-		DefaultIp:                   net.ParseIP(job.Getenv("DefaultIp")),
-		InterContainerCommunication: job.GetenvBool("InterContainerCommunication"),
-		GraphDriver:                 job.Getenv("GraphDriver"),
-		ExecDriver:                  job.Getenv("ExecDriver"),
-	}
-	if dns := job.GetenvList("Dns"); dns != nil {
-		config.Dns = dns
-	}
-	if mtu := job.GetenvInt("Mtu"); mtu != 0 {
-		config.Mtu = mtu
-	} else {
-		config.Mtu = GetDefaultNetworkMtu()
-	}
-	config.DisableNetwork = config.BridgeIface == DisableNetworkBridge
-
-	return config
-}
-
-func GetDefaultNetworkMtu() int {
-	if iface, err := networkdriver.GetDefaultRouteIface(); err == nil {
-		return iface.MTU
-	}
-	return defaultNetworkMtu
-}
diff -Nru docker.io-0.9.1~dfsg1/container.go docker.io-1.3.2~dfsg1/container.go
--- docker.io-0.9.1~dfsg1/container.go	2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/container.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,1146 +0,0 @@
-package docker
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/execdriver"
-	"github.com/dotcloud/docker/graphdriver"
-	"github.com/dotcloud/docker/links"
-	"github.com/dotcloud/docker/nat"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/utils"
-	"io"
-	"io/ioutil"
-	"log"
-	"os"
-	"path"
-	"strings"
-	"sync"
-	"syscall"
-	"time"
-)
-
-const defaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-
-var (
-	ErrNotATTY               = errors.New("The PTY is not a file")
-	ErrNoTTY                 = errors.New("No PTY found")
-	ErrContainerStart        = errors.New("The container failed to start. Unknown error")
-	ErrContainerStartTimeout = errors.New("The container failed to start due to a timeout.")
-)
-
-type Container struct {
-	sync.Mutex
-	root   string // Path to the "home" of the container, including metadata.
-	basefs string // Path to the graphdriver mountpoint
-
-	ID string
-
-	Created time.Time
-
-	Path string
-	Args []string
-
-	Config *runconfig.Config
-	State  State
-	Image  string
-
-	NetworkSettings *NetworkSettings
-
-	ResolvConfPath string
-	HostnamePath   string
-	HostsPath      string
-	Name           string
-	Driver         string
-	ExecDriver     string
-
-	command   *execdriver.Command
-	stdout    *utils.WriteBroadcaster
-	stderr    *utils.WriteBroadcaster
-	stdin     io.ReadCloser
-	stdinPipe io.WriteCloser
-
-	runtime *Runtime
-
-	waitLock chan struct{}
-	Volumes  map[string]string
-	// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
-	// Easier than migrating older container configs :)
-	VolumesRW  map[string]bool
-	hostConfig *runconfig.HostConfig
-
-	activeLinks map[string]*links.Link
-}
-
-// FIXME: move deprecated port stuff to nat to clean up the core.
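// Aside (illustration, not part of the diff): DaemonConfigFromJob above is
// driven by a job whose environment carries the daemon settings, read back
// with the Getenv* accessors. A minimal, hypothetical sketch of that calling
// pattern; the job name and the SetenvBool helper are assumptions made for
// illustration only:
//
//	job := eng.Job("initserver")                 // hypothetical job name
//	job.Setenv("Pidfile", "/var/run/docker.pid")
//	job.Setenv("Root", "/var/lib/docker")
//	job.SetenvBool("AutoRestart", true)          // assumed setter mirroring GetenvBool
//	config := DaemonConfigFromJob(job)           // Mtu falls back to GetDefaultNetworkMtu()
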
-type PortMapping map[string]string // Deprecated - -type NetworkSettings struct { - IPAddress string - IPPrefixLen int - Gateway string - Bridge string - PortMapping map[string]PortMapping // Deprecated - Ports nat.PortMap -} - -func (settings *NetworkSettings) PortMappingAPI() *engine.Table { - var outs = engine.NewTable("", 0) - for port, bindings := range settings.Ports { - p, _ := nat.ParsePort(port.Port()) - if len(bindings) == 0 { - out := &engine.Env{} - out.SetInt("PublicPort", p) - out.Set("Type", port.Proto()) - outs.Add(out) - continue - } - for _, binding := range bindings { - out := &engine.Env{} - h, _ := nat.ParsePort(binding.HostPort) - out.SetInt("PrivatePort", p) - out.SetInt("PublicPort", h) - out.Set("Type", port.Proto()) - out.Set("IP", binding.HostIp) - outs.Add(out) - } - } - return outs -} - -// Inject the io.Reader at the given path. Note: do not close the reader -func (container *Container) Inject(file io.Reader, pth string) error { - if err := container.Mount(); err != nil { - return fmt.Errorf("inject: error mounting container %s: %s", container.ID, err) - } - defer container.Unmount() - - // Return error if path exists - destPath := path.Join(container.basefs, pth) - if _, err := os.Stat(destPath); err == nil { - // Since err is nil, the path could be stat'd and it exists - return fmt.Errorf("%s exists", pth) - } else if !os.IsNotExist(err) { - // Expect err might be that the file doesn't exist, so - // if it's some other error, return that. - - return err - } - - // Make sure the directory exists - if err := os.MkdirAll(path.Join(container.basefs, path.Dir(pth)), 0755); err != nil { - return err - } - - dest, err := os.Create(destPath) - if err != nil { - return err - } - defer dest.Close() - - if _, err := io.Copy(dest, file); err != nil { - return err - } - return nil -} - -func (container *Container) When() time.Time { - return container.Created -} - -func (container *Container) FromDisk() error { - data, err := ioutil.ReadFile(container.jsonPath()) - if err != nil { - return err - } - // Load container settings - // udp broke compat of docker.PortMapping, but it's not used when loading a container, we can skip it - if err := json.Unmarshal(data, container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") { - return err - } - return container.readHostConfig() -} - -func (container *Container) ToDisk() (err error) { - data, err := json.Marshal(container) - if err != nil { - return - } - err = ioutil.WriteFile(container.jsonPath(), data, 0666) - if err != nil { - return - } - return container.writeHostConfig() -} - -func (container *Container) readHostConfig() error { - container.hostConfig = &runconfig.HostConfig{} - // If the hostconfig file does not exist, do not read it. - // (We still have to initialize container.hostConfig, - // but that's OK, since we just did that above.) 
- _, err := os.Stat(container.hostConfigPath()) - if os.IsNotExist(err) { - return nil - } - data, err := ioutil.ReadFile(container.hostConfigPath()) - if err != nil { - return err - } - return json.Unmarshal(data, container.hostConfig) -} - -func (container *Container) writeHostConfig() (err error) { - data, err := json.Marshal(container.hostConfig) - if err != nil { - return - } - return ioutil.WriteFile(container.hostConfigPath(), data, 0666) -} - -func (container *Container) generateEnvConfig(env []string) error { - data, err := json.Marshal(env) - if err != nil { - return err - } - p, err := container.EnvConfigPath() - if err != nil { - return err - } - ioutil.WriteFile(p, data, 0600) - return nil -} - -func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error { - var cStdout, cStderr io.ReadCloser - - var nJobs int - errors := make(chan error, 3) - if stdin != nil && container.Config.OpenStdin { - nJobs += 1 - if cStdin, err := container.StdinPipe(); err != nil { - errors <- err - } else { - go func() { - utils.Debugf("attach: stdin: begin") - defer utils.Debugf("attach: stdin: end") - // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr - if container.Config.StdinOnce && !container.Config.Tty { - defer cStdin.Close() - } else { - defer func() { - if cStdout != nil { - cStdout.Close() - } - if cStderr != nil { - cStderr.Close() - } - }() - } - if container.Config.Tty { - _, err = utils.CopyEscapable(cStdin, stdin) - } else { - _, err = io.Copy(cStdin, stdin) - } - if err == io.ErrClosedPipe { - err = nil - } - if err != nil { - utils.Errorf("attach: stdin: %s", err) - } - errors <- err - }() - } - } - if stdout != nil { - nJobs += 1 - if p, err := container.StdoutPipe(); err != nil { - errors <- err - } else { - cStdout = p - go func() { - utils.Debugf("attach: stdout: begin") - defer utils.Debugf("attach: stdout: end") - // If we are in StdinOnce mode, then close stdin - if container.Config.StdinOnce && stdin != nil { - defer stdin.Close() - } - if stdinCloser != nil { - defer stdinCloser.Close() - } - _, err := io.Copy(stdout, cStdout) - if err == io.ErrClosedPipe { - err = nil - } - if err != nil { - utils.Errorf("attach: stdout: %s", err) - } - errors <- err - }() - } - } else { - go func() { - if stdinCloser != nil { - defer stdinCloser.Close() - } - if cStdout, err := container.StdoutPipe(); err != nil { - utils.Errorf("attach: stdout pipe: %s", err) - } else { - io.Copy(&utils.NopWriter{}, cStdout) - } - }() - } - if stderr != nil { - nJobs += 1 - if p, err := container.StderrPipe(); err != nil { - errors <- err - } else { - cStderr = p - go func() { - utils.Debugf("attach: stderr: begin") - defer utils.Debugf("attach: stderr: end") - // If we are in StdinOnce mode, then close stdin - if container.Config.StdinOnce && stdin != nil { - defer stdin.Close() - } - if stdinCloser != nil { - defer stdinCloser.Close() - } - _, err := io.Copy(stderr, cStderr) - if err == io.ErrClosedPipe { - err = nil - } - if err != nil { - utils.Errorf("attach: stderr: %s", err) - } - errors <- err - }() - } - } else { - go func() { - if stdinCloser != nil { - defer stdinCloser.Close() - } - - if cStderr, err := container.StderrPipe(); err != nil { - utils.Errorf("attach: stdout pipe: %s", err) - } else { - io.Copy(&utils.NopWriter{}, cStderr) - } - }() - } - - return utils.Go(func() error { - defer func() { - if cStdout != nil { - cStdout.Close() - } - if cStderr != nil { - cStderr.Close() - } - }() - - 
// FIXME: how to clean up the stdin goroutine without the unwanted side effect - // of closing the passed stdin? Add an intermediary io.Pipe? - for i := 0; i < nJobs; i += 1 { - utils.Debugf("attach: waiting for job %d/%d", i+1, nJobs) - if err := <-errors; err != nil { - utils.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err) - return err - } - utils.Debugf("attach: job %d completed successfully", i+1) - } - utils.Debugf("attach: all jobs completed successfully") - return nil - }) -} - -func populateCommand(c *Container) { - var ( - en *execdriver.Network - driverConfig []string - ) - - en = &execdriver.Network{ - Mtu: c.runtime.config.Mtu, - Interface: nil, - } - - if !c.Config.NetworkDisabled { - network := c.NetworkSettings - en.Interface = &execdriver.NetworkInterface{ - Gateway: network.Gateway, - Bridge: network.Bridge, - IPAddress: network.IPAddress, - IPPrefixLen: network.IPPrefixLen, - } - } - - if lxcConf := c.hostConfig.LxcConf; lxcConf != nil { - for _, pair := range lxcConf { - driverConfig = append(driverConfig, fmt.Sprintf("%s = %s", pair.Key, pair.Value)) - } - } - resources := &execdriver.Resources{ - Memory: c.Config.Memory, - MemorySwap: c.Config.MemorySwap, - CpuShares: c.Config.CpuShares, - } - c.command = &execdriver.Command{ - ID: c.ID, - Privileged: c.hostConfig.Privileged, - Rootfs: c.RootfsPath(), - InitPath: "/.dockerinit", - Entrypoint: c.Path, - Arguments: c.Args, - WorkingDir: c.Config.WorkingDir, - Network: en, - Tty: c.Config.Tty, - User: c.Config.User, - Config: driverConfig, - Resources: resources, - } - c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true} -} - -func (container *Container) Start() (err error) { - container.Lock() - defer container.Unlock() - - if container.State.IsRunning() { - return fmt.Errorf("The container %s is already running.", container.ID) - } - - defer func() { - if err != nil { - container.cleanup() - } - }() - - if err := container.Mount(); err != nil { - return err - } - - if container.runtime.config.DisableNetwork { - container.Config.NetworkDisabled = true - container.buildHostnameAndHostsFiles("127.0.1.1") - } else { - if err := container.allocateNetwork(); err != nil { - return err - } - container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress) - } - - // Make sure the config is compatible with the current kernel - if container.Config.Memory > 0 && !container.runtime.sysInfo.MemoryLimit { - log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n") - container.Config.Memory = 0 - } - if container.Config.Memory > 0 && !container.runtime.sysInfo.SwapLimit { - log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n") - container.Config.MemorySwap = -1 - } - - if container.runtime.sysInfo.IPv4ForwardingDisabled { - log.Printf("WARNING: IPv4 forwarding is disabled. 
Networking will not work")
-	}
-
-	if err := prepareVolumesForContainer(container); err != nil {
-		return err
-	}
-
-	// Setup environment
-	env := []string{
-		"HOME=/",
-		"PATH=" + defaultPathEnv,
-		"HOSTNAME=" + container.Config.Hostname,
-	}
-
-	if container.Config.Tty {
-		env = append(env, "TERM=xterm")
-	}
-
-	// Init any links between the parent and children
-	runtime := container.runtime
-
-	children, err := runtime.Children(container.Name)
-	if err != nil {
-		return err
-	}
-
-	if len(children) > 0 {
-		container.activeLinks = make(map[string]*links.Link, len(children))
-
-		// If we encounter an error make sure that we rollback any network
-		// config and ip table changes
-		rollback := func() {
-			for _, link := range container.activeLinks {
-				link.Disable()
-			}
-			container.activeLinks = nil
-		}
-
-		for linkAlias, child := range children {
-			if !child.State.IsRunning() {
-				return fmt.Errorf("Cannot link to a non-running container: %s AS %s", child.Name, linkAlias)
-			}
-
-			link, err := links.NewLink(
-				container.NetworkSettings.IPAddress,
-				child.NetworkSettings.IPAddress,
-				linkAlias,
-				child.Config.Env,
-				child.Config.ExposedPorts,
-				runtime.eng)
-
-			if err != nil {
-				rollback()
-				return err
-			}
-
-			container.activeLinks[link.Alias()] = link
-			if err := link.Enable(); err != nil {
-				rollback()
-				return err
-			}
-
-			for _, envVar := range link.ToEnv() {
-				env = append(env, envVar)
-			}
-		}
-	}
-
-	// because the env on the container can override certain default values
-	// we need to replace the 'env' keys where they match and append anything
-	// else.
-	env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env)
-	if err := container.generateEnvConfig(env); err != nil {
-		return err
-	}
-
-	if container.Config.WorkingDir != "" {
-		container.Config.WorkingDir = path.Clean(container.Config.WorkingDir)
-		if err := os.MkdirAll(path.Join(container.basefs, container.Config.WorkingDir), 0755); err != nil {
-			return err
-		}
-	}
-
-	envPath, err := container.EnvConfigPath()
-	if err != nil {
-		return err
-	}
-
-	if err := mountVolumesForContainer(container, envPath); err != nil {
-		return err
-	}
-
-	populateCommand(container)
-	container.command.Env = env
-
-	// Setup logging of stdout and stderr to disk
-	if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
-		return err
-	}
-	if err := container.runtime.LogToDisk(container.stderr, container.logPath("json"), "stderr"); err != nil {
-		return err
-	}
-	container.waitLock = make(chan struct{})
-
-	callbackLock := make(chan struct{})
-	callback := func(command *execdriver.Command) {
-		container.State.SetRunning(command.Pid())
-		if command.Tty {
-			// The callback is called after the process Start()
-			// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
-			// which we close here.
-			if c, ok := command.Stdout.(io.Closer); ok {
-				c.Close()
-			}
-		}
-		if err := container.ToDisk(); err != nil {
-			utils.Debugf("%s", err)
-		}
-		close(callbackLock)
-	}
-
-	// We use a callback here instead of a goroutine and a chan for
-	// synchronization purposes
-	cErr := utils.Go(func() error { return container.monitor(callback) })
-
-	// Start should not return until the process is actually running
-	select {
-	case <-callbackLock:
-	case err := <-cErr:
-		return err
-	}
-	return nil
-}
-
-func (container *Container) Run() error {
-	if err := container.Start(); err != nil {
-		return err
-	}
-	container.Wait()
-	return nil
-}
-
-func (container *Container) Output() (output []byte, err error) {
-	pipe, err := container.StdoutPipe()
-	if err != nil {
-		return nil, err
-	}
-	defer pipe.Close()
-	if err := container.Start(); err != nil {
-		return nil, err
-	}
-	output, err = ioutil.ReadAll(pipe)
-	container.Wait()
-	return output, err
-}
-
-// Container.StdinPipe returns a WriteCloser which can be used to feed data
-// to the standard input of the container's active process.
-// Container.StdoutPipe and Container.StderrPipe each return a ReadCloser
-// which can be used to retrieve the standard output (and error) generated
-// by the container's active process. The output (and error) are actually
-// copied and delivered to all StdoutPipe and StderrPipe consumers, using
-// a kind of "broadcaster".
-
-func (container *Container) StdinPipe() (io.WriteCloser, error) {
-	return container.stdinPipe, nil
-}
-
-func (container *Container) StdoutPipe() (io.ReadCloser, error) {
-	reader, writer := io.Pipe()
-	container.stdout.AddWriter(writer, "")
-	return utils.NewBufReader(reader), nil
-}
-
-func (container *Container) StderrPipe() (io.ReadCloser, error) {
-	reader, writer := io.Pipe()
-	container.stderr.AddWriter(writer, "")
-	return utils.NewBufReader(reader), nil
-}
-
-func (container *Container) buildHostnameAndHostsFiles(IP string) {
-	container.HostnamePath = path.Join(container.root, "hostname")
-	ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
-
-	hostsContent := []byte(`
-127.0.0.1	localhost
-::1		localhost ip6-localhost ip6-loopback
-fe00::0		ip6-localnet
-ff00::0		ip6-mcastprefix
-ff02::1		ip6-allnodes
-ff02::2		ip6-allrouters
-`)
-
-	container.HostsPath = path.Join(container.root, "hosts")
-
-	if container.Config.Domainname != "" {
-		hostsContent = append([]byte(fmt.Sprintf("%s\t%s.%s %s\n", IP, container.Config.Hostname, container.Config.Domainname, container.Config.Hostname)), hostsContent...)
-	} else if !container.Config.NetworkDisabled {
-		hostsContent = append([]byte(fmt.Sprintf("%s\t%s\n", IP, container.Config.Hostname)), hostsContent...)
- } - - ioutil.WriteFile(container.HostsPath, hostsContent, 0644) -} - -func (container *Container) allocateNetwork() error { - if container.Config.NetworkDisabled { - return nil - } - - var ( - env *engine.Env - err error - eng = container.runtime.eng - ) - - if container.State.IsGhost() { - if container.runtime.config.DisableNetwork { - env = &engine.Env{} - } else { - currentIP := container.NetworkSettings.IPAddress - - job := eng.Job("allocate_interface", container.ID) - if currentIP != "" { - job.Setenv("RequestIP", currentIP) - } - - env, err = job.Stdout.AddEnv() - if err != nil { - return err - } - - if err := job.Run(); err != nil { - return err - } - } - } else { - job := eng.Job("allocate_interface", container.ID) - env, err = job.Stdout.AddEnv() - if err != nil { - return err - } - if err := job.Run(); err != nil { - return err - } - } - - if container.Config.PortSpecs != nil { - utils.Debugf("Migrating port mappings for container: %s", strings.Join(container.Config.PortSpecs, ", ")) - if err := migratePortMappings(container.Config, container.hostConfig); err != nil { - return err - } - container.Config.PortSpecs = nil - if err := container.writeHostConfig(); err != nil { - return err - } - } - - var ( - portSpecs = make(nat.PortSet) - bindings = make(nat.PortMap) - ) - - if !container.State.IsGhost() { - if container.Config.ExposedPorts != nil { - portSpecs = container.Config.ExposedPorts - } - if container.hostConfig.PortBindings != nil { - bindings = container.hostConfig.PortBindings - } - } else { - if container.NetworkSettings.Ports != nil { - for port, binding := range container.NetworkSettings.Ports { - portSpecs[port] = struct{}{} - bindings[port] = binding - } - } - } - - container.NetworkSettings.PortMapping = nil - - for port := range portSpecs { - binding := bindings[port] - if container.hostConfig.PublishAllPorts && len(binding) == 0 { - binding = append(binding, nat.PortBinding{}) - } - - for i := 0; i < len(binding); i++ { - b := binding[i] - - portJob := eng.Job("allocate_port", container.ID) - portJob.Setenv("HostIP", b.HostIp) - portJob.Setenv("HostPort", b.HostPort) - portJob.Setenv("Proto", port.Proto()) - portJob.Setenv("ContainerPort", port.Port()) - - portEnv, err := portJob.Stdout.AddEnv() - if err != nil { - return err - } - if err := portJob.Run(); err != nil { - eng.Job("release_interface", container.ID).Run() - return err - } - b.HostIp = portEnv.Get("HostIP") - b.HostPort = portEnv.Get("HostPort") - - binding[i] = b - } - bindings[port] = binding - } - container.writeHostConfig() - - container.NetworkSettings.Ports = bindings - - container.NetworkSettings.Bridge = env.Get("Bridge") - container.NetworkSettings.IPAddress = env.Get("IP") - container.NetworkSettings.IPPrefixLen = env.GetInt("IPPrefixLen") - container.NetworkSettings.Gateway = env.Get("Gateway") - - return nil -} - -func (container *Container) releaseNetwork() { - if container.Config.NetworkDisabled { - return - } - eng := container.runtime.eng - - eng.Job("release_interface", container.ID).Run() - container.NetworkSettings = &NetworkSettings{} -} - -func (container *Container) monitor(callback execdriver.StartCallback) error { - var ( - err error - exitCode int - ) - - pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin) - exitCode, err = container.runtime.Run(container, pipes, callback) - if err != nil { - utils.Errorf("Error running container: %s", err) - } - - if container.runtime != nil && container.runtime.srv != nil && 
container.runtime.srv.IsRunning() {
-		container.State.SetStopped(exitCode)
-
-		// FIXME: there is a race condition here which causes this to fail during the unit tests.
-		// If another goroutine was waiting for Wait() to return before removing the container's root
-		// from the filesystem... At this point it may already have done so.
-		// This is because State.setStopped() has already been called, and has caused Wait()
-		// to return.
-		// FIXME: why are we serializing running state to disk in the first place?
-		//log.Printf("%s: Failed to dump configuration to the disk: %s", container.ID, err)
-		if err := container.ToDisk(); err != nil {
-			utils.Errorf("Error dumping container state to disk: %s\n", err)
-		}
-	}
-
-	// Cleanup
-	container.cleanup()
-
-	// Re-create a brand new stdin pipe once the container exited
-	if container.Config.OpenStdin {
-		container.stdin, container.stdinPipe = io.Pipe()
-	}
-
-	if container.runtime != nil && container.runtime.srv != nil {
-		container.runtime.srv.LogEvent("die", container.ID, container.runtime.repositories.ImageName(container.Image))
-	}
-
-	close(container.waitLock)
-
-	return err
-}
-
-func (container *Container) cleanup() {
-	container.releaseNetwork()
-
-	// Disable all active links
-	if container.activeLinks != nil {
-		for _, link := range container.activeLinks {
-			link.Disable()
-		}
-	}
-	if container.Config.OpenStdin {
-		if err := container.stdin.Close(); err != nil {
-			utils.Errorf("%s: Error closing stdin: %s", container.ID, err)
-		}
-	}
-	if err := container.stdout.CloseWriters(); err != nil {
-		utils.Errorf("%s: Error closing stdout: %s", container.ID, err)
-	}
-	if err := container.stderr.CloseWriters(); err != nil {
-		utils.Errorf("%s: Error closing stderr: %s", container.ID, err)
-	}
-	if container.command != nil && container.command.Terminal != nil {
-		if err := container.command.Terminal.Close(); err != nil {
-			utils.Errorf("%s: Error closing terminal: %s", container.ID, err)
-		}
-	}
-
-	unmountVolumesForContainer(container)
-
-	if err := container.Unmount(); err != nil {
-		log.Printf("%v: Failed to umount filesystem: %v", container.ID, err)
-	}
-}
-
-func (container *Container) kill(sig int) error {
-	container.Lock()
-	defer container.Unlock()
-
-	if !container.State.IsRunning() {
-		return nil
-	}
-	return container.runtime.Kill(container, sig)
-}
-
-func (container *Container) Kill() error {
-	if !container.State.IsRunning() {
-		return nil
-	}
-
-	// 1. Send SIGKILL
-	if err := container.kill(9); err != nil {
-		return err
-	}
-
-	// 2. Wait for the process to die; as a last resort, try to kill the process directly
-	if err := container.WaitTimeout(10 * time.Second); err != nil {
-		if container.command == nil {
-			return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", utils.TruncateID(container.ID))
-		}
-		log.Printf("Container %s failed to exit within 10 seconds of lxc-kill - trying direct SIGKILL", utils.TruncateID(container.ID))
-		if err := container.runtime.Kill(container, 9); err != nil {
-			return err
-		}
-	}
-
-	container.Wait()
-	return nil
-}
-
-func (container *Container) Stop(seconds int) error {
-	if !container.State.IsRunning() {
-		return nil
-	}
-
-	// 1. Send a SIGTERM
-	if err := container.kill(15); err != nil {
-		utils.Debugf("Error sending kill SIGTERM: %s", err)
-		log.Print("Failed to send SIGTERM to the process, force killing")
-		if err := container.kill(9); err != nil {
-			return err
-		}
-	}
-
-	// 2.
Wait for the process to exit on its own - if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil { - log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds) - // 3. If it doesn't, then send SIGKILL - if err := container.Kill(); err != nil { - return err - } - } - return nil -} - -func (container *Container) Restart(seconds int) error { - // Avoid unnecessarily unmounting and then directly mounting - // the container when the container stops and then starts - // again - if err := container.Mount(); err == nil { - defer container.Unmount() - } - - if err := container.Stop(seconds); err != nil { - return err - } - return container.Start() -} - -// Wait blocks until the container stops running, then returns its exit code. -func (container *Container) Wait() int { - <-container.waitLock - return container.State.GetExitCode() -} - -func (container *Container) Resize(h, w int) error { - return container.command.Terminal.Resize(h, w) -} - -func (container *Container) ExportRw() (archive.Archive, error) { - if err := container.Mount(); err != nil { - return nil, err - } - if container.runtime == nil { - return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID) - } - archive, err := container.runtime.Diff(container) - if err != nil { - container.Unmount() - return nil, err - } - return utils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - container.Unmount() - return err - }), nil -} - -func (container *Container) Export() (archive.Archive, error) { - if err := container.Mount(); err != nil { - return nil, err - } - - archive, err := archive.Tar(container.basefs, archive.Uncompressed) - if err != nil { - container.Unmount() - return nil, err - } - return utils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - container.Unmount() - return err - }), nil -} - -func (container *Container) WaitTimeout(timeout time.Duration) error { - done := make(chan bool) - go func() { - container.Wait() - done <- true - }() - - select { - case <-time.After(timeout): - return fmt.Errorf("Timed Out") - case <-done: - return nil - } -} - -func (container *Container) Mount() error { - return container.runtime.Mount(container) -} - -func (container *Container) Changes() ([]archive.Change, error) { - return container.runtime.Changes(container) -} - -func (container *Container) GetImage() (*Image, error) { - if container.runtime == nil { - return nil, fmt.Errorf("Can't get image of unregistered container") - } - return container.runtime.graph.Get(container.Image) -} - -func (container *Container) Unmount() error { - return container.runtime.Unmount(container) -} - -func (container *Container) logPath(name string) string { - return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.ID, name)) -} - -func (container *Container) ReadLog(name string) (io.Reader, error) { - return os.Open(container.logPath(name)) -} - -func (container *Container) hostConfigPath() string { - return path.Join(container.root, "hostconfig.json") -} - -func (container *Container) jsonPath() string { - return path.Join(container.root, "config.json") -} - -func (container *Container) EnvConfigPath() (string, error) { - p := path.Join(container.root, "config.env") - if _, err := os.Stat(p); err != nil { - if os.IsNotExist(err) { - f, err := os.Create(p) - if err != nil { - return "", err - } - f.Close() - } else { - return "", err - } - } - return p, nil -} - -// This method must 
be exported to be used from the lxc template -// This directory is only usable when the container is running -func (container *Container) RootfsPath() string { - return path.Join(container.root, "root") -} - -// This is the stand-alone version of the root fs, without any additional mounts. -// This directory is usable whenever the container is mounted (and not unmounted) -func (container *Container) BasefsPath() string { - return container.basefs -} - -func validateID(id string) error { - if id == "" { - return fmt.Errorf("Invalid empty id") - } - return nil -} - -// GetSize, return real size, virtual size -func (container *Container) GetSize() (int64, int64) { - var ( - sizeRw, sizeRootfs int64 - err error - driver = container.runtime.driver - ) - - if err := container.Mount(); err != nil { - utils.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err) - return sizeRw, sizeRootfs - } - defer container.Unmount() - - if differ, ok := container.runtime.driver.(graphdriver.Differ); ok { - sizeRw, err = differ.DiffSize(container.ID) - if err != nil { - utils.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err) - // FIXME: GetSize should return an error. Not changing it now in case - // there is a side-effect. - sizeRw = -1 - } - } else { - changes, _ := container.Changes() - if changes != nil { - sizeRw = archive.ChangesSize(container.basefs, changes) - } else { - sizeRw = -1 - } - } - - if _, err = os.Stat(container.basefs); err != nil { - if sizeRootfs, err = utils.TreeSize(container.basefs); err != nil { - sizeRootfs = -1 - } - } - return sizeRw, sizeRootfs -} - -func (container *Container) Copy(resource string) (io.ReadCloser, error) { - if err := container.Mount(); err != nil { - return nil, err - } - var filter []string - basePath := path.Join(container.basefs, resource) - stat, err := os.Stat(basePath) - if err != nil { - container.Unmount() - return nil, err - } - if !stat.IsDir() { - d, f := path.Split(basePath) - basePath = d - filter = []string{f} - } else { - filter = []string{path.Base(basePath)} - basePath = path.Dir(basePath) - } - - archive, err := archive.TarFilter(basePath, &archive.TarOptions{ - Compression: archive.Uncompressed, - Includes: filter, - }) - if err != nil { - return nil, err - } - return utils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - container.Unmount() - return err - }), nil -} - -// Returns true if the container exposes a certain port -func (container *Container) Exposes(p nat.Port) bool { - _, exists := container.Config.ExposedPorts[p] - return exists -} - -func (container *Container) GetPtyMaster() (*os.File, error) { - ttyConsole, ok := container.command.Terminal.(execdriver.TtyTerminal) - if !ok { - return nil, ErrNoTTY - } - return ttyConsole.Master(), nil -} diff -Nru docker.io-0.9.1~dfsg1/container_unit_test.go docker.io-1.3.2~dfsg1/container_unit_test.go --- docker.io-0.9.1~dfsg1/container_unit_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/container_unit_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,145 +0,0 @@ -package docker - -import ( - "github.com/dotcloud/docker/nat" - "testing" -) - -func TestParseNetworkOptsPrivateOnly(t *testing.T) { - ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::80"}) - if err != nil { - t.Fatal(err) - } - if len(ports) != 1 { - t.Logf("Expected 1 got %d", len(ports)) - t.FailNow() - } - if len(bindings) != 1 { - t.Logf("Expected 1 got %d", len(bindings)) - 
t.FailNow() - } - for k := range ports { - if k.Proto() != "tcp" { - t.Logf("Expected tcp got %s", k.Proto()) - t.Fail() - } - if k.Port() != "80" { - t.Logf("Expected 80 got %s", k.Port()) - t.Fail() - } - b, exists := bindings[k] - if !exists { - t.Log("Binding does not exist") - t.FailNow() - } - if len(b) != 1 { - t.Logf("Expected 1 got %d", len(b)) - t.FailNow() - } - s := b[0] - if s.HostPort != "" { - t.Logf("Expected \"\" got %s", s.HostPort) - t.Fail() - } - if s.HostIp != "192.168.1.100" { - t.Fail() - } - } -} - -func TestParseNetworkOptsPublic(t *testing.T) { - ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100:8080:80"}) - if err != nil { - t.Fatal(err) - } - if len(ports) != 1 { - t.Logf("Expected 1 got %d", len(ports)) - t.FailNow() - } - if len(bindings) != 1 { - t.Logf("Expected 1 got %d", len(bindings)) - t.FailNow() - } - for k := range ports { - if k.Proto() != "tcp" { - t.Logf("Expected tcp got %s", k.Proto()) - t.Fail() - } - if k.Port() != "80" { - t.Logf("Expected 80 got %s", k.Port()) - t.Fail() - } - b, exists := bindings[k] - if !exists { - t.Log("Binding does not exist") - t.FailNow() - } - if len(b) != 1 { - t.Logf("Expected 1 got %d", len(b)) - t.FailNow() - } - s := b[0] - if s.HostPort != "8080" { - t.Logf("Expected 8080 got %s", s.HostPort) - t.Fail() - } - if s.HostIp != "192.168.1.100" { - t.Fail() - } - } -} - -func TestParseNetworkOptsUdp(t *testing.T) { - ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::6000/udp"}) - if err != nil { - t.Fatal(err) - } - if len(ports) != 1 { - t.Logf("Expected 1 got %d", len(ports)) - t.FailNow() - } - if len(bindings) != 1 { - t.Logf("Expected 1 got %d", len(bindings)) - t.FailNow() - } - for k := range ports { - if k.Proto() != "udp" { - t.Logf("Expected udp got %s", k.Proto()) - t.Fail() - } - if k.Port() != "6000" { - t.Logf("Expected 6000 got %s", k.Port()) - t.Fail() - } - b, exists := bindings[k] - if !exists { - t.Log("Binding does not exist") - t.FailNow() - } - if len(b) != 1 { - t.Logf("Expected 1 got %d", len(b)) - t.FailNow() - } - s := b[0] - if s.HostPort != "" { - t.Logf("Expected \"\" got %s", s.HostPort) - t.Fail() - } - if s.HostIp != "192.168.1.100" { - t.Fail() - } - } -} - -func TestGetFullName(t *testing.T) { - name, err := getFullName("testing") - if err != nil { - t.Fatal(err) - } - if name != "/testing" { - t.Fatalf("Expected /testing got %s", name) - } - if _, err := getFullName(""); err == nil { - t.Fatal("Error should not be nil") - } -} diff -Nru docker.io-0.9.1~dfsg1/contrib/check-config.sh docker.io-1.3.2~dfsg1/contrib/check-config.sh --- docker.io-0.9.1~dfsg1/contrib/check-config.sh 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/check-config.sh 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,171 @@ +#!/usr/bin/env bash +set -e + +# bits of this were adapted from lxc-checkconfig +# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in + +possibleConfigs=( + '/proc/config.gz' + "/boot/config-$(uname -r)" + "/usr/src/linux-$(uname -r)/.config" + '/usr/src/linux/.config' +) +: ${CONFIG:="${possibleConfigs[0]}"} + +if ! 
command -v zgrep &> /dev/null; then + zgrep() { + zcat "$2" | grep "$1" + } +fi + +is_set() { + zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null +} + +# see http://en.wikipedia.org/wiki/ANSI_escape_code#Colors +declare -A colors=( + [black]=30 + [red]=31 + [green]=32 + [yellow]=33 + [blue]=34 + [magenta]=35 + [cyan]=36 + [white]=37 +) +color() { + color=() + if [ "$1" = 'bold' ]; then + color+=( '1' ) + shift + fi + if [ $# -gt 0 ] && [ "${colors[$1]}" ]; then + color+=( "${colors[$1]}" ) + fi + local IFS=';' + echo -en '\033['"${color[*]}"m +} +wrap_color() { + text="$1" + shift + color "$@" + echo -n "$text" + color reset + echo +} + +wrap_good() { + echo "$(wrap_color "$1" white): $(wrap_color "$2" green)" +} +wrap_bad() { + echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)" +} +wrap_warning() { + wrap_color >&2 "$*" red +} + +check_flag() { + if is_set "$1"; then + wrap_good "CONFIG_$1" 'enabled' + else + wrap_bad "CONFIG_$1" 'missing' + fi +} + +check_flags() { + for flag in "$@"; do + echo "- $(check_flag "$flag")" + done +} + +if [ ! -e "$CONFIG" ]; then + wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..." + for tryConfig in "${possibleConfigs[@]}"; do + if [ -e "$tryConfig" ]; then + CONFIG="$tryConfig" + break + fi + done + if [ ! -e "$CONFIG" ]; then + wrap_warning "error: cannot find kernel config" + wrap_warning " try running this script again, specifying the kernel config:" + wrap_warning " CONFIG=/path/to/kernel/.config $0" + exit 1 + fi +fi + +wrap_color "info: reading kernel config from $CONFIG ..." white +echo + +echo 'Generally Necessary:' + +echo -n '- ' +cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)" +cgroupDir="$(dirname "$cgroupSubsystemDir")" +if [ -d "$cgroupDir/cpu" -o -d "$cgroupDir/cpuacct" -o -d "$cgroupDir/cpuset" -o -d "$cgroupDir/devices" -o -d "$cgroupDir/freezer" -o -d "$cgroupDir/memory" ]; then + echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]" +else + if [ "$cgroupSubsystemDir" ]; then + echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]" + else + echo "$(wrap_bad 'cgroup hierarchy' 'nonexistent??')" + fi + echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)" +fi + +if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then + echo -n '- ' + if command -v apparmor_parser &> /dev/null; then + echo "$(wrap_good 'apparmor' 'enabled and tools installed')" + else + echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')" + echo -n ' ' + if command -v apt-get &> /dev/null; then + echo "$(wrap_color '(use "apt-get install apparmor" to fix this)')" + elif command -v yum &> /dev/null; then + echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')" + else + echo "$(wrap_color '(look for an "apparmor" package for your distribution)')" + fi + fi +fi + +flags=( + NAMESPACES {NET,PID,IPC,UTS}_NS + DEVPTS_MULTIPLE_INSTANCES + CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED + MACVLAN VETH BRIDGE + NF_NAT_IPV4 IP_NF_TARGET_MASQUERADE + NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK} + NF_NAT NF_NAT_NEEDED +) +check_flags "${flags[@]}" +echo + +echo 'Optional Features:' +flags=( + MEMCG_SWAP + RESOURCE_COUNTERS + CGROUP_PERF +) +check_flags "${flags[@]}" + +echo '- Storage Drivers:' +{ + echo '- "'$(wrap_color 'aufs' blue)'":' + check_flags AUFS_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/ /' + if ! 
is_set AUFS_FS && grep -q aufs /proc/filesystems; then + echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)" + fi + + echo '- "'$(wrap_color 'btrfs' blue)'":' + check_flags BTRFS_FS | sed 's/^/ /' + + echo '- "'$(wrap_color 'devicemapper' blue)'":' + check_flags BLK_DEV_DM DM_THIN_PROVISIONING EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/ /' +} | sed 's/^/ /' +echo + +#echo 'Potential Future Features:' +#check_flags USER_NS +#echo diff -Nru docker.io-0.9.1~dfsg1/contrib/completion/bash/docker docker.io-1.3.2~dfsg1/contrib/completion/bash/docker --- docker.io-0.9.1~dfsg1/contrib/completion/bash/docker 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/completion/bash/docker 2014-11-24 17:38:01.000000000 +0000 @@ -25,63 +25,59 @@ docker 2>/dev/null "$@" } -__docker_containers_all() -{ - local containers="$( __docker_q ps -a -q )" - local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )" - COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) ) -} - -__docker_containers_running() -{ - local containers="$( __docker_q ps -q )" - local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )" - COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) ) -} - -__docker_containers_stopped() -{ - local containers="$( { __docker_q ps -a -q; __docker_q ps -q; } | sort | uniq -u )" - local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )" - COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) ) -} - -__docker_image_repos() -{ - local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^$' )" - COMPREPLY=( $( compgen -W "$repos" -- "$cur" ) ) -} - -__docker_image_repos_and_tags() -{ - local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^$' )" - local images="$( __docker_q images | awk 'NR>1{print $1":"$2}' | grep -v '^:' )" - COMPREPLY=( $( compgen -W "$repos $images" -- "$cur" ) ) - __ltrim_colon_completions "$cur" +__docker_containers_all() { + local IFS=$'\n' + local containers=( $(__docker_q ps -aq --no-trunc) ) + if [ "$1" ]; then + containers=( $(__docker_q inspect --format "{{if $1}}{{.Id}}{{end}}" "${containers[@]}") ) + fi + local names=( $(__docker_q inspect --format '{{.Name}}' "${containers[@]}") ) + names=( "${names[@]#/}" ) # trim off the leading "/" from the container names + unset IFS + COMPREPLY=( $(compgen -W "${names[*]} ${containers[*]}" -- "$cur") ) +} + +__docker_containers_running() { + __docker_containers_all '.State.Running' +} + +__docker_containers_stopped() { + __docker_containers_all 'not .State.Running' +} + +__docker_containers_pauseable() { + __docker_containers_all 'and .State.Running (not .State.Paused)' +} + +__docker_containers_unpauseable() { + __docker_containers_all '.State.Paused' } -__docker_image_repos_and_tags_and_ids() -{ - local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^$' )" - local images="$( __docker_q images | awk 'NR>1{print $1":"$2}' | grep -v '^:' )" - local ids="$( __docker_q images -a -q )" - COMPREPLY=( $( compgen -W "$repos $images $ids" -- "$cur" ) ) +__docker_image_repos() { + local repos="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1 }')" + COMPREPLY=( $(compgen -W "$repos" -- "$cur") ) +} + +__docker_image_repos_and_tags() { + local reposAndTags="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1; print $1":"$2 }')" + COMPREPLY=( $(compgen -W "$reposAndTags" -- "$cur") ) __ltrim_colon_completions "$cur" } 
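# Aside (illustration, not part of the diff): the awk program in
# __docker_image_repos_and_tags above emits both "repo" and "repo:tag" words
# for every image row, and compgen then filters those words against the
# current prefix. A standalone sketch of the same pipeline; the image names
# in the sample output are hypothetical:
#
#   docker images | awk 'NR>1 && $1 != "" { print $1; print $1":"$2 }'
#   #=> ubuntu
#   #=> ubuntu:14.04
#   compgen -W "ubuntu ubuntu:14.04" -- ubu
#   #=> ubuntu
#   #=> ubuntu:14.04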
-__docker_containers_and_images() -{ - local containers="$( __docker_q ps -a -q )" - local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )" - local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^$' )" - local images="$( __docker_q images | awk 'NR>1{print $1":"$2}' | grep -v '^:' )" - local ids="$( __docker_q images -a -q )" - COMPREPLY=( $( compgen -W "$containers $names $repos $images $ids" -- "$cur" ) ) +__docker_image_repos_and_tags_and_ids() { + local images="$(__docker_q images -a --no-trunc | awk 'NR>1 { print $3; if ($1 != "") { print $1; print $1":"$2 } }')" + COMPREPLY=( $(compgen -W "$images" -- "$cur") ) __ltrim_colon_completions "$cur" } -__docker_pos_first_nonflag() -{ +__docker_containers_and_images() { + __docker_containers_all + local containers=( "${COMPREPLY[@]}" ) + __docker_image_repos_and_tags_and_ids + COMPREPLY+=( "${containers[@]}" ) +} + +__docker_pos_first_nonflag() { local argument_flags=$1 local counter=$cpos @@ -103,8 +99,7 @@ echo $counter } -_docker_docker() -{ +_docker_docker() { case "$prev" in -H) return @@ -118,13 +113,12 @@ COMPREPLY=( $( compgen -W "-H" -- "$cur" ) ) ;; *) - COMPREPLY=( $( compgen -W "$commands help" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) ) ;; esac } -_docker_attach() -{ +_docker_attach() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--no-stdin --sig-proxy" -- "$cur" ) ) @@ -138,8 +132,7 @@ esac } -_docker_build() -{ +_docker_build() { case "$prev" in -t|--tag) __docker_image_repos_and_tags @@ -151,19 +144,18 @@ case "$cur" in -*) - COMPREPLY=( $( compgen -W "-t --tag -q --quiet --no-cache --rm" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "-t --tag -q --quiet --no-cache --rm --force-rm" -- "$cur" ) ) ;; *) local counter="$(__docker_pos_first_nonflag '-t|--tag')" if [ $cword -eq $counter ]; then - _filedir + _filedir -d fi ;; esac } -_docker_commit() -{ +_docker_commit() { case "$prev" in -m|--message|-a|--author|--run) return @@ -193,8 +185,7 @@ esac } -_docker_cp() -{ +_docker_cp() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then case "$cur" in @@ -217,16 +208,82 @@ fi } -_docker_diff() -{ +_docker_create() { + case "$prev" in + -a|--attach) + COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) ) + return + ;; + --cidfile|--env-file) + _filedir + return + ;; + --volumes-from) + __docker_containers_all + return + ;; + -v|--volume) + case "$cur" in + *:*) + # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine) + ;; + '') + COMPREPLY=( $( compgen -W '/' -- "$cur" ) ) + compopt -o nospace + ;; + /*) + _filedir + compopt -o nospace + ;; + esac + return + ;; + -e|--env) + COMPREPLY=( $( compgen -e -- "$cur" ) ) + compopt -o nospace + return + ;; + --link) + case "$cur" in + *:*) + ;; + *) + __docker_containers_running + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + compopt -o nospace + ;; + esac + return + ;; + --entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir -c --cpu-shares --name -a --attach -v --volume --link -e --env -p --publish --expose --dns --volumes-from --lxc-conf" -- "$cur" ) ) + ;; + *) + local 
counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf') + + if [ $cword -eq $counter ]; then + __docker_image_repos_and_tags_and_ids + fi + ;; + esac +} + +_docker_diff() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_containers_all fi } -_docker_events() -{ +_docker_events() { case "$prev" in --since) return @@ -244,24 +301,32 @@ esac } -_docker_export() -{ +_docker_exec() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-d --detach -i --interactive -t --tty" -- "$cur" ) ) + ;; + *) + __docker_containers_running + ;; + esac +} + +_docker_export() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_containers_all fi } -_docker_help() -{ +_docker_help() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then - COMPREPLY=( $( compgen -W "$commands" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) fi } -_docker_history() -{ +_docker_history() { case "$cur" in -*) COMPREPLY=( $( compgen -W "-q --quiet --no-trunc" -- "$cur" ) ) @@ -275,8 +340,7 @@ esac } -_docker_images() -{ +_docker_images() { case "$cur" in -*) COMPREPLY=( $( compgen -W "-q --quiet -a --all --no-trunc -v --viz -t --tree" -- "$cur" ) ) @@ -290,8 +354,7 @@ esac } -_docker_import() -{ +_docker_import() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then return @@ -304,21 +367,11 @@ fi } -_docker_info() -{ +_docker_info() { return } -_docker_insert() -{ - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_image_repos_and_tags_and_ids - fi -} - -_docker_inspect() -{ +_docker_inspect() { case "$prev" in -f|--format) return @@ -337,18 +390,15 @@ esac } -_docker_kill() -{ +_docker_kill() { __docker_containers_running } -_docker_load() -{ +_docker_load() { return } -_docker_login() -{ +_docker_login() { case "$prev" in -u|--username|-p|--password|-e|--email) return @@ -366,8 +416,7 @@ esac } -_docker_logs() -{ +_docker_logs() { case "$cur" in -*) COMPREPLY=( $( compgen -W "-f --follow" -- "$cur" ) ) @@ -381,22 +430,24 @@ esac } -_docker_port() -{ +_docker_pause() { + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_containers_pauseable + fi +} + +_docker_port() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_containers_all fi } -_docker_ps() -{ +_docker_ps() { case "$prev" in - --since-id|--before-id) - COMPREPLY=( $( compgen -W "$( __docker_q ps -a -q )" -- "$cur" ) ) - # TODO replace this with __docker_containers_all - # see https://github.com/dotcloud/docker/issues/3565 - return + --since|--before) + __docker_containers_all ;; -n) return @@ -407,15 +458,14 @@ case "$cur" in -*) - COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since-id --before-id -n" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since --before -n" -- "$cur" ) ) ;; *) ;; esac } -_docker_pull() -{ +_docker_pull() { case "$prev" in -t|--tag) return @@ -437,18 +487,14 @@ esac } -_docker_push() -{ +_docker_push() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then - __docker_image_repos - # TODO replace this with __docker_image_repos_and_tags - # see https://github.com/dotcloud/docker/issues/3411 + 
__docker_image_repos_and_tags fi } -_docker_restart() -{ +_docker_restart() { case "$prev" in -t|--time) return @@ -467,41 +513,80 @@ esac } -_docker_rm() -{ +_docker_rm() { case "$cur" in -*) - COMPREPLY=( $( compgen -W "-v --volumes -l --link" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "-f --force -l --link -v --volumes" -- "$cur" ) ) + return ;; *) + local force= + for arg in "${COMP_WORDS[@]}"; do + case "$arg" in + -f|--force) + __docker_containers_all + return + ;; + esac + done __docker_containers_stopped + return ;; esac } -_docker_rmi() -{ +_docker_rmi() { __docker_image_repos_and_tags_and_ids } -_docker_run() -{ +_docker_run() { case "$prev" in - --cidfile) + -a|--attach) + COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) ) + return + ;; + --cidfile|--env-file) _filedir + return ;; --volumes-from) __docker_containers_all + return ;; -v|--volume) - # TODO something magical with colons and _filedir ? + case "$cur" in + *:*) + # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine) + ;; + '') + COMPREPLY=( $( compgen -W '/' -- "$cur" ) ) + compopt -o nospace + ;; + /*) + _filedir + compopt -o nospace + ;; + esac return ;; -e|--env) COMPREPLY=( $( compgen -e -- "$cur" ) ) + compopt -o nospace return ;; - --entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf) + --link) + case "$cur" in + *:*) + ;; + *) + __docker_containers_running + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + compopt -o nospace + ;; + esac + return + ;; + --entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf) return ;; *) @@ -510,10 +595,11 @@ case "$cur" in -*) - COMPREPLY=( $( compgen -W "--rm -d --detach -n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env -p --publish --expose --dns --volumes-from --lxc-conf" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--rm -d --detach -n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env -p --publish --expose --dns --volumes-from --lxc-conf --security-opt" -- "$cur" ) ) ;; *) - local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf') + + local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt') if [ $cword -eq $counter ]; then __docker_image_repos_and_tags_and_ids @@ -522,16 +608,14 @@ esac } -_docker_save() -{ +_docker_save() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_image_repos_and_tags_and_ids fi } -_docker_search() -{ +_docker_search() { case "$prev" in -s|--stars) return @@ -542,15 +626,14 @@ case "$cur" in -*) - COMPREPLY=( $( compgen -W "--no-trunc -t --trusted -s --stars" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--no-trunc --automated -s --stars" -- 
"$cur" ) ) ;; *) ;; esac } -_docker_start() -{ +_docker_start() { case "$cur" in -*) COMPREPLY=( $( compgen -W "-a --attach -i --interactive" -- "$cur" ) ) @@ -561,8 +644,7 @@ esac } -_docker_stop() -{ +_docker_stop() { case "$prev" in -t|--time) return @@ -581,8 +663,7 @@ esac } -_docker_tag() -{ +_docker_tag() { case "$cur" in -*) COMPREPLY=( $( compgen -W "-f --force" -- "$cur" ) ) @@ -604,61 +685,68 @@ esac } -_docker_top() -{ +_docker_unpause() { + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_containers_unpauseable + fi +} + +_docker_top() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_containers_running fi } -_docker_version() -{ +_docker_version() { return } -_docker_wait() -{ +_docker_wait() { __docker_containers_all } -_docker() -{ - local commands=" - attach - build - commit - cp - diff - events - export - history - images - import - info - insert - inspect - kill - load - login - logs - port - ps - pull - push - restart - rm - rmi - run - save - search - start - stop - tag - top - version - wait - " +_docker() { + local commands=( + attach + build + commit + cp + create + diff + events + exec + export + history + images + import + info + insert + inspect + kill + load + login + logs + pause + port + ps + pull + push + restart + rm + rmi + run + save + search + start + stop + tag + top + unpause + version + wait + ) COMPREPLY=() local cur prev words cword diff -Nru docker.io-0.9.1~dfsg1/contrib/completion/fish/docker.fish docker.io-1.3.2~dfsg1/contrib/completion/fish/docker.fish --- docker.io-0.9.1~dfsg1/contrib/completion/fish/docker.fish 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/completion/fish/docker.fish 2014-11-24 17:38:01.000000000 +0000 @@ -16,7 +16,7 @@ function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand' for i in (commandline -opc) - if contains -- $i attach build commit cp diff events export history images import info insert inspect kill load login logs port ps pull push restart rm rmi run save search start stop tag top version wait + if contains -- $i attach build commit cp create diff events export history images import info insert inspect kill load login logs port ps pull push restart rm rmi run save search start stop tag top version wait return 1 end end @@ -26,36 +26,38 @@ function __fish_print_docker_containers --description 'Print a list of docker containers' -a select switch $select case running - docker ps -a --no-trunc | awk 'NR>1' | awk 'BEGIN {FS=" +"}; $5 ~ "^Up" {print $1 "\n" $(NF-1)}' | tr ',' '\n' + docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Up" {print $1 "\n" $(NF-1)}' | tr ',' '\n' case stopped - docker ps -a --no-trunc | awk 'NR>1' | awk 'BEGIN {FS=" +"}; $5 ~ "^Exit" {print $1 "\n" $(NF-1)}' | tr ',' '\n' + docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Exit" {print $1 "\n" $(NF-1)}' | tr ',' '\n' case all - docker ps -a --no-trunc | awk 'NR>1' | awk 'BEGIN {FS=" +"}; {print $1 "\n" $(NF-1)}' | tr ',' '\n' + docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; {print $1 "\n" $(NF-1)}' | tr ',' '\n' end end function __fish_print_docker_images --description 'Print a list of docker images' - docker images | awk 'NR>1' | grep -v '' | awk '{print $1":"$2}' + docker images | command awk 'NR>1' | command grep -v '' | command awk '{print $1":"$2}' end function __fish_print_docker_repositories 
--description 'Print a list of docker repositories' - docker images | awk 'NR>1' | grep -v '' | awk '{print $1}' | sort | uniq + docker images | command awk 'NR>1' | command grep -v '' | command awk '{print $1}' | command sort | command uniq end # common options complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode' +complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d "Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group" complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode. Multiple sockets can be specified' complete -c docker -f -n '__fish_docker_no_subcommand' -l api-enable-cors -d 'Enable CORS headers in the remote API' complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d "Attach containers to a pre-existing network bridge; use 'none' to disable container networking" complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b" complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode' complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force docker to use specific DNS servers' +complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the docker runtime to use a specific exec driver' complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the docker runtime' complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Enable inter-container communication' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Disable enabling of net.ipv4.ip_forward' complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Disable docker's addition of iptables rules" -complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if not default route is available' +complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available' complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file' complete -c docker -f -n '__fish_docker_no_subcommand' -s r -l restart -d 'Restart previously running containers' complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the docker runtime to use a specific storage driver' @@ -69,21 +71,49 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container" # build -complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build a container from a Dockerfile' +complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build an image from a Dockerfile' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l force-rm -d 'Always remove intermediate containers, even after unsuccessful builds' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image' -complete -c docker -A -f -n 
'__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress verbose build output' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the verbose output generated by the containers' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success' # commit complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes" -complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (eg. "John Hannibal Smith "' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith "' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l run -d 'Config automatically applied when the image is run. (ex: -run=\'{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}\')' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container" # cp -complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d 'Copy files/folders from the containers filesystem to the host path' +complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders from a container's filesystem to the host path" + +# create +complete -c docker -f -n '__fish_docker_no_subcommand' -a run -d 'Run a command in a new container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to the host interfaces' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to stdin, stdout or stderr.' 
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s c -l cpu-shares -d 'CPU shares (relative weight)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom dns servers' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default entrypoint of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port from the container without publishing it to your host' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep stdin open even if not attached' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container (name:alias)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l lxc-conf -d 'Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: , where unit = b, k, m or g)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s n -l networking -d 'Enable networking for this container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-tty' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g. 
from the host: -v /host:/container, from docker: -v /container)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l volumes-from -d 'Mount volumes from the specified container(s)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s w -l workdir -d 'Working directory inside the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_docker_images)' -d "Image" + # diff complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem" @@ -100,16 +130,16 @@ # history complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image' complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output" -complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'only show numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs' complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image" # images complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images' -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'show all images (by default filter out the intermediate images used to build)' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output" -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'only show numeric IDs' -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s t -l tree -d 'output graph in tree format' -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s v -l viz -d 'output graph in graphviz format' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s t -l tree -d 'Output graph in tree format' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s v -l viz -d 'Output graph in graphviz format' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository" # import @@ -118,15 +148,11 @@ # info complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information' -# insert -complete -c docker -f -n '__fish_docker_no_subcommand' -a insert -d 'Insert a file in an image' -complete -c docker -A -f -n '__fish_seen_subcommand_from insert' -a '(__fish_print_docker_images)' -d "Image" - # inspect complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container' complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.' 
complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image" -complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers running)' -d "Container" +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container" # kill complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container' @@ -138,9 +164,9 @@ # login complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Register or Login to the docker registry server' -complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s e -l email -d 'email' -complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'password' -complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'username' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s e -l email -d 'Email' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'Username' # logs complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container' @@ -154,13 +180,13 @@ # ps complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before-id -d 'Show only container created before Id, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only container created before Id or Name, include non-running ones.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, include non-running ones.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display sizes' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since-id -d 'Show only containers created since Id, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.' 
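Two of the fish changes just above track CLI renames worth calling out: `inspect` gained the Go-template `-f`/`--format` filter (and now completes all containers rather than only running ones, since stopped containers can be inspected too), and the `ps` filters dropped their `-id` suffix because `--before` and `--since` accept a container name as well as an ID. A couple of hedged usage examples against the docker 1.3-era CLI these completions target, with `web` standing in for a real container name:

    # Containers created since/before "web", including non-running ones:
    docker ps --since web
    docker ps --before web

    # Pull one field out of the inspect JSON with a Go template:
    docker inspect --format '{{ .State.Running }}' web
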
# pull complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from the docker registry server' @@ -180,12 +206,14 @@ # rm complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force removal of running container' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated to the container' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container" # rmi complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force' complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image" # run @@ -225,7 +253,7 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image in the docker index' complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only displays with at least xxx stars' -complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s t -l trusted -d 'Only show trusted builds' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds' # start complete -c docker -f -n '__fish_docker_no_subcommand' -a start -d 'Start a stopped container' diff -Nru docker.io-0.9.1~dfsg1/contrib/completion/zsh/_docker docker.io-1.3.2~dfsg1/contrib/completion/zsh/_docker --- docker.io-0.9.1~dfsg1/contrib/completion/zsh/_docker 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/completion/zsh/_docker 2014-11-24 17:38:01.000000000 +0000 @@ -1,58 +1,139 @@ -#compdef docker +#compdef docker # -# zsh completion for docker (http://docker.io) +# zsh completion for docker (http://docker.com) # -# version: 0.2.2 -# author: Felix Riedel -# license: BSD License +# version: 0.3.0 # github: https://github.com/felixr/docker-zsh-completion # +# contributors: +# - Felix Riedel +# - Vincent Bernat +# +# license: +# +# Copyright (c) 2013, Felix Riedel +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# -__parse_docker_list() { - sed -e '/^ID/d' -e 's/[ ]\{2,\}/|/g' -e 's/ \([hdwm]\)\(inutes\|ays\|ours\|eeks\)/\1/' | awk ' BEGIN {FS="|"} { printf("%s:%7s, %s\n", $1, $4, $2)}' +__docker_get_containers() { + local kind expl + declare -a running stopped lines args + + kind=$1 + shift + [[ $kind = (stopped|all) ]] && args=($args -a) + + lines=(${(f)"$(_call_program commands docker ps ${args})"}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( $j < ${#header} - 1 )) { + i=$(( $j + ${${header[$j,-1]}[(i)[^ ]]} - 1)) + j=$(( $i + ${${header[$i,-1]}[(i) ]} - 1)) + k=$(( $j + ${${header[$j,-1]}[(i)[^ ]]} - 2)) + begin[${header[$i,$(($j-1))]}]=$i + end[${header[$i,$(($j-1))]}]=$k + } + lines=(${lines[2,-1]}) + + # Container ID + local line + local s + for line in $lines; do + s="${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}" + s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" + s="$s, ${${${line[$begin[IMAGE],$end[IMAGE]]}/:/\\:}%% ##}" + if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then + stopped=($stopped $s) + else + running=($running $s) + fi + done + + # Names + local name + local -a names + for line in $lines; do + names=(${(ps:,:)${${line[${begin[NAMES]},-1]}%% *}}) + for name in $names; do + s="${name}:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" + s="$s, ${${${line[$begin[IMAGE],$end[IMAGE]]}/:/\\:}%% ##}" + if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then + stopped=($stopped $s) + else + running=($running $s) + fi + done + done + + [[ $kind = (running|all) ]] && _describe -t containers-running "running containers" running + [[ $kind = (stopped|all) ]] && _describe -t containers-stopped "stopped containers" stopped } __docker_stoppedcontainers() { - local expl - declare -a stoppedcontainers - stoppedcontainers=(${(f)"$(docker ps -a | grep --color=never 'Exit' | __parse_docker_list )"}) - _describe -t containers-stopped "Stopped Containers" stoppedcontainers + __docker_get_containers stopped "$@" } __docker_runningcontainers() { - local expl - declare -a containers - - containers=(${(f)"$(docker ps | __parse_docker_list)"}) - _describe -t containers-active "Running Containers" containers + __docker_get_containers running "$@" } __docker_containers () { - __docker_stoppedcontainers - __docker_runningcontainers + __docker_get_containers all "$@" } __docker_images () { local expl declare -a images - images=(${(f)"$(docker images | awk '(NR > 1){printf("%s\\:%s\n", $1,$2)}')"}) - images=($images ${(f)"$(docker images | awk '(NR > 1){printf("%s:%-15s in %s\n", $3,$2,$1)}')"}) - _describe -t docker-images "Images" images + images=(${${${${(f)"$(_call_program commands docker images)"}[2,-1]}/ ##/\\:}%% *}) + images=(${${images%\\:}#} ${${${(f)"$(_call_program commands docker images)"}[2,-1]}/(#b)([^ ]##) ##([^ ]##) ##([^ ]##)*/${match[3]}:${(r:15:: :::)match[2]} in ${match[1]}}) + _describe -t docker-images "images" images } __docker_tags() { local 
expl declare -a tags - tags=(${(f)"$(docker images | awk '(NR>1){print $2}'| sort | uniq)"}) + tags=(${${${${${(f)"$(_call_program commands docker images)"}#* }## #}%% *}[2,-1]}) _describe -t docker-tags "tags" tags } +__docker_repositories_with_tags() { + if compset -P '*:'; then + __docker_tags + else + __docker_repositories -qS ":" + fi +} + __docker_search() { # declare -a dockersearch local cache_policy zstyle -s ":completion:${curcontext}:" cache-policy cache_policy if [[ -z "$cache_policy" ]]; then - zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy + zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy fi local searchterm cachename @@ -60,20 +141,19 @@ cachename=_docker-search-$searchterm local expl - local -a result + local -a result if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \ && ! _retrieve_cache ${cachename#_}; then _message "Searching for ${searchterm}..." - result=(${(f)"$(docker search ${searchterm} | awk '(NR>2){print $1}')"}) + result=(${${${(f)"$(_call_program commands docker search ${searchterm})"}%% *}[2,-1]}) _store_cache ${cachename#_} result - fi - _wanted dockersearch expl 'Available images' compadd -a result + fi + _wanted dockersearch expl 'available images' compadd -a result } __docker_caching_policy() { - # oldp=( "$1"(Nmh+24) ) # 24 hour - oldp=( "$1"(Nmh+1) ) # 24 hour + oldp=( "$1"(Nmh+1) ) # 1 hour (( $#oldp )) } @@ -81,8 +161,8 @@ __docker_repositories () { local expl declare -a repos - repos=(${(f)"$(docker images | sed -e '1d' -e 's/[ ].*//' | sort | uniq)"}) - _describe -t docker-repos "Repositories" repos + repos=(${${${(f)"$(_call_program commands docker images)"}%% *}[2,-1]}) + _describe -t docker-repos "repositories" repos "$@" } __docker_commands () { @@ -91,15 +171,14 @@ zstyle -s ":completion:${curcontext}:" cache-policy cache_policy if [[ -z "$cache_policy" ]]; then - zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy + zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy fi if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \ - && ! _retrieve_cache docker_subcommands; + && ! 
_retrieve_cache docker_subcommands; then - _docker_subcommands=(${${(f)"$(_call_program commands - docker 2>&1 | sed -e '1,6d' -e '/^[ ]*$/d' -e 's/[ ]*\([^ ]\+\)\s*\([^ ].*\)/\1:\2/' )"}}) - _docker_subcommands=($_docker_subcommands 'help:Show help for a command') + _docker_subcommands=(${${${${(f)"$(_call_program commands docker 2>&1)"}[5,-1]}## #}/ ##/:}) + _docker_subcommands=($_docker_subcommands 'help:Show help for a command') _store_cache docker_subcommands _docker_subcommands fi _describe -t docker-commands "docker command" _docker_subcommands @@ -108,36 +187,110 @@ __docker_subcommand () { local -a _command_args case "$words[1]" in - (attach|wait) - _arguments ':containers:__docker_runningcontainers' + (attach) + _arguments \ + '--no-stdin[Do not attach stdin]' \ + '--sig-proxy[Proxify all received signal]' \ + ':containers:__docker_runningcontainers' ;; (build) _arguments \ - '-t=-:repository:__docker_repositories' \ + '--force-rm[Always remove intermediate containers, even after unsuccessful builds]' \ + '--no-cache[Do not use cache when building the image]' \ + '-q[Suppress verbose build output]' \ + '--rm[Remove intermediate containers after a successful build]' \ + '-t:repository:__docker_repositories_with_tags' \ ':path or URL:_directories' ;; (commit) _arguments \ + '--author=-[Author]:author: ' \ + '-m[Commit message]:message: ' \ + '--run=-[Configuration automatically applied when the image is run]:configuration: ' \ ':container:__docker_containers' \ - ':repository:__docker_repositories' \ - ':tag: ' + ':repository:__docker_repositories_with_tags' ;; - (diff|export|logs) + (cp) + _arguments \ + ':container:->container' \ + ':hostpath:_files' + case $state in + (container) + if compset -P '*:'; then + _files + else + __docker_containers -qS ":" + fi + ;; + esac + ;; + (create) + _arguments \ + '-P[Publish all exposed ports to the host]' \ + '-a[Attach to stdin, stdout or stderr]' \ + '-c=-[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \ + '--cidfile=-[Write the container ID to the file]:CID file:_files' \ + '*--dns=-[Set custom dns servers]:dns server: ' \ + '*-e=-[Set environment variables]:environment variable: ' \ + '--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \ + '*--expose=-[Expose a port from the container without publishing it]: ' \ + '-h=-[Container host name]:hostname:_hosts' \ + '-i[Keep stdin open even if not attached]' \ + '--link=-[Add link to another container]:link:->link' \ + '--lxc-conf=-[Add custom lxc options]:lxc options: ' \ + '-m=-[Memory limit (in bytes)]:limit: ' \ + '--name=-[Container name]:name: ' \ + '*-p=-[Expose a container'"'"'s port to the host]:port:_ports' \ + '--privileged[Give extended privileges to this container]' \ + '-t[Allocate a pseudo-tty]' \ + '-u=-[Username or UID]:user:_users' \ + '*-v=-[Bind mount a volume (e.g. 
from the host: -v /host:/container, from docker: -v /container)]:volume: '\ + '--volumes-from=-[Mount volumes from the specified container]:volume: ' \ + '-w=-[Working directory inside the container]:directory:_directories' \ + '(-):images:__docker_images' \ + '(-):command: _command_names -e' \ + '*::arguments: _normal' + (diff|export) _arguments '*:containers:__docker_containers' ;; + (exec) + _arguments \ + '-d[Detached mode: leave the container running in the background]' \ + '-i[Keep stdin open even if not attached]' \ + '-t[Allocate a pseudo-tty]' \ + ':containers:__docker_runningcontainers' + ;; (history) - _arguments '*:images:__docker_images' + _arguments \ + '--no-trunc[Do not truncate output]' \ + '-q[Only show numeric IDs]' \ + '*:images:__docker_images' ;; (images) _arguments \ '-a[Show all images]' \ + '--no-trunc[Do not truncate output]' \ + '-q[Only show numeric IDs]' \ + '--tree[Output graph in tree format]' \ + '--viz[Output graph in graphviz format]' \ ':repository:__docker_repositories' ;; (inspect) - _arguments '*:containers:__docker_containers' + _arguments \ + '--format=-[Format the output using the given go template]:template: ' \ + '*:containers:__docker_containers' ;; - (history) - _arguments ':images:__docker_images' + (import) + _arguments \ + ':URL:(- http:// file://)' \ + ':repository:__docker_repositories_with_tags' + ;; + (info) + ;; + (import) + _arguments \ + ':URL:(- http:// file://)' \ + ':repository:__docker_repositories_with_tags' ;; (insert) _arguments '1:containers:__docker_containers' \ @@ -147,66 +300,127 @@ (kill) _arguments '*:containers:__docker_runningcontainers' ;; + (load) + ;; + (login) + _arguments \ + '-e[Email]:email: ' \ + '-p[Password]:password: ' \ + '-u[Username]:username: ' \ + ':server: ' + ;; + (logs) + _arguments \ + '-f[Follow log output]' \ + '*:containers:__docker_containers' + ;; (port) - _arguments '1:containers:__docker_runningcontainers' + _arguments \ + '1:containers:__docker_runningcontainers' \ + '2:port:_ports' ;; (start) - _arguments '*:containers:__docker_stoppedcontainers' + _arguments \ + '-a[Attach container'"'"'s stdout/stderr and forward all signals]' \ + '-i[Attach container'"'"'s stdin]' \ + '*:containers:__docker_stoppedcontainers' ;; (rm) - _arguments '-v[Remove the volumes associated to the container]' \ + _arguments \ + '--link[Remove the specified link and not the underlying container]' \ + '-v[Remove the volumes associated to the container]' \ '*:containers:__docker_stoppedcontainers' ;; (rmi) - _arguments '-v[Remove the volumes associated to the container]' \ + _arguments \ '*:images:__docker_images' ;; - (top) - _arguments '1:containers:__docker_runningcontainers' - ;; (restart|stop) - _arguments '-t=-[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)' \ + _arguments '-t[Number of seconds to try to stop for before killing the container]:seconds before killing:(1 5 10 30 60)' \ '*:containers:__docker_runningcontainers' ;; (top) - _arguments ':containers:__docker_runningcontainers' + _arguments \ + '1:containers:__docker_runningcontainers' \ + '(-)*:: :->ps-arguments' + case $state in + (ps-arguments) + _ps + ;; + esac + + (ps) - _arguments '-a[Show all containers. 
Only running containers are shown by default]' \ - '-h[Show help]' \ - '-before-id=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \ - '-n=-[Show n last created containers, include non-running one]:n:(1 5 10 25 50)' + _arguments \ + '-a[Show all containers]' \ + '--before=-[Show only container created before...]:containers:__docker_containers' \ + '-l[Show only the latest created container]' \ + '-n[Show n last created containers, include non-running one]:n:(1 5 10 25 50)' \ + '--no-trunc[Do not truncate output]' \ + '-q[Only show numeric IDs]' \ + '-s[Display sizes]' \ + '--since=-[Show only containers created since...]:containers:__docker_containers' ;; (tag) _arguments \ '-f[force]'\ ':image:__docker_images'\ - ':repository:__docker_repositories' \ - ':tag:__docker_tags' + ':repository:__docker_repositories_with_tags' ;; (run) _arguments \ - '-a=-[Attach to stdin, stdout or stderr]:toggle:(true false)' \ - '-c=-[CPU shares (relative weight)]:CPU shares: ' \ + '-P[Publish all exposed ports to the host]' \ + '-a[Attach to stdin, stdout or stderr]' \ + '-c[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \ + '--cidfile=-[Write the container ID to the file]:CID file:_files' \ '-d[Detached mode: leave the container running in the background]' \ - '*--dns=[Set custom dns servers]:dns server: ' \ - '*-e=[Set environment variables]:environment variable: ' \ + '*--dns=-[Set custom dns servers]:dns server: ' \ + '*-e[Set environment variables]:environment variable: ' \ '--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \ - '-h=-[Container host name]:hostname:_hosts' \ + '*--expose=-[Expose a port from the container without publishing it]: ' \ + '-h[Container host name]:hostname:_hosts' \ '-i[Keep stdin open even if not attached]' \ - '-m=-[Memory limit (in bytes)]:limit: ' \ - '*-p=-[Expose a container''s port to the host]:port:_ports' \ - '-t=-[Allocate a pseudo-tty]:toggle:(true false)' \ - '-u=-[Username or UID]:user:_users' \ - '*-v=-[Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)]:volume: '\ + '--link=-[Add link to another container]:link:->link' \ + '--lxc-conf=-[Add custom lxc options]:lxc options: ' \ + '-m[Memory limit (in bytes)]:limit: ' \ + '--name=-[Container name]:name: ' \ + '*-p[Expose a container'"'"'s port to the host]:port:_ports' \ + '--privileged[Give extended privileges to this container]' \ + '--rm[Remove intermediate containers when it exits]' \ + '--sig-proxy[Proxify all received signal]' \ + '-t[Allocate a pseudo-tty]' \ + '-u[Username or UID]:user:_users' \ + '*-v[Bind mount a volume (e.g. 
from the host: -v /host:/container, from docker: -v /container)]:volume: '\ '--volumes-from=-[Mount volumes from the specified container]:volume: ' \ + '-w[Working directory inside the container]:directory:_directories' \ '(-):images:__docker_images' \ '(-):command: _command_names -e' \ '*::arguments: _normal' - ;; + + case $state in + (link) + if compset -P '*:'; then + _wanted alias expl 'Alias' compadd -E "" + else + __docker_runningcontainers -qS ":" + fi + ;; + esac + + ;; (pull|search) _arguments ':name:__docker_search' ;; + (push) + _arguments ':images:__docker_images' + ;; + (save) + _arguments \ + ':images:__docker_images' + ;; + (wait) + _arguments ':containers:__docker_runningcontainers' + ;; (help) _arguments ':subcommand:__docker_commands' ;; @@ -217,26 +431,41 @@ } _docker () { + # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`. + # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`. + if [[ $service != docker ]]; then + _call_function - _$service + return + fi + local curcontext="$curcontext" state line typeset -A opt_args _arguments -C \ - '-H=-[tcp://host:port to bind/connect to]:socket: ' \ + '-H[tcp://host:port to bind/connect to]:socket: ' \ '(-): :->command' \ - '(-)*:: :->option-or-argument' + '(-)*:: :->option-or-argument' if (( CURRENT == 1 )); then fi - case $state in + case $state in (command) __docker_commands ;; (option-or-argument) curcontext=${curcontext%:*:*}:docker-$words[1]: - __docker_subcommand + __docker_subcommand ;; esac } _docker "$@" + +# Local Variables: +# mode: Shell-Script +# sh-indentation: 4 +# indent-tabs-mode: nil +# sh-basic-offset: 4 +# End: +# vim: ft=zsh sw=4 ts=4 et diff -Nru docker.io-0.9.1~dfsg1/contrib/crashTest.go docker.io-1.3.2~dfsg1/contrib/crashTest.go --- docker.io-0.9.1~dfsg1/contrib/crashTest.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/crashTest.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,125 +0,0 @@ -package main - -import ( - "fmt" - "io" - "log" - "net" - "os" - "os/exec" - "path" - "time" -) - -var DOCKERPATH = path.Join(os.Getenv("DOCKERPATH"), "docker") - -// WARNING: this crashTest will 1) crash your host, 2) remove all containers -func runDaemon() (*exec.Cmd, error) { - os.Remove("/var/run/docker.pid") - exec.Command("rm", "-rf", "/var/lib/docker/containers").Run() - cmd := exec.Command(DOCKERPATH, "-d") - outPipe, err := cmd.StdoutPipe() - if err != nil { - return nil, err - } - errPipe, err := cmd.StderrPipe() - if err != nil { - return nil, err - } - if err := cmd.Start(); err != nil { - return nil, err - } - go func() { - io.Copy(os.Stdout, outPipe) - }() - go func() { - io.Copy(os.Stderr, errPipe) - }() - return cmd, nil -} - -func crashTest() error { - if err := exec.Command("/bin/bash", "-c", "while true; do true; done").Start(); err != nil { - return err - } - - var endpoint string - if ep := os.Getenv("TEST_ENDPOINT"); ep == "" { - endpoint = "192.168.56.1:7979" - } else { - endpoint = ep - } - - c := make(chan bool) - var conn io.Writer - - go func() { - conn, _ = net.Dial("tcp", endpoint) - c <- false - }() - go func() { - time.Sleep(2 * time.Second) - c <- true - }() - <-c - - restartCount := 0 - totalTestCount := 1 - for { - daemon, err := runDaemon() - if err != nil { - return err - } - restartCount++ - // time.Sleep(5000 * time.Millisecond) - var stop bool - go func() error { - stop = false - for i := 0; i < 100 && !stop; { - func() error { - cmd := exec.Command(DOCKERPATH, "run", "ubuntu", "echo", 
fmt.Sprintf("%d", totalTestCount)) - i++ - totalTestCount++ - outPipe, err := cmd.StdoutPipe() - if err != nil { - return err - } - inPipe, err := cmd.StdinPipe() - if err != nil { - return err - } - if err := cmd.Start(); err != nil { - return err - } - if conn != nil { - go io.Copy(conn, outPipe) - } - - // Expecting error, do not check - inPipe.Write([]byte("hello world!!!!!\n")) - go inPipe.Write([]byte("hello world!!!!!\n")) - go inPipe.Write([]byte("hello world!!!!!\n")) - inPipe.Close() - - if err := cmd.Wait(); err != nil { - return err - } - outPipe.Close() - return nil - }() - } - return nil - }() - time.Sleep(20 * time.Second) - stop = true - if err := daemon.Process.Kill(); err != nil { - return err - } - } -} - -func main() { - if err := crashTest(); err != nil { - log.Println(err) - } -} diff -Nru docker.io-0.9.1~dfsg1/contrib/desktop-integration/chromium/Dockerfile docker.io-1.3.2~dfsg1/contrib/desktop-integration/chromium/Dockerfile --- docker.io-0.9.1~dfsg1/contrib/desktop-integration/chromium/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/desktop-integration/chromium/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,38 @@ +# VERSION: 0.1 +# DESCRIPTION: Create chromium container with its dependencies +# AUTHOR: Jessica Frazelle +# COMMENTS: +# This file describes how to build a Chromium container with all +# dependencies installed. It uses native X11 unix socket. +# Tested on Debian Jessie +# USAGE: +# # Download Chromium Dockerfile +# wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/chromium/Dockerfile +# +# # Build chromium image +# docker build -t chromium . +# +# # Run stateful data-on-host chromium. For ephemeral, remove -v /data/chromium:/data +# docker run -v /data/chromium:/data -v /tmp/.X11-unix:/tmp/.X11-unix \ +# -e DISPLAY=unix$DISPLAY chromium + +# # To run stateful dockerized data containers +# docker run --volumes-from chromium-data -v /tmp/.X11-unix:/tmp/.X11-unix \ +# -e DISPLAY=unix$DISPLAY chromium + +DOCKER_VERSION 1.3 + +# Base docker image +FROM debian:jessie +MAINTAINER Jessica Frazelle + +# Install Chromium +RUN apt-get update && apt-get install -y \ + chromium \ + chromium-l10n \ + libcanberra-gtk-module \ + libexif-dev \ + --no-install-recommends + +# Autorun chromium +CMD ["/usr/bin/chromium", "--no-sandbox", "--user-data-dir=/data"] diff -Nru docker.io-0.9.1~dfsg1/contrib/desktop-integration/data/Dockerfile docker.io-1.3.2~dfsg1/contrib/desktop-integration/data/Dockerfile --- docker.io-0.9.1~dfsg1/contrib/desktop-integration/data/Dockerfile 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/desktop-integration/data/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -# VERSION: 0.1 -# DESCRIPTION: Create data image sharing /data volume -# AUTHOR: Daniel Mizyrycki -# COMMENTS: -# This image is used as base for all data containers. -# /data volume is owned by sysadmin. -# USAGE: -# # Download data Dockerfile -# wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/data/Dockerfile -# -# # Build data image -# docker build -t data -rm . -# -# # Create a data container. 
(eg: iceweasel-data) -# docker run -name iceweasel-data data true -# -# # List data from it -# docker run -volumes-from iceweasel-data busybox ls -al /data - -docker-version 0.6.5 - -# Smallest base image, just to launch a container -FROM busybox -MAINTAINER Daniel Mizyrycki - -# Create a regular user -RUN echo 'sysadmin:x:1000:1000::/data:/bin/sh' >> /etc/passwd -RUN echo 'sysadmin:x:1000:' >> /etc/group - -# Create directory for that user -RUN mkdir /data -RUN chown sysadmin.sysadmin /data - -# Add content to /data. This will keep sysadmin ownership -RUN touch /data/init_volume - -# Create /data volume -VOLUME /data diff -Nru docker.io-0.9.1~dfsg1/contrib/desktop-integration/gparted/Dockerfile docker.io-1.3.2~dfsg1/contrib/desktop-integration/gparted/Dockerfile --- docker.io-0.9.1~dfsg1/contrib/desktop-integration/gparted/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/desktop-integration/gparted/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,33 @@ +# VERSION: 0.1 +# DESCRIPTION: Create gparted container with its dependencies +# AUTHOR: Jessica Frazelle +# COMMENTS: +# This file describes how to build a gparted container with all +# dependencies installed. It uses native X11 unix socket. +# Tested on Debian Jessie +# USAGE: +# # Download gparted Dockerfile +# wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/gparted/Dockerfile +# +# # Build gparted image +# docker build -t gparted . +# +# docker run -v /tmp/.X11-unix:/tmp/.X11-unix \ +# --device=/dev/sda:/dev/sda \ +# -e DISPLAY=unix$DISPLAY gparted +# + +DOCKER-VERSION 1.3 + +# Base docker image +FROM debian:jessie +MAINTAINER Jessica Frazelle + +# Install Gparted and its dependencies +RUN apt-get update && apt-get install -y \ + gparted \ + libcanberra-gtk-module \ + --no-install-recommends + +# Autorun gparted +CMD ["/usr/sbin/gparted"] diff -Nru docker.io-0.9.1~dfsg1/contrib/desktop-integration/iceweasel/Dockerfile docker.io-1.3.2~dfsg1/contrib/desktop-integration/iceweasel/Dockerfile --- docker.io-0.9.1~dfsg1/contrib/desktop-integration/iceweasel/Dockerfile 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/desktop-integration/iceweasel/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,41 +0,0 @@ -# VERSION: 0.7 -# DESCRIPTION: Create iceweasel container with its dependencies -# AUTHOR: Daniel Mizyrycki -# COMMENTS: -# This file describes how to build a Iceweasel container with all -# dependencies installed. It uses native X11 unix socket and alsa -# sound devices. Tested on Debian 7.2 -# USAGE: -# # Download Iceweasel Dockerfile -# wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/iceweasel/Dockerfile -# -# # Build iceweasel image -# docker build -t iceweasel -rm . -# -# # Run stateful data-on-host iceweasel. 
For ephemeral, remove -v /data/iceweasel:/data -# docker run -v /data/iceweasel:/data -v /tmp/.X11-unix:/tmp/.X11-unix \ -# -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \ -# -e DISPLAY=unix$DISPLAY iceweasel -# -# # To run stateful dockerized data containers -# docker run -volumes-from iceweasel-data -v /tmp/.X11-unix:/tmp/.X11-unix \ -# -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \ -# -e DISPLAY=unix$DISPLAY iceweasel - -docker-version 0.6.5 - -# Base docker image -FROM debian:wheezy -MAINTAINER Daniel Mizyrycki - -# Install Iceweasel and "sudo" -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq iceweasel sudo - -# create sysadmin account -RUN useradd -m -d /data -p saIVpsc0EVTwA sysadmin -RUN sed -Ei 's/sudo:x:27:/sudo:x:27:sysadmin/' /etc/group -RUN sed -Ei 's/(\%sudo\s+ALL=\(ALL\:ALL\) )ALL/\1 NOPASSWD:ALL/' /etc/sudoers - -# Autorun iceweasel. -no-remote is necessary to create a new container, as -# iceweasel appears to communicate with itself through X11. -CMD ["/usr/bin/sudo", "-u", "sysadmin", "-H", "-E", "/usr/bin/iceweasel", "-no-remote"] diff -Nru docker.io-0.9.1~dfsg1/contrib/desktop-integration/README.md docker.io-1.3.2~dfsg1/contrib/desktop-integration/README.md --- docker.io-0.9.1~dfsg1/contrib/desktop-integration/README.md 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/desktop-integration/README.md 2014-11-24 17:38:01.000000000 +0000 @@ -7,5 +7,5 @@ Examples ======== -* Data container: ./data/Dockerfile creates a data image sharing /data volume -* Iceweasel: ./iceweasel/Dockerfile shows a way to dockerize a common multimedia application +* Chromium: ./chromium/Dockerfile shows a way to dockerize a common application +* Gparted: ./gparted/Dockerfile shows a way to dockerize a common application w devices diff -Nru docker.io-0.9.1~dfsg1/contrib/docker-device-tool/device_tool.go docker.io-1.3.2~dfsg1/contrib/docker-device-tool/device_tool.go --- docker.io-0.9.1~dfsg1/contrib/docker-device-tool/device_tool.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/docker-device-tool/device_tool.go 2014-11-24 17:38:01.000000000 +0000 @@ -3,7 +3,7 @@ import ( "flag" "fmt" - "github.com/dotcloud/docker/graphdriver/devmapper" + "github.com/docker/docker/daemon/graphdriver/devmapper" "os" "path" "sort" diff -Nru docker.io-0.9.1~dfsg1/contrib/host-integration/Dockerfile.dev docker.io-1.3.2~dfsg1/contrib/host-integration/Dockerfile.dev --- docker.io-0.9.1~dfsg1/contrib/host-integration/Dockerfile.dev 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/host-integration/Dockerfile.dev 2014-11-24 17:38:01.000000000 +0000 @@ -6,7 +6,7 @@ # FROM ubuntu:12.10 -MAINTAINER Guillaume J. Charmes +MAINTAINER Guillaume J. 
Charmes RUN apt-get update && apt-get install -y wget git mercurial @@ -19,7 +19,7 @@ ENV GOPATH /go ENV PATH $GOROOT/bin:$PATH -RUN go get github.com/dotcloud/docker && cd /go/src/github.com/dotcloud/docker && git checkout v0.6.3 +RUN go get github.com/docker/docker && cd /go/src/github.com/docker/docker && git checkout v0.6.3 ADD manager.go /manager/ RUN cd /manager && go build -o /usr/bin/manager diff -Nru docker.io-0.9.1~dfsg1/contrib/host-integration/Dockerfile.min docker.io-1.3.2~dfsg1/contrib/host-integration/Dockerfile.min --- docker.io-0.9.1~dfsg1/contrib/host-integration/Dockerfile.min 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/host-integration/Dockerfile.min 2014-11-24 17:38:01.000000000 +0000 @@ -1,4 +1,4 @@ FROM busybox -MAINTAINER Guillaume J. Charmes +MAINTAINER Guillaume J. Charmes ADD manager /usr/bin/ ENTRYPOINT ["/usr/bin/manager"] diff -Nru docker.io-0.9.1~dfsg1/contrib/host-integration/manager.go docker.io-1.3.2~dfsg1/contrib/host-integration/manager.go --- docker.io-0.9.1~dfsg1/contrib/host-integration/manager.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/host-integration/manager.go 2014-11-24 17:38:01.000000000 +0000 @@ -5,7 +5,7 @@ "encoding/json" "flag" "fmt" - "github.com/dotcloud/docker" + "github.com/docker/docker" "os" "strings" "text/template" @@ -70,7 +70,7 @@ bufErr := bytes.NewBuffer(nil) // Instanciate the Docker CLI - cli := docker.NewDockerCli(nil, bufOut, bufErr, "unix", "/var/run/docker.sock") + cli := docker.NewDockerCli(nil, bufOut, bufErr, "unix", "/var/run/docker.sock", false, nil) // Retrieve the container info if err := cli.CmdInspect(flag.Arg(0)); err != nil { // As of docker v0.6.3, CmdInspect always returns nil diff -Nru docker.io-0.9.1~dfsg1/contrib/host-integration/manager.sh docker.io-1.3.2~dfsg1/contrib/host-integration/manager.sh --- docker.io-0.9.1~dfsg1/contrib/host-integration/manager.sh 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/host-integration/manager.sh 2014-11-24 17:38:01.000000000 +0000 @@ -37,7 +37,7 @@ exit 1 fi -# TODO https://github.com/dotcloud/docker/issues/734 (docker inspect formatting) +# TODO https://github.com/docker/docker/issues/734 (docker inspect formatting) #if command -v docker > /dev/null 2>&1; then # image="$(docker inspect -f '{{.Image}}' "$cid")" # if [ "$image" ]; then diff -Nru docker.io-0.9.1~dfsg1/contrib/init/openrc/docker.initd docker.io-1.3.2~dfsg1/contrib/init/openrc/docker.initd --- docker.io-0.9.1~dfsg1/contrib/init/openrc/docker.initd 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/init/openrc/docker.initd 2014-11-24 17:38:01.000000000 +0000 @@ -11,6 +11,9 @@ start() { checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE" + ulimit -n 1048576 + ulimit -u 1048576 + ebegin "Starting docker daemon" start-stop-daemon --start --background \ --exec "$DOCKER_BINARY" \ diff -Nru docker.io-0.9.1~dfsg1/contrib/init/systemd/docker.service docker.io-1.3.2~dfsg1/contrib/init/systemd/docker.service --- docker.io-0.9.1~dfsg1/contrib/init/systemd/docker.service 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/init/systemd/docker.service 2014-11-24 17:38:01.000000000 +0000 @@ -1,11 +1,11 @@ [Unit] Description=Docker Application Container Engine -Documentation=http://docs.docker.io -After=network.target +Documentation=http://docs.docker.com +After=network.target docker.socket +Requires=docker.socket [Service] -ExecStart=/usr/bin/docker -d -Restart=on-failure +ExecStart=/usr/bin/docker -d -H 
fd:// LimitNOFILE=1048576 LimitNPROC=1048576 diff -Nru docker.io-0.9.1~dfsg1/contrib/init/systemd/docker.socket docker.io-1.3.2~dfsg1/contrib/init/systemd/docker.socket --- docker.io-0.9.1~dfsg1/contrib/init/systemd/docker.socket 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/init/systemd/docker.socket 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,12 @@ +[Unit] +Description=Docker Socket for the API +PartOf=docker.service + +[Socket] +ListenStream=/var/run/docker.sock +SocketMode=0660 +SocketUser=root +SocketGroup=docker + +[Install] +WantedBy=sockets.target diff -Nru docker.io-0.9.1~dfsg1/contrib/init/systemd/MAINTAINERS docker.io-1.3.2~dfsg1/contrib/init/systemd/MAINTAINERS --- docker.io-0.9.1~dfsg1/contrib/init/systemd/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/init/systemd/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,2 @@ +Lokesh Mandvekar (@lsm5) +Brandon Philips (@philips) diff -Nru docker.io-0.9.1~dfsg1/contrib/init/systemd/socket-activation/docker.service docker.io-1.3.2~dfsg1/contrib/init/systemd/socket-activation/docker.service --- docker.io-0.9.1~dfsg1/contrib/init/systemd/socket-activation/docker.service 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/init/systemd/socket-activation/docker.service 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -[Unit] -Description=Docker Application Container Engine -Documentation=http://docs.docker.io -After=network.target - -[Service] -ExecStart=/usr/bin/docker -d -H fd:// -Restart=on-failure -LimitNOFILE=1048576 -LimitNPROC=1048576 - -[Install] -WantedBy=multi-user.target diff -Nru docker.io-0.9.1~dfsg1/contrib/init/systemd/socket-activation/docker.socket docker.io-1.3.2~dfsg1/contrib/init/systemd/socket-activation/docker.socket --- docker.io-0.9.1~dfsg1/contrib/init/systemd/socket-activation/docker.socket 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/init/systemd/socket-activation/docker.socket 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -[Unit] -Description=Docker Socket for the API - -[Socket] -ListenStream=/var/run/docker.sock - -[Install] -WantedBy=sockets.target diff -Nru docker.io-0.9.1~dfsg1/contrib/init/sysvinit-debian/docker docker.io-1.3.2~dfsg1/contrib/init/sysvinit-debian/docker --- docker.io-0.9.1~dfsg1/contrib/init/sysvinit-debian/docker 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/init/sysvinit-debian/docker 2014-11-24 17:38:01.000000000 +0000 @@ -1,9 +1,12 @@ #!/bin/sh +set -e ### BEGIN INIT INFO # Provides: docker # Required-Start: $syslog $remote_fs # Required-Stop: $syslog $remote_fs +# Should-Start: cgroupfs-mount cgroup-lite +# Should-Stop: cgroupfs-mount cgroup-lite # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Create lightweight, portable, self-sufficient containers. 
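The `docker.socket` unit introduced above pairs with the reworked `docker.service`: systemd creates and owns `/var/run/docker.sock` (mode 0660, root:docker) and hands the listening socket to the daemon, which inherits it through `-H fd://`, while `Requires=docker.socket` plus `After=docker.socket` keep the pair consistent when the service is started directly. A hedged sketch of how this wiring is typically exercised on a systemd host:

    # Let the socket unit activate the daemon on demand:
    systemctl daemon-reload
    systemctl enable docker.socket
    systemctl start docker.socket
    # The first client that touches the socket pulls docker.service up:
    docker version
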
@@ -20,7 +23,10 @@ # modify these in /etc/default/$BASE (/etc/default/docker) DOCKER=/usr/bin/$BASE +# This is the pid file managed by docker itself DOCKER_PIDFILE=/var/run/$BASE.pid +# This is the pid file created/managed by start-stop-daemon +DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid DOCKER_LOGFILE=/var/log/$BASE.log DOCKER_OPTS= DOCKER_DESC="Docker" @@ -83,11 +89,19 @@ touch "$DOCKER_LOGFILE" chgrp docker "$DOCKER_LOGFILE" + ulimit -n 1048576 + if [ "$BASH" ]; then + ulimit -u 1048576 + else + ulimit -p 1048576 + fi + log_begin_msg "Starting $DOCKER_DESC: $BASE" start-stop-daemon --start --background \ --no-close \ --exec "$DOCKER" \ - --pidfile "$DOCKER_PIDFILE" \ + --pidfile "$DOCKER_SSD_PIDFILE" \ + --make-pidfile \ -- \ -d -p "$DOCKER_PIDFILE" \ $DOCKER_OPTS \ @@ -98,13 +112,13 @@ stop) fail_unless_root log_begin_msg "Stopping $DOCKER_DESC: $BASE" - start-stop-daemon --stop --pidfile "$DOCKER_PIDFILE" + start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE" log_end_msg $? ;; restart) fail_unless_root - docker_pid=`cat "$DOCKER_PIDFILE" 2>/dev/null` + docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null` [ -n "$docker_pid" ] \ && ps -p $docker_pid > /dev/null 2>&1 \ && $0 stop @@ -117,7 +131,7 @@ ;; status) - status_of_proc -p "$DOCKER_PIDFILE" "$DOCKER" docker + status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" "$DOCKER_DESC" ;; *) @@ -125,5 +139,3 @@ exit 1 ;; esac - -exit 0 diff -Nru docker.io-0.9.1~dfsg1/contrib/init/sysvinit-debian/docker.default docker.io-1.3.2~dfsg1/contrib/init/sysvinit-debian/docker.default --- docker.io-0.9.1~dfsg1/contrib/init/sysvinit-debian/docker.default 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/init/sysvinit-debian/docker.default 2014-11-24 17:38:01.000000000 +0000 @@ -4,7 +4,7 @@ #DOCKER="/usr/local/bin/docker" # Use DOCKER_OPTS to modify the daemon startup options. -#DOCKER_OPTS="-dns 8.8.8.8 -dns 8.8.4.4" +#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4" # If you need Docker to use an HTTP proxy, it can also be specified here. #export http_proxy="http://127.0.0.1:3128/" diff -Nru docker.io-0.9.1~dfsg1/contrib/init/sysvinit-redhat/docker docker.io-1.3.2~dfsg1/contrib/init/sysvinit-redhat/docker --- docker.io-0.9.1~dfsg1/contrib/init/sysvinit-redhat/docker 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/init/sysvinit-redhat/docker 2014-11-24 17:38:01.000000000 +0000 @@ -2,10 +2,10 @@ # # /etc/rc.d/init.d/docker # -# Daemon for docker.io -# +# Daemon for docker.com +# # chkconfig: 2345 95 95 -# description: Daemon for docker.io +# description: Daemon for docker.com ### BEGIN INIT INFO # Provides: docker @@ -16,7 +16,7 @@ # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: start and stop docker -# Description: Daemon for docker.io +# Description: Daemon for docker.com ### END INIT INFO # Source function library. @@ -49,6 +49,13 @@ $exec -d $other_args &>> $logfile & pid=$! touch $lockfile + # wait up to 10 seconds for the pidfile to exist. see + # https://github.com/docker/docker/issues/5359 + tries=0 + while [ ! -f $pidfile -a $tries -lt 10 ]; do + sleep 1 + tries=$((tries + 1)) + done success echo else @@ -61,7 +68,7 @@ stop() { echo -n $"Stopping $prog: " - killproc -p $pidfile $prog + killproc -p $pidfile -d 300 $prog retval=$? 
echo [ $retval -eq 0 ] && rm -f $lockfile diff -Nru docker.io-0.9.1~dfsg1/contrib/init/upstart/docker.conf docker.io-1.3.2~dfsg1/contrib/init/upstart/docker.conf --- docker.io-0.9.1~dfsg1/contrib/init/upstart/docker.conf 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/init/upstart/docker.conf 2014-11-24 17:38:01.000000000 +0000 @@ -1,7 +1,9 @@ description "Docker daemon" -start on filesystem +start on (local-filesystems and net-device-up IFACE!=lo) stop on runlevel [!2345] +limit nofile 524288 1048576 +limit nproc 524288 1048576 respawn @@ -35,5 +37,5 @@ if [ -f /etc/default/$UPSTART_JOB ]; then . /etc/default/$UPSTART_JOB fi - "$DOCKER" -d $DOCKER_OPTS + exec "$DOCKER" -d $DOCKER_OPTS end script diff -Nru docker.io-0.9.1~dfsg1/contrib/mkimage/busybox-static docker.io-1.3.2~dfsg1/contrib/mkimage/busybox-static --- docker.io-0.9.1~dfsg1/contrib/mkimage/busybox-static 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/mkimage/busybox-static 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +busybox="$(which busybox 2>/dev/null || true)" +if [ -z "$busybox" ]; then + echo >&2 'error: busybox: not found' + echo >&2 ' install it with your distribution "busybox-static" package' + exit 1 +fi +if ! ldd "$busybox" 2>&1 | grep -q 'not a dynamic executable'; then + echo >&2 "error: '$busybox' appears to be a dynamic executable" + echo >&2 ' you should install your distribution "busybox-static" package instead' + exit 1 +fi + +mkdir -p "$rootfsDir/bin" +rm -f "$rootfsDir/bin/busybox" # just in case +cp "$busybox" "$rootfsDir/bin/busybox" + +( + cd "$rootfsDir" + + IFS=$'\n' + modules=( $(bin/busybox --list-modules) ) + unset IFS + + for module in "${modules[@]}"; do + mkdir -p "$(dirname "$module")" + ln -sf /bin/busybox "$module" + done +) diff -Nru docker.io-0.9.1~dfsg1/contrib/mkimage/debootstrap docker.io-1.3.2~dfsg1/contrib/mkimage/debootstrap --- docker.io-0.9.1~dfsg1/contrib/mkimage/debootstrap 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/mkimage/debootstrap 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,193 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +# we have to do a little fancy footwork to make sure "rootfsDir" becomes the second non-option argument to debootstrap + +before=() +while [ $# -gt 0 ] && [[ "$1" == -* ]]; do + before+=( "$1" ) + shift +done + +suite="$1" +shift + +( + set -x + debootstrap "${before[@]}" "$suite" "$rootfsDir" "$@" +) + +# now for some Docker-specific tweaks + +# prevent init scripts from running during install/update +echo >&2 "+ echo exit 101 > '$rootfsDir/usr/sbin/policy-rc.d'" +cat > "$rootfsDir/usr/sbin/policy-rc.d" <<'EOF' +#!/bin/sh + +# For most Docker users, "apt-get install" only happens during "docker build", +# where starting services doesn't work and often fails in humorous ways. This +# prevents those failures by stopping the services from attempting to start. + +exit 101 +EOF +chmod +x "$rootfsDir/usr/sbin/policy-rc.d" + +# prevent upstart scripts from running during install/update +( + set -x + chroot "$rootfsDir" dpkg-divert --local --rename --add /sbin/initctl + cp -a "$rootfsDir/usr/sbin/policy-rc.d" "$rootfsDir/sbin/initctl" + sed -i 's/^exit.*/exit 0/' "$rootfsDir/sbin/initctl" +) + +# shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB) +( set -x; chroot "$rootfsDir" apt-get clean ) + +# Ubuntu 10.04 sucks... 
:) +if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then + # force dpkg not to call sync() after package extraction (speeding up installs) + echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'" + cat > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" <<-'EOF' + # For most Docker users, package installs happen during "docker build", which + # doesn't survive power loss and gets restarted clean afterwards anyhow, so + # this minor tweak gives us a nice speedup (much nicer on spinning disks, + # obviously). + + force-unsafe-io + EOF +fi + +if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then + # _keep_ us lean by effectively running "apt-get clean" after every install + aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' + echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-clean'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<-EOF + # Since for most Docker users, package installs happen in "docker build" steps, + # they essentially become individual layers due to the way Docker handles + # layering, especially using CoW filesystems. What this means for us is that + # the caches that APT keeps end up just wasting space in those layers, making + # our layers unnecessarily large (especially since we'll normally never use + # these caches again and will instead just "docker build" again and make a brand + # new image). + + # Ideally, these would just be invoking "apt-get clean", but in our testing, + # that ended up being cyclic and we got stuck on APT's lock, so we get this fun + # creation that's essentially just "apt-get clean". + DPkg::Post-Invoke { ${aptGetClean} }; + APT::Update::Post-Invoke { ${aptGetClean} }; + + Dir::Cache::pkgcache ""; + Dir::Cache::srcpkgcache ""; + + # Note that we do realize this isn't the ideal way to do this, and are always + # open to better suggestions (https://github.com/docker/docker/issues). + EOF + + # remove apt-cache translations for fast "apt-get update" + echo >&2 "+ echo Acquire::Languages 'none' > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" <<-'EOF' + # In Docker, we don't often need the "Translations" files, so we're just wasting + # time and space by downloading them, and this inhibits that. For users that do + # need them, it's a simple matter to delete this file and "apt-get update". :) + + Acquire::Languages "none"; + EOF + + echo >&2 "+ echo Acquire::GzipIndexes 'true' > '$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<-'EOF' + # Since Docker users using "RUN apt-get update && apt-get install -y ..." in + # their Dockerfiles don't go delete the lists files afterwards, we want them to + # be as small as possible on-disk, so we explicitly request "gz" versions and + # tell Apt to keep them gzipped on-disk. + + # For comparison, an "apt-get update" layer without this on a pristine + # "debian:wheezy" base image was "29.88 MB", where with this it was only + # "8.273 MB". + + Acquire::GzipIndexes "true"; + Acquire::CompressionTypes::Order:: "gz"; + EOF +fi + +if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then + # tweak sources.list, where appropriate + lsbDist= + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/os-release" ]; then + lsbDist="$(. "$rootfsDir/etc/os-release" && echo "$ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/lsb-release" ]; then + lsbDist="$(. 
"$rootfsDir/etc/lsb-release" && echo "$DISTRIB_ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/debian_version" ]; then + lsbDist='Debian' + fi + # normalize to lowercase for easier matching + lsbDist="$(echo "$lsbDist" | tr '[:upper:]' '[:lower:]')" + case "$lsbDist" in + debian) + # updates and security! + if [ "$suite" != 'sid' -a "$suite" != 'unstable' ]; then + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates / + " "$rootfsDir/etc/apt/sources.list" + echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list" + # LTS + if [ "$suite" = 'squeeze' -o "$suite" = 'oldstable' ]; then + head -1 "$rootfsDir/etc/apt/sources.list" \ + | sed "s/ $suite / squeeze-lts /" \ + >> "$rootfsDir/etc/apt/sources.list" + fi + ) + fi + ;; + ubuntu) + # add the updates and security repositories + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates /; p; + s/ $suite-updates / ${suite}-security / + " "$rootfsDir/etc/apt/sources.list" + ) + ;; + tanglu) + # add the updates repository + if [ "$suite" != 'devel' ]; then + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates / + " "$rootfsDir/etc/apt/sources.list" + ) + fi + ;; + steamos) + # add contrib and non-free if "main" is the only component + ( + set -x + sed -i "s/ $suite main$/ $suite main contrib non-free/" "$rootfsDir/etc/apt/sources.list" + ) + ;; + esac +fi + +( + set -x + + # make sure we're fully up-to-date + chroot "$rootfsDir" bash -c 'apt-get update && apt-get dist-upgrade -y' + + # delete all the apt list files since they're big and get stale quickly + rm -rf "$rootfsDir/var/lib/apt/lists"/* + # this forces "apt-get update" in dependent images, which is also good + + mkdir "$rootfsDir/var/lib/apt/lists/partial" # Lucid... "E: Lists directory /var/lib/apt/lists/partial is missing." +) diff -Nru docker.io-0.9.1~dfsg1/contrib/mkimage/.febootstrap-minimize docker.io-1.3.2~dfsg1/contrib/mkimage/.febootstrap-minimize --- docker.io-0.9.1~dfsg1/contrib/mkimage/.febootstrap-minimize 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/mkimage/.febootstrap-minimize 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +( + cd "$rootfsDir" + + # effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" + # locales + rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} + # docs + rm -rf usr/share/{man,doc,info,gnome/help} + # cracklib + rm -rf usr/share/cracklib + # i18n + rm -rf usr/share/i18n + # yum cache + rm -rf var/cache/yum + mkdir -p --mode=0755 var/cache/yum + # sln + rm -rf sbin/sln + # ldconfig + #rm -rf sbin/ldconfig + rm -rf etc/ld.so.cache var/cache/ldconfig + mkdir -p --mode=0755 var/cache/ldconfig +) diff -Nru docker.io-0.9.1~dfsg1/contrib/mkimage/mageia-urpmi docker.io-1.3.2~dfsg1/contrib/mkimage/mageia-urpmi --- docker.io-0.9.1~dfsg1/contrib/mkimage/mageia-urpmi 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/mkimage/mageia-urpmi 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +# +# Needs to be run from Mageia 4 or greater for kernel support for docker. +# +# Mageia 4 does not have docker available in official repos, so please +# install and run the docker binary manually. +# +# Tested working versions are for Mageia 2 onwards (inc. cauldron). 
+# +set -e + +rootfsDir="$1" +shift + +optTemp=$(getopt --options '+v:,m:' --longoptions 'version:,mirror:' --name mageia-urpmi -- "$@") +eval set -- "$optTemp" +unset optTemp + +installversion= +mirror= +while true; do + case "$1" in + -v|--version) installversion="$2" ; shift 2 ;; + -m|--mirror) mirror="$2" ; shift 2 ;; + --) shift ; break ;; + esac +done + +if [ -z $installversion ]; then + # Attempt to match host version + if [ -r /etc/mageia-release ]; then + installversion="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' /etc/mageia-release)" + else + echo "Error: no version supplied and unable to detect host mageia version" + exit 1 + fi +fi + +if [ -z $mirror ]; then + # No mirror provided, default to mirrorlist + mirror="--mirrorlist https://mirrors.mageia.org/api/mageia.$installversion.x86_64.list" +fi + +( + set -x + urpmi.addmedia --distrib \ + $mirror \ + --urpmi-root "$rootfsDir" + urpmi basesystem-minimal urpmi \ + --auto \ + --no-suggests \ + --urpmi-root "$rootfsDir" \ + --root "$rootfsDir" +) + +"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" + +if [ -d "$rootfsDir/etc/sysconfig" ]; then + # allow networking init scripts inside the container to work without extra steps + echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" +fi diff -Nru docker.io-0.9.1~dfsg1/contrib/mkimage/rinse docker.io-1.3.2~dfsg1/contrib/mkimage/rinse --- docker.io-0.9.1~dfsg1/contrib/mkimage/rinse 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/mkimage/rinse 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +# specifying --arch below is safe because "$@" can override it and the "latest" one wins :) + +( + set -x + rinse --directory "$rootfsDir" --arch amd64 "$@" +) + +"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" + +if [ -d "$rootfsDir/etc/sysconfig" ]; then + # allow networking init scripts inside the container to work without extra steps + echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" +fi + +# make sure we're fully up-to-date, too +( + set -x + chroot "$rootfsDir" yum update -y +) diff -Nru docker.io-0.9.1~dfsg1/contrib/mkimage-alpine.sh docker.io-1.3.2~dfsg1/contrib/mkimage-alpine.sh --- docker.io-0.9.1~dfsg1/contrib/mkimage-alpine.sh 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/mkimage-alpine.sh 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,82 @@ +#!/bin/sh + +set -e + +[ $(id -u) -eq 0 ] || { + printf >&2 '%s requires root\n' "$0" + exit 1 +} + +usage() { + printf >&2 '%s: [-r release] [-m mirror] [-s]\n' "$0" + exit 1 +} + +tmp() { + TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-XXXXXXXXXX) + ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-rootfs-XXXXXXXXXX) + trap "rm -rf $TMP $ROOTFS" EXIT TERM INT +} + +apkv() { + curl -sSL $REPO/$ARCH/APKINDEX.tar.gz | tar -Oxz | + grep '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2 +} + +getapk() { + curl -sSL $REPO/$ARCH/apk-tools-static-$(apkv).apk | + tar -xz -C $TMP sbin/apk.static +} + +mkbase() { + $TMP/sbin/apk.static --repository $REPO --update-cache --allow-untrusted \ + --root $ROOTFS --initdb add alpine-base +} + +conf() { + printf '%s\n' $REPO > $ROOTFS/etc/apk/repositories +} + +pack() { + local id + id=$(tar --numeric-owner -C $ROOTFS -c . | docker import - alpine:$REL) + + docker tag $id alpine:latest + docker run -i -t alpine printf 'alpine:%s with id=%s created!\n' $REL $id +} + +save() { + [ $SAVE -eq 1 ] || return + + tar --numeric-owner -C $ROOTFS -c . 
| xz > rootfs.tar.xz +} + +while getopts "hr:m:s" opt; do + case $opt in + r) + REL=$OPTARG + ;; + m) + MIRROR=$OPTARG + ;; + s) + SAVE=1 + ;; + *) + usage + ;; + esac +done + +REL=${REL:-edge} +MIRROR=${MIRROR:-http://nl.alpinelinux.org/alpine} +SAVE=${SAVE:-0} +REPO=$MIRROR/$REL/main +ARCH=$(uname -m) + +tmp +getapk +mkbase +conf +pack +save diff -Nru docker.io-0.9.1~dfsg1/contrib/mkimage-arch.sh docker.io-1.3.2~dfsg1/contrib/mkimage-arch.sh --- docker.io-0.9.1~dfsg1/contrib/mkimage-arch.sh 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/mkimage-arch.sh 2014-11-24 17:38:01.000000000 +0000 @@ -5,34 +5,35 @@ set -e hash pacstrap &>/dev/null || { - echo "Could not find pacstrap. Run pacman -S arch-install-scripts" - exit 1 + echo "Could not find pacstrap. Run pacman -S arch-install-scripts" + exit 1 } hash expect &>/dev/null || { - echo "Could not find expect. Run pacman -S expect" - exit 1 + echo "Could not find expect. Run pacman -S expect" + exit 1 } -ROOTFS=$(mktemp -d /tmp/rootfs-archlinux-XXXXXXXXXX) +ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX) chmod 755 $ROOTFS # packages to ignore for space savings PKGIGNORE=linux,jfsutils,lvm2,cryptsetup,groff,man-db,man-pages,mdadm,pciutils,pcmciautils,reiserfsprogs,s-nail,xfsprogs expect <&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/busybox-static' +echo >&2 + BUSYBOX=$(which busybox) [ "$BUSYBOX" ] || { echo "Sorry, I could not locate busybox." @@ -10,7 +14,7 @@ } set -e -ROOTFS=/tmp/rootfs-busybox-$$-$RANDOM +ROOTFS=${TMPDIR:-/var/tmp}/rootfs-busybox-$$-$RANDOM mkdir $ROOTFS cd $ROOTFS diff -Nru docker.io-0.9.1~dfsg1/contrib/mkimage-crux.sh docker.io-1.3.2~dfsg1/contrib/mkimage-crux.sh --- docker.io-0.9.1~dfsg1/contrib/mkimage-crux.sh 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/mkimage-crux.sh 2014-11-24 17:38:01.000000000 +0000 @@ -14,9 +14,9 @@ ISO=${1} -ROOTFS=$(mktemp -d /tmp/rootfs-crux-XXXXXXXXXX) -CRUX=$(mktemp -d /tmp/crux-XXXXXXXXXX) -TMP=$(mktemp -d /tmp/XXXXXXXXXX) +ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-crux-XXXXXXXXXX) +CRUX=$(mktemp -d ${TMPDIR:-/var/tmp}/crux-XXXXXXXXXX) +TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/XXXXXXXXXX) VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') diff -Nru docker.io-0.9.1~dfsg1/contrib/mkimage-debootstrap.sh docker.io-1.3.2~dfsg1/contrib/mkimage-debootstrap.sh --- docker.io-0.9.1~dfsg1/contrib/mkimage-debootstrap.sh 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/mkimage-debootstrap.sh 2014-11-24 17:38:01.000000000 +0000 @@ -1,6 +1,10 @@ #!/usr/bin/env bash set -e +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/debootstrap' +echo >&2 + variant='minbase' include='iproute,iputils-ping' arch='amd64' # intentionally undocumented for now @@ -43,7 +47,7 @@ debianStable=wheezy debianUnstable=sid # this should match the name found at http://releases.ubuntu.com/ -ubuntuLatestLTS=precise +ubuntuLatestLTS=trusty # this should match the name found at http://releases.tanglu.org/ tangluLatest=aequorea @@ -114,7 +118,7 @@ # will be filled in later, if [ -z "$skipDetection" ] lsbDist='' -target="/tmp/docker-rootfs-debootstrap-$suite-$$-$RANDOM" +target="${TMPDIR:-/var/tmp}/docker-rootfs-debootstrap-$suite-$$-$RANDOM" cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" returnTo="$(pwd -P)" @@ -140,7 +144,7 @@ # initctl (for some pesky upstart scripts) sudo chroot . 
dpkg-divert --local --rename --add /sbin/initctl sudo ln -sf /bin/true sbin/initctl - # see https://github.com/dotcloud/docker/issues/446#issuecomment-16953173 + # see https://github.com/docker/docker/issues/446#issuecomment-16953173 # shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB) sudo chroot . apt-get clean @@ -219,6 +223,7 @@ # make sure our packages lists are as up to date as we can get them sudo chroot . apt-get update + sudo chroot . apt-get dist-upgrade -y fi if [ "$justTar" ]; then diff -Nru docker.io-0.9.1~dfsg1/contrib/mkimage-rinse.sh docker.io-1.3.2~dfsg1/contrib/mkimage-rinse.sh --- docker.io-0.9.1~dfsg1/contrib/mkimage-rinse.sh 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/mkimage-rinse.sh 2014-11-24 17:38:01.000000000 +0000 @@ -8,6 +8,10 @@ set -e +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/rinse' +echo >&2 + repo="$1" distro="$2" mirror="$3" @@ -35,7 +39,7 @@ exit 1 fi -target="/tmp/docker-rootfs-rinse-$distro-$$-$RANDOM" +target="${TMPDIR:-/var/tmp}/docker-rootfs-rinse-$distro-$$-$RANDOM" cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" returnTo="$(pwd -P)" diff -Nru docker.io-0.9.1~dfsg1/contrib/mkimage.sh docker.io-1.3.2~dfsg1/contrib/mkimage.sh --- docker.io-0.9.1~dfsg1/contrib/mkimage.sh 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/mkimage.sh 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,107 @@ +#!/usr/bin/env bash +set -e + +mkimg="$(basename "$0")" + +usage() { + echo >&2 "usage: $mkimg [-d dir] [-t tag] script [script-args]" + echo >&2 " ie: $mkimg -t someuser/debian debootstrap --variant=minbase jessie" + echo >&2 " $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal --components=main,universe trusty" + echo >&2 " $mkimg -t someuser/busybox busybox-static" + echo >&2 " $mkimg -t someuser/centos:5 rinse --distribution centos-5" + echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4" + echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4 --mirror=http://somemirror/" + exit 1 +} + +scriptDir="$(dirname "$(readlink -f "$BASH_SOURCE")")/mkimage" + +optTemp=$(getopt --options '+d:t:h' --longoptions 'dir:,tag:,help' --name "$mkimg" -- "$@") +eval set -- "$optTemp" +unset optTemp + +dir= +tag= +while true; do + case "$1" in + -d|--dir) dir="$2" ; shift 2 ;; + -t|--tag) tag="$2" ; shift 2 ;; + -h|--help) usage ;; + --) shift ; break ;; + esac +done + +script="$1" +[ "$script" ] || usage +shift + +if [ ! 
-x "$scriptDir/$script" ]; then + echo >&2 "error: $script does not exist or is not executable" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +# don't mistake common scripts like .febootstrap-minimize as image-creators +if [[ "$script" == .* ]]; then + echo >&2 "error: $script is a script helper, not a script" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +delDir= +if [ -z "$dir" ]; then + dir="$(mktemp -d ${TMPDIR:-/var/tmp}/docker-mkimage.XXXXXXXXXX)" + delDir=1 +fi + +rootfsDir="$dir/rootfs" +( set -x; mkdir -p "$rootfsDir" ) + +# pass all remaining arguments to $script +"$scriptDir/$script" "$rootfsDir" "$@" + +# Docker mounts tmpfs at /dev and procfs at /proc so we can remove them +rm -rf "$rootfsDir/dev" "$rootfsDir/proc" +mkdir -p "$rootfsDir/dev" "$rootfsDir/proc" + +# make sure /etc/resolv.conf has something useful in it +mkdir -p "$rootfsDir/etc" +cat > "$rootfsDir/etc/resolv.conf" <<'EOF' +nameserver 8.8.8.8 +nameserver 8.8.4.4 +EOF + +tarFile="$dir/rootfs.tar.xz" +touch "$tarFile" + +( + set -x + tar --numeric-owner -caf "$tarFile" -C "$rootfsDir" --transform='s,^./,,' . +) + +echo >&2 "+ cat > '$dir/Dockerfile'" +cat > "$dir/Dockerfile" <<'EOF' +FROM scratch +ADD rootfs.tar.xz / +EOF + +# if our generated image has a decent shell, let's set a default command +for shell in /bin/bash /usr/bin/fish /usr/bin/zsh /bin/sh; do + if [ -x "$rootfsDir/$shell" ]; then + ( set -x; echo 'CMD ["'"$shell"'"]' >> "$dir/Dockerfile" ) + break + fi +done + +( set -x; rm -rf "$rootfsDir" ) + +if [ "$tag" ]; then + ( set -x; docker build -t "$tag" "$dir" ) +elif [ "$delDir" ]; then + # if we didn't specify a tag and we're going to delete our dir, let's just build an untagged image so that we did _something_ + ( set -x; docker build "$dir" ) +fi + +if [ "$delDir" ]; then + ( set -x; rm -rf "$dir" ) +fi diff -Nru docker.io-0.9.1~dfsg1/contrib/mkimage-unittest.sh docker.io-1.3.2~dfsg1/contrib/mkimage-unittest.sh --- docker.io-0.9.1~dfsg1/contrib/mkimage-unittest.sh 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/mkimage-unittest.sh 2014-11-24 17:38:01.000000000 +0000 @@ -15,7 +15,7 @@ shopt -s extglob set -ex -ROOTFS=`mktemp -d /tmp/rootfs-busybox.XXXXXXXXXX` +ROOTFS=`mktemp -d ${TMPDIR:-/var/tmp}/rootfs-busybox.XXXXXXXXXX` trap "rm -rf $ROOTFS" INT QUIT TERM cd $ROOTFS diff -Nru docker.io-0.9.1~dfsg1/contrib/mkseccomp.pl docker.io-1.3.2~dfsg1/contrib/mkseccomp.pl --- docker.io-0.9.1~dfsg1/contrib/mkseccomp.pl 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/mkseccomp.pl 2014-11-24 17:38:01.000000000 +0000 @@ -10,7 +10,7 @@ # can configure the list of syscalls. When run, this script produces output # which, when stored in a file, can be passed to docker as follows: # -# docker run -lxc-conf="lxc.seccomp=$file" +# docker run --lxc-conf="lxc.seccomp=$file" # # The included sample file shows how to cut about a quarter of all syscalls, # which affecting most applications. diff -Nru docker.io-0.9.1~dfsg1/contrib/nuke-graph-directory.sh docker.io-1.3.2~dfsg1/contrib/nuke-graph-directory.sh --- docker.io-0.9.1~dfsg1/contrib/nuke-graph-directory.sh 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/nuke-graph-directory.sh 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,64 @@ +#!/bin/sh +set -e + +dir="$1" + +if [ -z "$dir" ]; then + { + echo 'This script is for destroying old /var/lib/docker directories more safely than' + echo ' "rm -rf", which can cause data loss or other serious issues.' 
+ echo + echo "usage: $0 directory" + echo " ie: $0 /var/lib/docker" + } >&2 + exit 1 +fi + +if [ "$(id -u)" != 0 ]; then + echo >&2 "error: $0 must be run as root" + exit 1 +fi + +if [ ! -d "$dir" ]; then + echo >&2 "error: $dir is not a directory" + exit 1 +fi + +dir="$(readlink -f "$dir")" + +echo +echo "Nuking $dir ..." +echo ' (if this is wrong, press Ctrl+C NOW!)' +echo + +( set -x; sleep 10 ) +echo + +dir_in_dir() { + inner="$1" + outer="$2" + [ "${inner#$outer}" != "$inner" ] +} + +# let's start by unmounting any submounts in $dir +# (like -v /home:... for example - DON'T DELETE MY HOME DIRECTORY BRU!) +for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do + mount="$(readlink -f "$mount" || true)" + if dir_in_dir "$mount" "$dir"; then + ( set -x; umount -f "$mount" ) + fi +done + +# now, let's go destroy individual btrfs subvolumes, if any exist +if command -v btrfs &> /dev/null; then + root="$(df "$dir" | awk 'NR>1 { print $NF }')" + for subvol in $(btrfs subvolume list -o "$root" 2>/dev/null | awk -F' path ' '{ print $2 }'); do + subvolDir="$root/$subvol" + if dir_in_dir "$subvolDir" "$dir"; then + ( set -x; btrfs subvolume delete "$subvolDir" ) + fi + done +fi + +# finally, DESTROY ALL THINGS +( set -x; rm -rf "$dir" ) diff -Nru docker.io-0.9.1~dfsg1/contrib/prepare-commit-msg.hook docker.io-1.3.2~dfsg1/contrib/prepare-commit-msg.hook --- docker.io-0.9.1~dfsg1/contrib/prepare-commit-msg.hook 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/prepare-commit-msg.hook 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -#!/bin/sh -# Auto sign all commits to allow them to be used by the Docker project. -# see https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md#sign-your-work -# -GH_USER=$(git config --get github.user) -SOB=$(git var GIT_AUTHOR_IDENT | sed -n "s/^\(.*>\).*$/Docker-DCO-1.1-Signed-off-by: \1 \(github: $GH_USER\)/p") -grep -qs "^$SOB" "$1" || { - echo - echo "$SOB" -} >> "$1" diff -Nru docker.io-0.9.1~dfsg1/contrib/syntax/kate/Dockerfile.xml docker.io-1.3.2~dfsg1/contrib/syntax/kate/Dockerfile.xml --- docker.io-0.9.1~dfsg1/contrib/syntax/kate/Dockerfile.xml 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/syntax/kate/Dockerfile.xml 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,68 @@ [68 added lines: a Kate XML syntax-highlighting definition whose markup was lost in extraction; the recoverable content is the Dockerfile keyword list FROM, MAINTAINER, ENV, RUN, ONBUILD, COPY, ADD, VOLUME, EXPOSE, ENTRYPOINT, CMD, WORKDIR, USER] diff -Nru docker.io-0.9.1~dfsg1/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage docker.io-1.3.2~dfsg1/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage --- docker.io-0.9.1~dfsg1/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-0.9.1~dfsg1/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage 2014-11-24 17:38:01.000000000 +0000 @@ -12,7 +12,7 @@ [plist markup partially lost in extraction] match - ^\s*(ONBUILD\s+)?(FROM|MAINTAINER|RUN|EXPOSE|ENV|ADD|VOLUME|USER|WORKDIR)\s + ^\s*(ONBUILD\s+)?(FROM|MAINTAINER|RUN|EXPOSE|ENV|ADD|VOLUME|USER|WORKDIR|COPY)\s captures 0 diff -Nru docker.io-0.9.1~dfsg1/contrib/syntax/vim/syntax/dockerfile.vim docker.io-1.3.2~dfsg1/contrib/syntax/vim/syntax/dockerfile.vim --- docker.io-0.9.1~dfsg1/contrib/syntax/vim/syntax/dockerfile.vim 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/syntax/vim/syntax/dockerfile.vim 2014-11-24 17:38:01.000000000 +0000 @@ -11,7 +11,7 @@ syntax case ignore -syntax
match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|CMD|ENTRYPOINT|ENV|EXPOSE|FROM|MAINTAINER|RUN|USER|VOLUME|WORKDIR)\s/ +syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|CMD|ENTRYPOINT|ENV|EXPOSE|FROM|MAINTAINER|RUN|USER|VOLUME|WORKDIR|COPY)\s/ highlight link dockerfileKeyword Keyword syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/ diff -Nru docker.io-0.9.1~dfsg1/contrib/vagrant-docker/README.md docker.io-1.3.2~dfsg1/contrib/vagrant-docker/README.md --- docker.io-0.9.1~dfsg1/contrib/vagrant-docker/README.md 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/vagrant-docker/README.md 2014-11-24 17:38:01.000000000 +0000 @@ -31,20 +31,20 @@ respawn script - /usr/bin/docker -d -H=tcp://0.0.0.0:4243 + /usr/bin/docker -d -H=tcp://0.0.0.0:2375 end script ``` Once that's done, you need to set up a SSH tunnel between your host machine and the vagrant machine that's running Docker. This can be done by running the following command in a host terminal: ``` -ssh -L 4243:localhost:4243 -p 2222 vagrant@localhost +ssh -L 2375:localhost:2375 -p 2222 vagrant@localhost ``` -(The first 4243 is what your host can connect to, the second 4243 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.) +(The first 2375 is what your host can connect to, the second 2375 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.) Note that because the port has been changed, to run docker commands from within the command line you must run them like this: ``` -sudo docker -H 0.0.0.0:4243 < commands for docker > +sudo docker -H 0.0.0.0:2375 < commands for docker > ``` diff -Nru docker.io-0.9.1~dfsg1/contrib/zfs/MAINTAINERS docker.io-1.3.2~dfsg1/contrib/zfs/MAINTAINERS --- docker.io-0.9.1~dfsg1/contrib/zfs/MAINTAINERS 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/zfs/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -Gurjeet Singh (gurjeet.singh.im) diff -Nru docker.io-0.9.1~dfsg1/contrib/zfs/README.md docker.io-1.3.2~dfsg1/contrib/zfs/README.md --- docker.io-0.9.1~dfsg1/contrib/zfs/README.md 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/contrib/zfs/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -# ZFS Storage Driver - -This is a placeholder to declare the presence and status of ZFS storage driver -for containers. - -The current development is done in Gurjeet Singh's fork of Docker, under the -branch named [zfs_driver]. - -[zfs_driver]: https://github.com/gurjeet/docker/tree/zfs_driver - - -# Status - -Alpha: The code is now capable of creating, running and destroying containers -and images. - -The code is under development. Contributions in the form of suggestions, -code-reviews, and patches are welcome. - -Please send the communication to gurjeet@singh.im and CC at least one Docker -mailing list. - - diff -Nru docker.io-0.9.1~dfsg1/CONTRIBUTING.md docker.io-1.3.2~dfsg1/CONTRIBUTING.md --- docker.io-0.9.1~dfsg1/CONTRIBUTING.md 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/CONTRIBUTING.md 2014-11-24 17:38:01.000000000 +0000 @@ -4,20 +4,58 @@ started. 
They are probably not perfect, please let us know if anything feels wrong or incomplete. +## Topics + +* [Security Reports](#security-reports) +* [Design and Cleanup Proposals](#design-and-cleanup-proposals) +* [Reporting Issues](#reporting-issues) +* [Build Environment](#build-environment) +* [Contribution Guidelines](#contribution-guidelines) +* [Community Guidelines](#docker-community-guidelines) + +## Security Reports + +Please **DO NOT** file an issue for security related issues. Please send your +reports to [security@docker.com](mailto:security@docker.com) instead. + +## Design and Cleanup Proposals + +When considering a design proposal, we are looking for: + +* A description of the problem this design proposal solves +* An issue -- not a pull request -- that describes what you will take action on + * Please prefix your issue with `Proposal:` in the title +* Please review [the existing Proposals](https://github.com/dotcloud/docker/issues?direction=asc&labels=Proposal&page=1&sort=created&state=open) + before reporting a new issue. You can always pair with someone if you both + have the same idea. + +When considering a cleanup task, we are looking for: + +* A description of the refactors made + * Please note any logic changes if necessary +* A pull request with the code + * Please prefix your PR's title with `Cleanup:` so we can quickly address it. + * Your pull request must remain up to date with master, so rebase as necessary. + ## Reporting Issues -When reporting [issues](https://github.com/dotcloud/docker/issues) -on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc), -the output of `uname -a` and the output of `docker version` along with -the output of `docker info`. Please include the steps required to reproduce -the problem if possible and applicable. -This information will help us review and fix your issue faster. +When reporting [issues](https://github.com/docker/docker/issues) on +GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc). +Please include: + +* The output of `uname -a`. +* The output of `docker version`. +* The output of `docker -D info`. + +Please also include the steps required to reproduce the problem if +possible and applicable. This information will help us review and fix +your issue faster. ## Build Environment For instructions on setting up your development environment, please see our dedicated [dev environment setup -docs](http://docs.docker.io/en/latest/contributing/devenvironment/). +docs](http://docs.docker.com/contributing/devenvironment/). ## Contribution guidelines @@ -34,7 +72,7 @@ We're trying very hard to keep Docker lean and focused. We don't want it to do everything for everybody. This means that we might decide against incorporating a new feature. However, there might be a way to implement -that feature *on top of* docker. +that feature *on top of* Docker. ### Discuss your design on the mailing list @@ -48,7 +86,7 @@ ### Create issues... Any significant improvement should be documented as [a GitHub -issue](https://github.com/dotcloud/docker/issues) before anybody +issue](https://github.com/docker/docker/issues) before anybody starts working on it. ### ...but check for existing issues first! 
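The issue-reporting checklist above amounts to three commands; a minimal sketch that captures all of them in one go (the output path is an arbitrary example):

    # collect the diagnostics CONTRIBUTING.md asks for in bug reports
    {
        uname -a
        docker version
        docker -D info
    } > /tmp/docker-issue.txt 2>&1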
@@ -60,12 +98,12 @@ ### Conventions -Fork the repo and make changes on your fork in a feature branch: +Fork the repository and make changes on your fork in a feature branch: -- If it's a bugfix branch, name it XXX-something where XXX is the number of the - issue +- If it's a bug fix branch, name it XXXX-something where XXXX is the number of the + issue. - If it's a feature branch, create an enhancement issue to announce your - intentions, and name it XXX-something where XXX is the number of the issue. + intentions, and name it XXXX-something where XXXX is the number of the issue. Submit unit tests for your changes. Go has a great test framework built in; use it! Take a look at existing tests for inspiration. Run the full test suite on @@ -73,22 +111,19 @@ Update the documentation when creating or modifying features. Test your documentation changes for clarity, concision, and correctness, as -well as a clean documentation build. See ``docs/README.md`` for more -information on building the docs and how docs get released. +well as a clean documentation build. See `docs/README.md` for more +information on building the docs and how they get released. Write clean code. Universally formatted code promotes ease of writing, reading, -and maintenance. Always run `go fmt` before committing your changes. Most -editors have plugins that do this automatically, and there's also a git -pre-commit hook: - -``` -curl -o .git/hooks/pre-commit https://raw.github.com/edsrzf/gofmt-git-hook/master/fmt-check && chmod +x .git/hooks/pre-commit -``` +and maintenance. Always run `gofmt -s -w file.go` on each changed file before +committing your changes. Most editors have plug-ins that do this automatically. Pull requests descriptions should be as clear as possible and include a reference to all the issues that they address. -Pull requests must not contain commits from other users or branches. +Commit messages must start with a capitalized and short summary (max. 50 +chars) written in the imperative, followed by an optional, more detailed +explanatory text which is separated from the summary by an empty line. Code review comments may be added to your pull request. Discuss, then make the suggested modifications and push additional commits to your feature branch. Be @@ -96,28 +131,33 @@ request automatically, but the reviewers will not be notified unless you comment. +Pull requests must be cleanly rebased ontop of master without multiple branches +mixed into the PR. + +**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your +feature branch to update your pull request rather than `merge master`. + Before the pull request is merged, make sure that you squash your commits into logical units of work using `git rebase -i` and `git push -f`. After every commit the test suite should be passing. Include documentation changes in the same commit so that a revert would remove all traces of the feature or fix. -Commits that fix or close an issue should include a reference like `Closes #XXX` -or `Fixes #XXX`, which will automatically close the issue when merged. +Commits that fix or close an issue should include a reference like +`Closes #XXXX` or `Fixes #XXXX`, which will automatically close the +issue when merged. -Add your name to the AUTHORS file, but make sure the list is sorted and your -name and email address match your git configuration. The AUTHORS file is -regenerated occasionally from the git commit history, so a mismatch may result -in your changes being overwritten. 
+Please do not add yourself to the `AUTHORS` file, as it is regenerated +regularly from the Git history. ### Merge approval -Docker maintainers use LGTM (looks good to me) in comments on the code review +Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to indicate acceptance. A change requires LGTMs from an absolute majority of the maintainers of each -component affected. For example, if a change affects docs/ and registry/, it -needs an absolute majority from the maintainers of docs/ AND, separately, an -absolute majority of the maintainers of registry. +component affected. For example, if a change affects `docs/` and `registry/`, it +needs an absolute majority from the maintainers of `docs/` AND, separately, an +absolute majority of the maintainers of `registry/`. For more details see [MAINTAINERS.md](hack/MAINTAINERS.md) @@ -126,66 +166,151 @@ The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch. The rules are pretty simple: if you -can certify the below: +can certify the below (from +[developercertificate.org](http://developercertificate.org/)): ``` -Docker Developer Certificate of Origin 1.1 +Developer Certificate of Origin +Version 1.1 -By making a contribution to the Docker Project ("Project"), I represent and -warrant that: - -a. The contribution was created in whole or in part by me and I have the right -to submit the contribution on my own behalf or on behalf of a third party who -has authorized me to submit this contribution to the Project; or - -b. The contribution is based upon previous work that, to the best of my -knowledge, is covered under an appropriate open source license and I have the -right and authorization to submit that work with modifications, whether -created in whole or in part by me, under the same open source license (unless -I am permitted to submit under a different license) that I have identified in -the contribution; or - -c. The contribution was provided directly to me by some other person who -represented and warranted (a) or (b) and I have not modified it. - -d. I understand and agree that this Project and the contribution are publicly -known and that a record of the contribution (including all personal -information I submit with it, including my sign-off record) is maintained -indefinitely and may be redistributed consistent with this Project or the open -source license(s) involved. +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. 
+ +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. ``` -then you just add a line to every git commit message: +Then you just add a line to every git commit message: - Docker-DCO-1.1-Signed-off-by: Joe Smith (github: github_handle) + Signed-off-by: Joe Smith -using your real name (sorry, no pseudonyms or anonymous contributions.) +Using your real name (sorry, no pseudonyms or anonymous contributions.) -One way to automate this, is customise your get ``commit.template`` by adding -a ``prepare-commit-msg`` hook to your docker checkout: +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. -``` -curl -o .git/hooks/prepare-commit-msg https://raw.github.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg -``` - -* Note: the above script expects to find your GitHub user name in ``git config --get github.user`` +Note that the old-style `Docker-DCO-1.1-Signed-off-by: ...` format is still +accepted, so there is no need to update outstanding pull requests to the new +format right away, but please do adjust your processes for future contributions. #### Small patch exception There are several exceptions to the signing requirement. Currently these are: * Your patch fixes spelling or grammar errors. -* Your patch is a single line change to documentation. +* Your patch is a single line change to documentation contained in the + `docs` directory. +* Your patch fixes Markdown formatting or syntax errors in the + documentation contained in the `docs` directory. -If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.io) +If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.com) ### How can I become a maintainer? -* Step 1: learn the component inside out -* Step 2: make yourself useful by contributing code, bugfixes, support etc. -* Step 3: volunteer on the irc channel (#docker@freenode) -* Step 4: propose yourself at a scheduled docker meeting in #docker-dev +* Step 1: Learn the component inside out +* Step 2: Make yourself useful by contributing code, bug fixes, support etc. +* Step 3: Volunteer on the IRC channel (#docker at Freenode) +* Step 4: Propose yourself at a scheduled docker meeting in #docker-dev + +Don't forget: being a maintainer is a time investment. Make sure you +will have time to make yourself available. You don't have to be a +maintainer to make a difference on the project! + +### IRC Meetings + +There are two monthly meetings taking place on #docker-dev IRC to accomodate all timezones. +Anybody can ask for a topic to be discussed prior to the meeting. + +If you feel the conversation is going off-topic, feel free to point it out. + +For the exact dates and times, have a look at [the irc-minutes repo](https://github.com/docker/irc-minutes). +They also contain all the notes from previous meetings. + +## Docker Community Guidelines + +We want to keep the Docker community awesome, growing and collaborative. We +need your help to keep it that way. 
To help with this we've come up with some +general guidelines for the community as a whole: + +* Be nice: Be courteous, respectful and polite to fellow community members: no + regional, racial, gender, or other abuse will be tolerated. We like nice people + way better than mean ones! + +* Encourage diversity and participation: Make everyone in our community + feel welcome, regardless of their background and the extent of their + contributions, and do everything possible to encourage participation in + our community. + +* Keep it legal: Basically, don't get us in trouble. Share only content that + you own, do not share private or sensitive information, and don't break the + law. + +* Stay on topic: Make sure that you are posting to the correct channel + and avoid off-topic discussions. Remember when you update an issue or + respond to an email you are potentially sending to a large number of + people. Please consider this before you update. Also remember that + nobody likes spam. + +### Guideline Violations — 3 Strikes Method + +The point of this section is not to find opportunities to punish people, but we +do need a fair way to deal with people who are making our community suck. + +1. First occurrence: We'll give you a friendly, but public reminder that the + behavior is inappropriate according to our guidelines. + +2. Second occurrence: We will send you a private message with a warning that + any additional violations will result in removal from the community. + +3. Third occurrence: Depending on the violation, we may need to delete or ban + your account. + +**Notes:** + +* Obvious spammers are banned on first occurrence. If we don't do this, we'll + have spam all over the place. + +* Violations are forgiven after 6 months of good behavior, and we won't + hold a grudge. + +* People who commit minor infractions will get some education, + rather than hammering them in the 3 strikes process. + +* The rules apply equally to everyone in the community, no matter how + much you've contributed. + +* Extreme violations of a threatening, abusive, destructive or illegal nature + will be addressed immediately and are not subject to 3 strikes or + forgiveness. -Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available. -You don't have to be a maintainer to make a difference on the project! +* Contact james@docker.com to report abuse or appeal violations. In the case of + appeals, we know that mistakes happen, and we'll work with you to come up with + a fair solution if there has been a misunderstanding. 
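The sign-off requirement described above needs no special tooling; a minimal sketch with placeholder name, email, and commit message:

    # one-time setup; "git commit -s" derives the trailer from these values
    git config user.name "Joe Smith"
    git config user.email "joe.smith@email.com"
    # -s appends "Signed-off-by: Joe Smith <joe.smith@email.com>" to the message
    git commit -s -m "Fix attach stream handling"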
diff -Nru docker.io-0.9.1~dfsg1/daemon/attach.go docker.io-1.3.2~dfsg1/daemon/attach.go --- docker.io-0.9.1~dfsg1/daemon/attach.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/attach.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,273 @@ +package daemon + +import ( + "encoding/json" + "io" + "os" + "time" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/utils" +) + +func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + + var ( + name = job.Args[0] + logs = job.GetenvBool("logs") + stream = job.GetenvBool("stream") + stdin = job.GetenvBool("stdin") + stdout = job.GetenvBool("stdout") + stderr = job.GetenvBool("stderr") + ) + + container := daemon.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + + //logs + if logs { + cLog, err := container.ReadLog("json") + if err != nil && os.IsNotExist(err) { + // Legacy logs + log.Debugf("Old logs format") + if stdout { + cLog, err := container.ReadLog("stdout") + if err != nil { + log.Errorf("Error reading logs (stdout): %s", err) + } else if _, err := io.Copy(job.Stdout, cLog); err != nil { + log.Errorf("Error streaming logs (stdout): %s", err) + } + } + if stderr { + cLog, err := container.ReadLog("stderr") + if err != nil { + log.Errorf("Error reading logs (stderr): %s", err) + } else if _, err := io.Copy(job.Stderr, cLog); err != nil { + log.Errorf("Error streaming logs (stderr): %s", err) + } + } + } else if err != nil { + log.Errorf("Error reading logs (json): %s", err) + } else { + dec := json.NewDecoder(cLog) + for { + l := &jsonlog.JSONLog{} + + if err := dec.Decode(l); err == io.EOF { + break + } else if err != nil { + log.Errorf("Error streaming logs: %s", err) + break + } + if l.Stream == "stdout" && stdout { + io.WriteString(job.Stdout, l.Log) + } + if l.Stream == "stderr" && stderr { + io.WriteString(job.Stderr, l.Log) + } + } + } + } + + //stream + if stream { + var ( + cStdin io.ReadCloser + cStdout, cStderr io.Writer + cStdinCloser io.Closer + ) + + if stdin { + r, w := io.Pipe() + go func() { + defer w.Close() + defer log.Debugf("Closing buffered stdin pipe") + io.Copy(w, job.Stdin) + }() + cStdin = r + cStdinCloser = job.Stdin + } + if stdout { + cStdout = job.Stdout + } + if stderr { + cStderr = job.Stderr + } + + <-daemon.Attach(&container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, cStdin, cStdinCloser, cStdout, cStderr) + // If we are in stdinonce mode, wait for the process to end + // otherwise, simply return + if container.Config.StdinOnce && !container.Config.Tty { + container.WaitStop(-1 * time.Second) + } + } + return engine.StatusOK +} + +// FIXME: this should be private, and every outside subsystem +// should go through the "container_attach" job. But that would require +// that job to be properly documented, as well as the relationship between +// Attach and ContainerAttach. +// +// This method is in use by builder/builder.go. 
+func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error { + var ( + cStdout, cStderr io.ReadCloser + nJobs int + errors = make(chan error, 3) + ) + + // Connect stdin of container to the http conn. + if stdin != nil && openStdin { + nJobs++ + // Get the stdin pipe. + if cStdin, err := streamConfig.StdinPipe(); err != nil { + errors <- err + } else { + go func() { + log.Debugf("attach: stdin: begin") + defer log.Debugf("attach: stdin: end") + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr + if stdinOnce && !tty { + defer cStdin.Close() + } else { + defer func() { + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + }() + } + if tty { + _, err = utils.CopyEscapable(cStdin, stdin) + } else { + _, err = io.Copy(cStdin, stdin) + + } + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + log.Errorf("attach: stdin: %s", err) + } + errors <- err + }() + } + } + if stdout != nil { + nJobs++ + // Get a reader end of a pipe that is attached as stdout to the container. + if p, err := streamConfig.StdoutPipe(); err != nil { + errors <- err + } else { + cStdout = p + go func() { + log.Debugf("attach: stdout: begin") + defer log.Debugf("attach: stdout: end") + // If we are in StdinOnce mode, then close stdin + if stdinOnce && stdin != nil { + defer stdin.Close() + } + if stdinCloser != nil { + defer stdinCloser.Close() + } + _, err := io.Copy(stdout, cStdout) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + log.Errorf("attach: stdout: %s", err) + } + errors <- err + }() + } + } else { + // Point stdout of container to a no-op writer. + go func() { + if stdinCloser != nil { + defer stdinCloser.Close() + } + if cStdout, err := streamConfig.StdoutPipe(); err != nil { + log.Errorf("attach: stdout pipe: %s", err) + } else { + io.Copy(&ioutils.NopWriter{}, cStdout) + } + }() + } + if stderr != nil { + nJobs++ + if p, err := streamConfig.StderrPipe(); err != nil { + errors <- err + } else { + cStderr = p + go func() { + log.Debugf("attach: stderr: begin") + defer log.Debugf("attach: stderr: end") + // If we are in StdinOnce mode, then close stdin + // Why are we closing stdin here and above while handling stdout? + if stdinOnce && stdin != nil { + defer stdin.Close() + } + if stdinCloser != nil { + defer stdinCloser.Close() + } + _, err := io.Copy(stderr, cStderr) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + log.Errorf("attach: stderr: %s", err) + } + errors <- err + }() + } + } else { + // Point stderr at a no-op writer. + go func() { + if stdinCloser != nil { + defer stdinCloser.Close() + } + + if cStderr, err := streamConfig.StderrPipe(); err != nil { + log.Errorf("attach: stdout pipe: %s", err) + } else { + io.Copy(&ioutils.NopWriter{}, cStderr) + } + }() + } + + return promise.Go(func() error { + defer func() { + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + }() + + // FIXME: how to clean up the stdin goroutine without the unwanted side effect + // of closing the passed stdin? Add an intermediary io.Pipe? 
+ for i := 0; i < nJobs; i++ { + log.Debugf("attach: waiting for job %d/%d", i+1, nJobs) + if err := <-errors; err != nil { + log.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err) + return err + } + log.Debugf("attach: job %d completed successfully", i+1) + } + log.Debugf("attach: all jobs completed successfully") + return nil + }) +} diff -Nru docker.io-0.9.1~dfsg1/daemon/changes.go docker.io-1.3.2~dfsg1/daemon/changes.go --- docker.io-0.9.1~dfsg1/daemon/changes.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/changes.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,32 @@ +package daemon + +import ( + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerChanges(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s CONTAINER", job.Name) + } + name := job.Args[0] + if container := daemon.Get(name); container != nil { + outs := engine.NewTable("", 0) + changes, err := container.Changes() + if err != nil { + return job.Error(err) + } + for _, change := range changes { + out := &engine.Env{} + if err := out.Import(change); err != nil { + return job.Error(err) + } + outs.Add(out) + } + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + } else { + return job.Errorf("No such container: %s", name) + } + return engine.StatusOK +} diff -Nru docker.io-0.9.1~dfsg1/daemon/commit.go docker.io-1.3.2~dfsg1/daemon/commit.go --- docker.io-0.9.1~dfsg1/daemon/commit.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/commit.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,84 @@ +package daemon + +import ( + "github.com/docker/docker/engine" + "github.com/docker/docker/image" + "github.com/docker/docker/runconfig" +) + +func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name) + } + name := job.Args[0] + + container := daemon.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + + var ( + config = container.Config + newConfig runconfig.Config + ) + + if err := job.GetenvJson("config", &newConfig); err != nil { + return job.Error(err) + } + + if err := runconfig.Merge(&newConfig, config); err != nil { + return job.Error(err) + } + + img, err := daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig) + if err != nil { + return job.Error(err) + } + job.Printf("%s\n", img.ID) + return engine.StatusOK +} + +// Commit creates a new filesystem image from the current state of a container. 
+// The image can optionally be tagged into a repository +func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, pause bool, config *runconfig.Config) (*image.Image, error) { + if pause { + container.Pause() + defer container.Unpause() + } + + if err := container.Mount(); err != nil { + return nil, err + } + defer container.Unmount() + + rwTar, err := container.ExportRw() + if err != nil { + return nil, err + } + defer rwTar.Close() + + // Create a new image from the container's base layers + a new layer from container changes + var ( + containerID, containerImage string + containerConfig *runconfig.Config + ) + + if container != nil { + containerID = container.ID + containerImage = container.Image + containerConfig = container.Config + } + + img, err := daemon.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config) + if err != nil { + return nil, err + } + + // Register the image if needed + if repository != "" { + if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil { + return img, err + } + } + return img, nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/config.go docker.io-1.3.2~dfsg1/daemon/config.go --- docker.io-0.9.1~dfsg1/daemon/config.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/config.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,86 @@ +package daemon + +import ( + "net" + + "github.com/docker/docker/daemon/networkdriver" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" +) + +const ( + defaultNetworkMtu = 1500 + disableNetworkBridge = "none" +) + +// Config define the configuration of a docker daemon +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `docker -d -e lxc` +// FIXME: separate runtime configuration from http api configuration +type Config struct { + Pidfile string + Root string + AutoRestart bool + Dns []string + DnsSearch []string + Mirrors []string + EnableIptables bool + EnableIpForward bool + EnableIpMasq bool + DefaultIp net.IP + BridgeIface string + BridgeIP string + FixedCIDR string + InsecureRegistries []string + InterContainerCommunication bool + GraphDriver string + GraphOptions []string + ExecDriver string + Mtu int + DisableNetwork bool + EnableSelinuxSupport bool + Context map[string][]string +} + +// InstallFlags adds command-line options to the top-level flag parser for +// the current process. +// Subsequent calls to `flag.Parse` will populate config with values parsed +// from the command-line. 
+func (config *Config) InstallFlags() { + flag.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file") + flag.StringVar(&config.Root, []string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the Docker runtime") + flag.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run") + flag.BoolVar(&config.EnableIptables, []string{"#iptables", "-iptables"}, true, "Enable Docker's addition of iptables rules") + flag.BoolVar(&config.EnableIpForward, []string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward") + flag.BoolVar(&config.EnableIpMasq, []string{"-ip-masq"}, true, "Enable IP masquerading for bridge's IP range") + flag.StringVar(&config.BridgeIP, []string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b") + flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking") + flag.StringVar(&config.FixedCIDR, []string{"-fixed-cidr"}, "", "IPv4 subnet for fixed IPs (ex: 10.20.0.0/16)\nthis subnet must be nested in the bridge subnet (which is defined by -b or --bip)") + opts.ListVar(&config.InsecureRegistries, []string{"-insecure-registry"}, "Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)") + flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication") + flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver") + flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver") + flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support. SELinux does not presently support the BTRFS storage driver") + flag.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, "Set the containers network MTU\nif no value is provided: default to the default route MTU or 1500 if no default route is available") + opts.IPVar(&config.DefaultIp, []string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports") + opts.ListVar(&config.GraphOptions, []string{"-storage-opt"}, "Set storage driver options") + // FIXME: why the inconsistency between "hosts" and "sockets"? + opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers") + opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains") + opts.MirrorListVar(&config.Mirrors, []string{"-registry-mirror"}, "Specify a preferred Docker registry mirror") + + // Localhost is by default considered as an insecure registry + // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). + // + // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change + // daemon flags on boot2docker? 
+ // If so, do not forget to check the TODO in TestIsSecure + config.InsecureRegistries = append(config.InsecureRegistries, "127.0.0.0/8") +} + +func GetDefaultNetworkMtu() int { + if iface, err := networkdriver.GetDefaultRouteIface(); err == nil { + return iface.MTU + } + return defaultNetworkMtu +} diff -Nru docker.io-0.9.1~dfsg1/daemon/container.go docker.io-1.3.2~dfsg1/daemon/container.go --- docker.io-0.9.1~dfsg1/daemon/container.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/container.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,1246 @@ +package daemon + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/docker/libcontainer/devices" + "github.com/docker/libcontainer/label" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/engine" + "github.com/docker/docker/image" + "github.com/docker/docker/links" + "github.com/docker/docker/nat" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/broadcastwriter" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/networkfs/etchosts" + "github.com/docker/docker/pkg/networkfs/resolvconf" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" +) + +const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +var ( + ErrNotATTY = errors.New("The PTY is not a file") + ErrNoTTY = errors.New("No PTY found") + ErrContainerStart = errors.New("The container failed to start. Unknown error") + ErrContainerStartTimeout = errors.New("The container failed to start due to timed out.") +) + +type StreamConfig struct { + stdout *broadcastwriter.BroadcastWriter + stderr *broadcastwriter.BroadcastWriter + stdin io.ReadCloser + stdinPipe io.WriteCloser +} + +type Container struct { + *State `json:"State"` // Needed for remote api version <= 1.11 + root string // Path to the "home" of the container, including metadata. + basefs string // Path to the graphdriver mountpoint + + ID string + + Created time.Time + + Path string + Args []string + + Config *runconfig.Config + Image string + + NetworkSettings *NetworkSettings + + ResolvConfPath string + HostnamePath string + HostsPath string + Name string + Driver string + ExecDriver string + + command *execdriver.Command + StreamConfig + + daemon *Daemon + MountLabel, ProcessLabel string + AppArmorProfile string + RestartCount int + + // Maps container paths to volume paths. The key in this is the path to which + // the volume is being mounted inside the container. Value is the path of the + // volume on disk + Volumes map[string]string + // Store rw/ro in a separate structure to preserve reverse-compatibility on-disk. 
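[Reviewer note] InstallFlags above only declares options; the daemon later calls flag.Parse once and reads the populated Config. A reduced sketch of the same declare-then-parse pattern using only the standard library flag package (mflag's multi-name []string form and deprecation markers such as `#r` are Docker-specific and omitted here):

    package main

    import (
        "flag"
        "fmt"
    )

    // Config holds a few of the daemon settings declared above.
    type Config struct {
        Pidfile        string
        Root           string
        EnableIptables bool
        Mtu            int
    }

    // InstallFlags only declares flags against the given set; parsing happens
    // later in the caller, exactly as daemon/config.go defers to flag.Parse.
    func (c *Config) InstallFlags(fs *flag.FlagSet) {
        fs.StringVar(&c.Pidfile, "pidfile", "/var/run/docker.pid", "Path to use for daemon PID file")
        fs.StringVar(&c.Root, "graph", "/var/lib/docker", "Path to use as the root of the Docker runtime")
        fs.BoolVar(&c.EnableIptables, "iptables", true, "Enable Docker's addition of iptables rules")
        fs.IntVar(&c.Mtu, "mtu", 0, "Container network MTU (0 = autodetect)")
    }

    func main() {
        cfg := &Config{}
        fs := flag.NewFlagSet("dockerd", flag.ExitOnError)
        cfg.InstallFlags(fs)
        fs.Parse([]string{"-graph", "/tmp/docker", "-mtu", "1400"})
        fmt.Printf("%+v\n", *cfg)
    }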
+ // Easier than migrating older container configs :) + VolumesRW map[string]bool + hostConfig *runconfig.HostConfig + + activeLinks map[string]*links.Link + monitor *containerMonitor + execCommands *execStore +} + +func (container *Container) FromDisk() error { + pth, err := container.jsonPath() + if err != nil { + return err + } + + data, err := ioutil.ReadFile(pth) + if err != nil { + return err + } + // Load container settings + // udp broke compat of docker.PortMapping, but it's not used when loading a container, we can skip it + if err := json.Unmarshal(data, container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") { + return err + } + + if err := label.ReserveLabel(container.ProcessLabel); err != nil { + return err + } + return container.readHostConfig() +} + +func (container *Container) toDisk() error { + data, err := json.Marshal(container) + if err != nil { + return err + } + + pth, err := container.jsonPath() + if err != nil { + return err + } + + err = ioutil.WriteFile(pth, data, 0666) + if err != nil { + return err + } + + return container.WriteHostConfig() +} + +func (container *Container) ToDisk() error { + container.Lock() + err := container.toDisk() + container.Unlock() + return err +} + +func (container *Container) readHostConfig() error { + container.hostConfig = &runconfig.HostConfig{} + // If the hostconfig file does not exist, do not read it. + // (We still have to initialize container.hostConfig, + // but that's OK, since we just did that above.) + pth, err := container.hostConfigPath() + if err != nil { + return err + } + + _, err = os.Stat(pth) + if os.IsNotExist(err) { + return nil + } + + data, err := ioutil.ReadFile(pth) + if err != nil { + return err + } + return json.Unmarshal(data, container.hostConfig) +} + +func (container *Container) WriteHostConfig() error { + data, err := json.Marshal(container.hostConfig) + if err != nil { + return err + } + + pth, err := container.hostConfigPath() + if err != nil { + return err + } + + return ioutil.WriteFile(pth, data, 0666) +} + +func (container *Container) LogEvent(action string) { + d := container.daemon + if err := d.eng.Job("log", action, container.ID, d.Repositories().ImageName(container.Image)).Run(); err != nil { + log.Errorf("Error logging event %s for %s: %s", action, container.ID, err) + } +} + +func (container *Container) getResourcePath(path string) (string, error) { + cleanPath := filepath.Join("/", path) + return symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs) +} + +func (container *Container) getRootResourcePath(path string) (string, error) { + cleanPath := filepath.Join("/", path) + return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root) +} + +func populateCommand(c *Container, env []string) error { + en := &execdriver.Network{ + Mtu: c.daemon.config.Mtu, + Interface: nil, + } + + parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2) + switch parts[0] { + case "none": + case "host": + en.HostNetworking = true + case "bridge", "": // empty string to support existing containers + if !c.Config.NetworkDisabled { + network := c.NetworkSettings + en.Interface = &execdriver.NetworkInterface{ + Gateway: network.Gateway, + Bridge: network.Bridge, + IPAddress: network.IPAddress, + IPPrefixLen: network.IPPrefixLen, + MacAddress: network.MacAddress, + } + } + case "container": + nc, err := c.getNetworkedContainer() + if err != nil { + return err + } + en.ContainerID = nc.ID + default: + return 
fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode) + } + + // Build lists of devices allowed and created within the container. + userSpecifiedDevices := make([]*devices.Device, len(c.hostConfig.Devices)) + for i, deviceMapping := range c.hostConfig.Devices { + device, err := devices.GetDevice(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions) + if err != nil { + return fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err) + } + device.Path = deviceMapping.PathInContainer + userSpecifiedDevices[i] = device + } + allowedDevices := append(devices.DefaultAllowedDevices, userSpecifiedDevices...) + + autoCreatedDevices := append(devices.DefaultAutoCreatedDevices, userSpecifiedDevices...) + + // TODO: this can be removed after lxc-conf is fully deprecated + lxcConfig := mergeLxcConfIntoOptions(c.hostConfig) + + resources := &execdriver.Resources{ + Memory: c.Config.Memory, + MemorySwap: c.Config.MemorySwap, + CpuShares: c.Config.CpuShares, + Cpuset: c.Config.Cpuset, + } + + processConfig := execdriver.ProcessConfig{ + Privileged: c.hostConfig.Privileged, + Entrypoint: c.Path, + Arguments: c.Args, + Tty: c.Config.Tty, + User: c.Config.User, + } + + processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true} + processConfig.Env = env + + c.command = &execdriver.Command{ + ID: c.ID, + Rootfs: c.RootfsPath(), + InitPath: "/.dockerinit", + WorkingDir: c.Config.WorkingDir, + Network: en, + Resources: resources, + AllowedDevices: allowedDevices, + AutoCreatedDevices: autoCreatedDevices, + CapAdd: c.hostConfig.CapAdd, + CapDrop: c.hostConfig.CapDrop, + ProcessConfig: processConfig, + ProcessLabel: c.GetProcessLabel(), + MountLabel: c.GetMountLabel(), + LxcConfig: lxcConfig, + AppArmorProfile: c.AppArmorProfile, + } + + return nil +} + +func (container *Container) Start() (err error) { + container.Lock() + defer container.Unlock() + + if container.Running { + return nil + } + + // if we encounter an error during start we need to ensure that any other + setup has been cleaned up properly + defer func() { + if err != nil { + container.cleanup() + } + }() + + if err := container.setupContainerDns(); err != nil { + return err + } + if err := container.Mount(); err != nil { + return err + } + if err := container.initializeNetworking(); err != nil { + return err + } + if err := container.updateParentsHosts(); err != nil { + return err + } + container.verifyDaemonSettings() + if err := container.prepareVolumes(); err != nil { + return err + } + linkedEnv, err := container.setupLinkedContainers() + if err != nil { + return err + } + if err := container.setupWorkingDirectory(); err != nil { + return err + } + env := container.createDaemonEnvironment(linkedEnv) + if err := populateCommand(container, env); err != nil { + return err + } + if err := container.setupMounts(); err != nil { + return err + } + + return container.waitForStart() +} + +func (container *Container) Run() error { + if err := container.Start(); err != nil { + return err + } + container.WaitStop(-1 * time.Second) + return nil +} + +func (container *Container) Output() (output []byte, err error) { + pipe, err := container.StdoutPipe() + if err != nil { + return nil, err + } + defer pipe.Close() + if err := container.Start(); err != nil { + return nil, err + } + output, err = ioutil.ReadAll(pipe) + container.WaitStop(-1 * time.Second) + return output, err +} + +// StreamConfig.StdinPipe returns a WriteCloser which can be used to feed data +to the standard
input of the container's active process. +// Container.StdoutPipe and Container.StderrPipe each return a ReadCloser +// which can be used to retrieve the standard output (and error) generated +// by the container's active process. The output (and error) are actually +// copied and delivered to all StdoutPipe and StderrPipe consumers, using +// a kind of "broadcaster". + +func (streamConfig *StreamConfig) StdinPipe() (io.WriteCloser, error) { + return streamConfig.stdinPipe, nil +} + +func (streamConfig *StreamConfig) StdoutPipe() (io.ReadCloser, error) { + reader, writer := io.Pipe() + streamConfig.stdout.AddWriter(writer, "") + return ioutils.NewBufReader(reader), nil +} + +func (streamConfig *StreamConfig) StderrPipe() (io.ReadCloser, error) { + reader, writer := io.Pipe() + streamConfig.stderr.AddWriter(writer, "") + return ioutils.NewBufReader(reader), nil +} + +func (streamConfig *StreamConfig) StdoutLogPipe() io.ReadCloser { + reader, writer := io.Pipe() + streamConfig.stdout.AddWriter(writer, "stdout") + return ioutils.NewBufReader(reader) +} + +func (streamConfig *StreamConfig) StderrLogPipe() io.ReadCloser { + reader, writer := io.Pipe() + streamConfig.stderr.AddWriter(writer, "stderr") + return ioutils.NewBufReader(reader) +} + +func (container *Container) buildHostnameFile() error { + hostnamePath, err := container.getRootResourcePath("hostname") + if err != nil { + return err + } + container.HostnamePath = hostnamePath + + if container.Config.Domainname != "" { + return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644) + } + return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) +} + +func (container *Container) buildHostsFiles(IP string) error { + + hostsPath, err := container.getRootResourcePath("hosts") + if err != nil { + return err + } + container.HostsPath = hostsPath + + extraContent := make(map[string]string) + + children, err := container.daemon.Children(container.Name) + if err != nil { + return err + } + + for linkAlias, child := range children { + _, alias := path.Split(linkAlias) + extraContent[alias] = child.NetworkSettings.IPAddress + } + + for _, extraHost := range container.hostConfig.ExtraHosts { + parts := strings.Split(extraHost, ":") + extraContent[parts[0]] = parts[1] + } + + return etchosts.Build(container.HostsPath, IP, container.Config.Hostname, container.Config.Domainname, &extraContent) +} + +func (container *Container) buildHostnameAndHostsFiles(IP string) error { + if err := container.buildHostnameFile(); err != nil { + return err + } + + return container.buildHostsFiles(IP) +} + +func (container *Container) AllocateNetwork() error { + mode := container.hostConfig.NetworkMode + if container.Config.NetworkDisabled || !mode.IsPrivate() { + return nil + } + + var ( + env *engine.Env + err error + eng = container.daemon.eng + ) + + job := eng.Job("allocate_interface", container.ID) + if env, err = job.Stdout.AddEnv(); err != nil { + return err + } + if err = job.Run(); err != nil { + return err + } + + // Error handling: At this point, the interface is allocated so we have to + // make sure that it is always released in case of error, otherwise we + // might leak resources. 
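[Reviewer note] The comment above states an obligation that is easy to get wrong: once allocate_interface has succeeded, every subsequent error path must run release_interface. The code that follows repeats the release call before each early return; a defer guarded by the named error result expresses the same contract once. A self-contained sketch, with hypothetical allocateInterface/releaseInterface functions standing in for the engine jobs:

    package main

    import (
        "errors"
        "fmt"
    )

    func allocateInterface(id string) error { fmt.Println("allocated", id); return nil }
    func releaseInterface(id string)        { fmt.Println("released", id) }
    func bindPorts(id string) error         { return errors.New("port already in use") }

    // configureNetwork guarantees release on any failure after allocation,
    // without repeating releaseInterface before each early return.
    func configureNetwork(id string) (err error) {
        if aErr := allocateInterface(id); aErr != nil {
            return aErr // nothing allocated yet, nothing to release
        }
        // Registered only after allocation succeeded; fires only on error.
        defer func() {
            if err != nil {
                releaseInterface(id)
            }
        }()

        if err = bindPorts(id); err != nil {
            return err
        }
        return nil
    }

    func main() {
        fmt.Println("configureNetwork:", configureNetwork("abc123"))
    }

The guard only fires when the function unwinds with a non-nil error, so the success path keeps the allocated interface.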
+ + if container.Config.PortSpecs != nil { + if err = migratePortMappings(container.Config, container.hostConfig); err != nil { + eng.Job("release_interface", container.ID).Run() + return err + } + container.Config.PortSpecs = nil + if err = container.WriteHostConfig(); err != nil { + eng.Job("release_interface", container.ID).Run() + return err + } + } + + var ( + portSpecs = make(nat.PortSet) + bindings = make(nat.PortMap) + ) + + if container.Config.ExposedPorts != nil { + portSpecs = container.Config.ExposedPorts + } + + if container.hostConfig.PortBindings != nil { + for p, b := range container.hostConfig.PortBindings { + bindings[p] = []nat.PortBinding{} + for _, bb := range b { + bindings[p] = append(bindings[p], nat.PortBinding{ + HostIp: bb.HostIp, + HostPort: bb.HostPort, + }) + } + } + } + + container.NetworkSettings.PortMapping = nil + + for port := range portSpecs { + if err = container.allocatePort(eng, port, bindings); err != nil { + eng.Job("release_interface", container.ID).Run() + return err + } + } + container.WriteHostConfig() + + container.NetworkSettings.Ports = bindings + container.NetworkSettings.Bridge = env.Get("Bridge") + container.NetworkSettings.IPAddress = env.Get("IP") + container.NetworkSettings.IPPrefixLen = env.GetInt("IPPrefixLen") + container.NetworkSettings.MacAddress = env.Get("MacAddress") + container.NetworkSettings.Gateway = env.Get("Gateway") + + return nil +} + +func (container *Container) ReleaseNetwork() { + if container.Config.NetworkDisabled { + return + } + eng := container.daemon.eng + + eng.Job("release_interface", container.ID).Run() + container.NetworkSettings = &NetworkSettings{} +} + +func (container *Container) isNetworkAllocated() bool { + return container.NetworkSettings.IPAddress != "" +} + +func (container *Container) RestoreNetwork() error { + mode := container.hostConfig.NetworkMode + // Don't attempt a restore if we previously didn't allocate networking. + // This might be a legacy container with no network allocated, in which case the + // allocation will happen once and for all at start. + if !container.isNetworkAllocated() || container.Config.NetworkDisabled || !mode.IsPrivate() { + return nil + } + + eng := container.daemon.eng + + // Re-allocate the interface with the same IP and MAC address. + job := eng.Job("allocate_interface", container.ID) + job.Setenv("RequestedIP", container.NetworkSettings.IPAddress) + job.Setenv("RequestedMac", container.NetworkSettings.MacAddress) + if err := job.Run(); err != nil { + return err + } + + // Re-allocate any previously allocated ports. + for port := range container.NetworkSettings.Ports { + if err := container.allocatePort(eng, port, container.NetworkSettings.Ports); err != nil { + return err + } + } + return nil +} + +// cleanup releases any network resources allocated to the container along with any rules +// around how containers are linked together. It also unmounts the container's root filesystem. 
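[Reviewer note] RestoreNetwork above re-runs the allocate_interface job after a daemon restart, pinning the container's previous address by passing RequestedIP and RequestedMac through the job environment. A reduced sketch of that re-request idea with a plain stub allocator (the function names and addresses here are illustrative, not Docker's):

    package main

    import "fmt"

    // allocation mimics what the allocate_interface job reports back via env.
    type allocation struct{ IP, MAC string }

    // allocate honours a requested address when one is given, the way
    // RestoreNetwork passes RequestedIP/RequestedMac to pin the old address.
    func allocate(requestedIP, requestedMAC string) allocation {
        if requestedIP != "" && requestedMAC != "" {
            return allocation{requestedIP, requestedMAC} // reuse previous address
        }
        return allocation{"172.17.0.7", "02:42:ac:11:00:07"} // fresh pick (stubbed)
    }

    func main() {
        first := allocate("", "")
        fmt.Println("first start  :", first)

        // After a daemon restart, re-request the stored address so links and
        // previously written /etc/hosts entries remain valid.
        fmt.Println("after restore:", allocate(first.IP, first.MAC))
    }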
+func (container *Container) cleanup() { + container.ReleaseNetwork() + + // Disable all active links + if container.activeLinks != nil { + for _, link := range container.activeLinks { + link.Disable() + } + } + + if err := container.Unmount(); err != nil { + log.Errorf("%v: Failed to umount filesystem: %v", container.ID, err) + } +} + +func (container *Container) KillSig(sig int) error { + log.Debugf("Sending %d to %s", sig, container.ID) + container.Lock() + defer container.Unlock() + + // We could unpause the container for them rather than returning this error + if container.Paused { + return fmt.Errorf("Container %s is paused. Unpause the container before stopping", container.ID) + } + + if !container.Running { + return nil + } + + // signal to the monitor that it should not restart the container + after we send the kill signal + container.monitor.ExitOnNext() + + // if the container is currently restarting we do not need to send the signal + to the process. Telling the monitor that it should exit on its next event + loop is enough + if container.Restarting { + return nil + } + + return container.daemon.Kill(container, sig) +} + +func (container *Container) Pause() error { + if container.IsPaused() { + return fmt.Errorf("Container %s is already paused", container.ID) + } + if !container.IsRunning() { + return fmt.Errorf("Container %s is not running", container.ID) + } + return container.daemon.Pause(container) +} + +func (container *Container) Unpause() error { + if !container.IsPaused() { + return fmt.Errorf("Container %s is not paused", container.ID) + } + if !container.IsRunning() { + return fmt.Errorf("Container %s is not running", container.ID) + } + return container.daemon.Unpause(container) +} + +func (container *Container) Kill() error { + if !container.IsRunning() { + return nil + } + + // 1. Send SIGKILL + if err := container.KillSig(9); err != nil { + return err + } + + // 2. Wait for the process to die, as a last resort, try to kill the process directly + if _, err := container.WaitStop(10 * time.Second); err != nil { + // Ensure that we don't kill ourselves + if pid := container.GetPid(); pid != 0 { + log.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID)) + if err := syscall.Kill(pid, 9); err != nil { + return err + } + } + } + + container.WaitStop(-1 * time.Second) + return nil +} + +func (container *Container) Stop(seconds int) error { + if !container.IsRunning() { + return nil + } + + // 1. Send a SIGTERM + if err := container.KillSig(15); err != nil { + log.Infof("Failed to send SIGTERM to the process, force killing") + if err := container.KillSig(9); err != nil { + return err + } + } + + // 2. Wait for the process to exit on its own + if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil { + log.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds) + // 3.
If it doesn't, then send SIGKILL + if err := container.Kill(); err != nil { + container.WaitStop(-1 * time.Second) + return err + } + } + return nil +} + +func (container *Container) Restart(seconds int) error { + // Avoid unnecessarily unmounting and then directly mounting + // the container when the container stops and then starts + // again + if err := container.Mount(); err == nil { + defer container.Unmount() + } + + if err := container.Stop(seconds); err != nil { + return err + } + return container.Start() +} + +func (container *Container) Resize(h, w int) error { + return container.command.ProcessConfig.Terminal.Resize(h, w) +} + +func (container *Container) ExportRw() (archive.Archive, error) { + if err := container.Mount(); err != nil { + return nil, err + } + if container.daemon == nil { + return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID) + } + archive, err := container.daemon.Diff(container) + if err != nil { + container.Unmount() + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.Unmount() + return err + }), + nil +} + +func (container *Container) Export() (archive.Archive, error) { + if err := container.Mount(); err != nil { + return nil, err + } + + archive, err := archive.Tar(container.basefs, archive.Uncompressed) + if err != nil { + container.Unmount() + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.Unmount() + return err + }), + nil +} + +func (container *Container) Mount() error { + return container.daemon.Mount(container) +} + +func (container *Container) changes() ([]archive.Change, error) { + return container.daemon.Changes(container) +} + +func (container *Container) Changes() ([]archive.Change, error) { + container.Lock() + defer container.Unlock() + return container.changes() +} + +func (container *Container) GetImage() (*image.Image, error) { + if container.daemon == nil { + return nil, fmt.Errorf("Can't get image of unregistered container") + } + return container.daemon.graph.Get(container.Image) +} + +func (container *Container) Unmount() error { + return container.daemon.Unmount(container) +} + +func (container *Container) logPath(name string) (string, error) { + return container.getRootResourcePath(fmt.Sprintf("%s-%s.log", container.ID, name)) +} + +func (container *Container) ReadLog(name string) (io.Reader, error) { + pth, err := container.logPath(name) + if err != nil { + return nil, err + } + return os.Open(pth) +} + +func (container *Container) hostConfigPath() (string, error) { + return container.getRootResourcePath("hostconfig.json") +} + +func (container *Container) jsonPath() (string, error) { + return container.getRootResourcePath("config.json") +} + +// This method must be exported to be used from the lxc template +// This directory is only usable when the container is running +func (container *Container) RootfsPath() string { + return container.basefs +} + +func validateID(id string) error { + if id == "" { + return fmt.Errorf("Invalid empty id") + } + return nil +} + +// GetSize, return real size, virtual size +func (container *Container) GetSize() (int64, int64) { + var ( + sizeRw, sizeRootfs int64 + err error + driver = container.daemon.driver + ) + + if err := container.Mount(); err != nil { + log.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err) + return sizeRw, sizeRootfs + } + defer container.Unmount() + + initID := 
fmt.Sprintf("%s-init", container.ID) + sizeRw, err = driver.DiffSize(container.ID, initID) + if err != nil { + log.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err) + // FIXME: GetSize should return an error. Not changing it now in case + // there is a side-effect. + sizeRw = -1 + } + + if _, err = os.Stat(container.basefs); err != nil { + if sizeRootfs, err = utils.TreeSize(container.basefs); err != nil { + sizeRootfs = -1 + } + } + return sizeRw, sizeRootfs +} + +func (container *Container) Copy(resource string) (io.ReadCloser, error) { + if err := container.Mount(); err != nil { + return nil, err + } + + var filter []string + + basePath, err := container.getResourcePath(resource) + if err != nil { + container.Unmount() + return nil, err + } + + stat, err := os.Stat(basePath) + if err != nil { + container.Unmount() + return nil, err + } + if !stat.IsDir() { + d, f := path.Split(basePath) + basePath = d + filter = []string{f} + } else { + filter = []string{path.Base(basePath)} + basePath = path.Dir(basePath) + } + + archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ + Compression: archive.Uncompressed, + Includes: filter, + }) + if err != nil { + container.Unmount() + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.Unmount() + return err + }), + nil +} + +// Returns true if the container exposes a certain port +func (container *Container) Exposes(p nat.Port) bool { + _, exists := container.Config.ExposedPorts[p] + return exists +} + +func (container *Container) GetPtyMaster() (*os.File, error) { + ttyConsole, ok := container.command.ProcessConfig.Terminal.(execdriver.TtyTerminal) + if !ok { + return nil, ErrNoTTY + } + return ttyConsole.Master(), nil +} + +func (container *Container) HostConfig() *runconfig.HostConfig { + container.Lock() + res := container.hostConfig + container.Unlock() + return res +} + +func (container *Container) SetHostConfig(hostConfig *runconfig.HostConfig) { + container.Lock() + container.hostConfig = hostConfig + container.Unlock() +} + +func (container *Container) DisableLink(name string) { + if container.activeLinks != nil { + if link, exists := container.activeLinks[name]; exists { + link.Disable() + } else { + log.Debugf("Could not find active link for %s", name) + } + } +} + +func (container *Container) setupContainerDns() error { + if container.ResolvConfPath != "" { + return nil + } + + var ( + config = container.hostConfig + daemon = container.daemon + ) + + resolvConf, err := resolvconf.Get() + if err != nil { + return err + } + container.ResolvConfPath, err = container.getRootResourcePath("resolv.conf") + if err != nil { + return err + } + + if config.NetworkMode != "host" { + // check configurations for any container/daemon dns settings + if len(config.Dns) > 0 || len(daemon.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(daemon.config.DnsSearch) > 0 { + var ( + dns = resolvconf.GetNameservers(resolvConf) + dnsSearch = resolvconf.GetSearchDomains(resolvConf) + ) + if len(config.Dns) > 0 { + dns = config.Dns + } else if len(daemon.config.Dns) > 0 { + dns = daemon.config.Dns + } + if len(config.DnsSearch) > 0 { + dnsSearch = config.DnsSearch + } else if len(daemon.config.DnsSearch) > 0 { + dnsSearch = daemon.config.DnsSearch + } + return resolvconf.Build(container.ResolvConfPath, dns, dnsSearch) + } + + // replace any localhost/127.* nameservers + resolvConf = utils.RemoveLocalDns(resolvConf) + // if the 
resulting resolvConf is empty, use DefaultDns + if !bytes.Contains(resolvConf, []byte("nameserver")) { + log.Infof("No non localhost DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v", DefaultDns) + // prefix the default dns options with nameserver + resolvConf = append(resolvConf, []byte("\nnameserver "+strings.Join(DefaultDns, "\nnameserver "))...) + } + } + return ioutil.WriteFile(container.ResolvConfPath, resolvConf, 0644) +} + +func (container *Container) updateParentsHosts() error { + parents, err := container.daemon.Parents(container.Name) + if err != nil { + return err + } + for _, cid := range parents { + if cid == "0" { + continue + } + + c := container.daemon.Get(cid) + if c != nil && !container.daemon.config.DisableNetwork && container.hostConfig.NetworkMode.IsPrivate() { + if err := etchosts.Update(c.HostsPath, container.NetworkSettings.IPAddress, container.Name[1:]); err != nil { + return fmt.Errorf("Failed to update /etc/hosts in parent container: %v", err) + } + } + } + return nil +} + +func (container *Container) initializeNetworking() error { + var err error + if container.hostConfig.NetworkMode.IsHost() { + container.Config.Hostname, err = os.Hostname() + if err != nil { + return err + } + + parts := strings.SplitN(container.Config.Hostname, ".", 2) + if len(parts) > 1 { + container.Config.Hostname = parts[0] + container.Config.Domainname = parts[1] + } + + content, err := ioutil.ReadFile("/etc/hosts") + if os.IsNotExist(err) { + return container.buildHostnameAndHostsFiles("") + } else if err != nil { + return err + } + + if err := container.buildHostnameFile(); err != nil { + return err + } + + hostsPath, err := container.getRootResourcePath("hosts") + if err != nil { + return err + } + container.HostsPath = hostsPath + + return ioutil.WriteFile(container.HostsPath, content, 0644) + } + if container.hostConfig.NetworkMode.IsContainer() { + // we need to get the hosts files from the container to join + nc, err := container.getNetworkedContainer() + if err != nil { + return err + } + container.HostsPath = nc.HostsPath + container.ResolvConfPath = nc.ResolvConfPath + container.Config.Hostname = nc.Config.Hostname + container.Config.Domainname = nc.Config.Domainname + return nil + } + if container.daemon.config.DisableNetwork { + container.Config.NetworkDisabled = true + return container.buildHostnameAndHostsFiles("127.0.1.1") + } + if err := container.AllocateNetwork(); err != nil { + return err + } + return container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress) +} + +// Make sure the config is compatible with the current kernel +func (container *Container) verifyDaemonSettings() { + if container.Config.Memory > 0 && !container.daemon.sysInfo.MemoryLimit { + log.Infof("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.") + container.Config.Memory = 0 + } + if container.Config.Memory > 0 && !container.daemon.sysInfo.SwapLimit { + log.Infof("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.") + container.Config.MemorySwap = -1 + } + if container.daemon.sysInfo.IPv4ForwardingDisabled { + log.Infof("WARNING: IPv4 forwarding is disabled. 
Networking will not work") + } +} + +func (container *Container) setupLinkedContainers() ([]string, error) { + var ( + env []string + daemon = container.daemon + ) + children, err := daemon.Children(container.Name) + if err != nil { + return nil, err + } + + if len(children) > 0 { + container.activeLinks = make(map[string]*links.Link, len(children)) + + // If we encounter an error make sure that we rollback any network + // config and ip table changes + rollback := func() { + for _, link := range container.activeLinks { + link.Disable() + } + container.activeLinks = nil + } + + for linkAlias, child := range children { + if !child.IsRunning() { + return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) + } + + link, err := links.NewLink( + container.NetworkSettings.IPAddress, + child.NetworkSettings.IPAddress, + linkAlias, + child.Config.Env, + child.Config.ExposedPorts, + daemon.eng) + + if err != nil { + rollback() + return nil, err + } + + container.activeLinks[link.Alias()] = link + if err := link.Enable(); err != nil { + rollback() + return nil, err + } + + for _, envVar := range link.ToEnv() { + env = append(env, envVar) + } + } + } + return env, nil +} + +func (container *Container) createDaemonEnvironment(linkedEnv []string) []string { + // if a domain name was specified, append it to the hostname (see #7851) + fullHostname := container.Config.Hostname + if container.Config.Domainname != "" { + fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname) + } + // Setup environment + env := []string{ + "PATH=" + DefaultPathEnv, + "HOSTNAME=" + fullHostname, + // Note: we don't set HOME here because it'll get autoset intelligently + // based on the value of USER inside dockerinit, but only if it isn't + // set already (ie, that can be overridden by setting HOME via -e or ENV + // in a Dockerfile). + } + if container.Config.Tty { + env = append(env, "TERM=xterm") + } + env = append(env, linkedEnv...) + // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. 
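[Reviewer note] The comment above gives the merge rule for container environments: a user-supplied variable replaces a default with the same key, and new keys are appended in order. utils.ReplaceOrAppendEnvValues is the real helper; the following is an illustrative reimplementation of that rule, not the upstream code:

    package main

    import (
        "fmt"
        "strings"
    )

    // replaceOrAppendEnvValues merges override into defaults: entries whose KEY
    // matches an existing entry replace it in place, new keys are appended.
    func replaceOrAppendEnvValues(defaults, override []string) []string {
        out := append([]string(nil), defaults...)
        for _, kv := range override {
            key := strings.SplitN(kv, "=", 2)[0] + "="
            replaced := false
            for i, existing := range out {
                if strings.HasPrefix(existing, key) {
                    out[i] = kv
                    replaced = true
                    break
                }
            }
            if !replaced {
                out = append(out, kv)
            }
        }
        return out
    }

    func main() {
        defaults := []string{"PATH=/usr/bin", "HOSTNAME=abc123", "TERM=xterm"}
        override := []string{"TERM=screen", "LANG=C"}
        fmt.Println(replaceOrAppendEnvValues(defaults, override))
        // [PATH=/usr/bin HOSTNAME=abc123 TERM=screen LANG=C]
    }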
+ env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) + + return env +} + +func (container *Container) setupWorkingDirectory() error { + if container.Config.WorkingDir != "" { + container.Config.WorkingDir = path.Clean(container.Config.WorkingDir) + + pth, err := container.getResourcePath(container.Config.WorkingDir) + if err != nil { + return err + } + + pthInfo, err := os.Stat(pth) + if err != nil { + if !os.IsNotExist(err) { + return err + } + + if err := os.MkdirAll(pth, 0755); err != nil { + return err + } + } + if pthInfo != nil && !pthInfo.IsDir() { + return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) + } + } + return nil +} + +func (container *Container) startLoggingToDisk() error { + // Setup logging of stdout and stderr to disk + pth, err := container.logPath("json") + if err != nil { + return err + } + + if err := container.daemon.LogToDisk(container.stdout, pth, "stdout"); err != nil { + return err + } + + if err := container.daemon.LogToDisk(container.stderr, pth, "stderr"); err != nil { + return err + } + + return nil +} + +func (container *Container) waitForStart() error { + container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy) + + // block until we either receive an error from the initial start of the container's + // process or until the process is running in the container + select { + case <-container.monitor.startSignal: + case err := <-promise.Go(container.monitor.Start): + return err + } + + return nil +} + +func (container *Container) allocatePort(eng *engine.Engine, port nat.Port, bindings nat.PortMap) error { + binding := bindings[port] + if container.hostConfig.PublishAllPorts && len(binding) == 0 { + binding = append(binding, nat.PortBinding{}) + } + + for i := 0; i < len(binding); i++ { + b := binding[i] + + job := eng.Job("allocate_port", container.ID) + job.Setenv("HostIP", b.HostIp) + job.Setenv("HostPort", b.HostPort) + job.Setenv("Proto", port.Proto()) + job.Setenv("ContainerPort", port.Port()) + + portEnv, err := job.Stdout.AddEnv() + if err != nil { + return err + } + if err := job.Run(); err != nil { + return err + } + b.HostIp = portEnv.Get("HostIP") + b.HostPort = portEnv.Get("HostPort") + + binding[i] = b + } + bindings[port] = binding + return nil +} + +func (container *Container) GetProcessLabel() string { + // even if we have a process label return "" if we are running + // in privileged mode + if container.hostConfig.Privileged { + return "" + } + return container.ProcessLabel +} + +func (container *Container) GetMountLabel() string { + if container.hostConfig.Privileged { + return "" + } + return container.MountLabel +} + +func (container *Container) getNetworkedContainer() (*Container, error) { + parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2) + switch parts[0] { + case "container": + nc := container.daemon.Get(parts[1]) + if nc == nil { + return nil, fmt.Errorf("no such container to join network: %s", parts[1]) + } + if !nc.IsRunning() { + return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1]) + } + return nc, nil + default: + return nil, fmt.Errorf("network mode not set to container") + } +} diff -Nru docker.io-0.9.1~dfsg1/daemon/container_unit_test.go docker.io-1.3.2~dfsg1/daemon/container_unit_test.go --- docker.io-0.9.1~dfsg1/daemon/container_unit_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/container_unit_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,197 @@ 
+package daemon + +import ( + "github.com/docker/docker/nat" + "testing" +) + +func TestParseNetworkOptsPrivateOnly(t *testing.T) { + ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIp != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPublic(t *testing.T) { + ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100:8080:80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "8080" { + t.Logf("Expected 8080 got %s", s.HostPort) + t.Fail() + } + if s.HostIp != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPublicNoPort(t *testing.T) { + ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100"}) + + if err == nil { + t.Logf("Expected error Invalid containerPort") + t.Fail() + } + if ports != nil { + t.Logf("Expected nil got %s", ports) + t.Fail() + } + if bindings != nil { + t.Logf("Expected nil got %s", bindings) + t.Fail() + } +} + +func TestParseNetworkOptsNegativePorts(t *testing.T) { + ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100:-1:-1"}) + + if err == nil { + t.Fail() + } + t.Logf("%v", len(ports)) + t.Logf("%v", bindings) + if len(ports) != 0 { + t.Logf("Expected nil got %s", len(ports)) + t.Fail() + } + if len(bindings) != 0 { + t.Logf("Expected 0 got %s", len(bindings)) + t.Fail() + } +} + +func TestParseNetworkOptsUdp(t *testing.T) { + ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::6000/udp"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "udp" { + t.Logf("Expected udp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "6000" { + t.Logf("Expected 6000 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIp != "192.168.1.100" { + t.Fail() + } + } +} + +func TestGetFullName(t *testing.T) { + name, err := GetFullContainerName("testing") + if err != nil { + t.Fatal(err) + } + if name != "/testing" { + 
t.Fatalf("Expected /testing got %s", name) + } + if _, err := GetFullContainerName(""); err == nil { + t.Fatal("Error should not be nil") + } +} + +func TestValidContainerNames(t *testing.T) { + invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"} + validNames := []string{"word-word", "word_word", "1weoid"} + + for _, name := range invalidNames { + if validContainerNamePattern.MatchString(name) { + t.Fatalf("%q is not a valid container name and was returned as valid.", name) + } + } + + for _, name := range validNames { + if !validContainerNamePattern.MatchString(name) { + t.Fatalf("%q is a valid container name and was returned as invalid.", name) + } + } +} diff -Nru docker.io-0.9.1~dfsg1/daemon/copy.go docker.io-1.3.2~dfsg1/daemon/copy.go --- docker.io-0.9.1~dfsg1/daemon/copy.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/copy.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,33 @@ +package daemon + +import ( + "io" + + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerCopy(job *engine.Job) engine.Status { + if len(job.Args) != 2 { + return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name) + } + + var ( + name = job.Args[0] + resource = job.Args[1] + ) + + if container := daemon.Get(name); container != nil { + + data, err := container.Copy(resource) + if err != nil { + return job.Error(err) + } + defer data.Close() + + if _, err := io.Copy(job.Stdout, data); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + return job.Errorf("No such container: %s", name) +} diff -Nru docker.io-0.9.1~dfsg1/daemon/create.go docker.io-1.3.2~dfsg1/daemon/create.go --- docker.io-0.9.1~dfsg1/daemon/create.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/create.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,101 @@ +package daemon + +import ( + "github.com/docker/docker/engine" + "github.com/docker/docker/graph" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/runconfig" +) + +func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status { + var name string + if len(job.Args) == 1 { + name = job.Args[0] + } else if len(job.Args) > 1 { + return job.Errorf("Usage: %s", job.Name) + } + config := runconfig.ContainerConfigFromJob(job) + if config.Memory != 0 && config.Memory < 4194304 { + return job.Errorf("Minimum memory limit allowed is 4MB") + } + if config.Memory > 0 && !daemon.SystemConfig().MemoryLimit { + job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n") + config.Memory = 0 + } + if config.Memory > 0 && !daemon.SystemConfig().SwapLimit { + job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n") + config.MemorySwap = -1 + } + + var hostConfig *runconfig.HostConfig + if job.EnvExists("HostConfig") { + hostConfig = runconfig.ContainerHostConfigFromJob(job) + } else { + // Older versions of the API don't provide a HostConfig. 
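[Reviewer note] ContainerCreate above rejects memory limits under 4 MB (4194304 bytes) outright, then degrades gracefully, emitting a warning, when the kernel lacks memory or swap limit support. A compact sketch of that validate-then-degrade flow; the sysInfo struct here is a stub for daemon.SystemConfig():

    package main

    import (
        "errors"
        "fmt"
    )

    const minMemory = 4 * 1024 * 1024 // 4MB, as in daemon/create.go

    type sysInfo struct{ MemoryLimit, SwapLimit bool }

    // validateMemory mirrors the checks in ContainerCreate: hard-reject values
    // below the minimum, degrade gracefully when the kernel lacks support.
    func validateMemory(memory, memorySwap int64, sys sysInfo) (int64, int64, []string, error) {
        var warnings []string
        if memory != 0 && memory < minMemory {
            return 0, 0, nil, errors.New("Minimum memory limit allowed is 4MB")
        }
        if memory > 0 && !sys.MemoryLimit {
            warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
            memory = 0
        }
        if memory > 0 && !sys.SwapLimit {
            warnings = append(warnings, "Your kernel does not support swap limit capabilities. Limitation discarded.")
            memorySwap = -1
        }
        return memory, memorySwap, warnings, nil
    }

    func main() {
        mem, swap, warns, err := validateMemory(8*1024*1024, 0, sysInfo{MemoryLimit: false, SwapLimit: true})
        fmt.Println(mem, swap, warns, err)
    }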
+ hostConfig = nil + } + + container, buildWarnings, err := daemon.Create(config, hostConfig, name) + if err != nil { + if daemon.Graph().IsNotExist(err) { + _, tag := parsers.ParseRepositoryTag(config.Image) + if tag == "" { + tag = graph.DEFAULTTAG + } + return job.Errorf("No such image: %s (tag: %s)", config.Image, tag) + } + return job.Error(err) + } + if !container.Config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled { + job.Errorf("IPv4 forwarding is disabled.\n") + } + container.LogEvent("create") + // FIXME: this is necessary because daemon.Create might return a nil container + // with a non-nil error. This should not happen! Once it's fixed we + // can remove this workaround. + if container != nil { + job.Printf("%s\n", container.ID) + } + for _, warning := range buildWarnings { + job.Errorf("%s\n", warning) + } + + return engine.StatusOK +} + +// Create creates a new container from the given configuration with a given name. +func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.HostConfig, name string) (*Container, []string, error) { + var ( + container *Container + warnings []string + ) + + img, err := daemon.repositories.LookupImage(config.Image) + if err != nil { + return nil, nil, err + } + if err := img.CheckDepth(); err != nil { + return nil, nil, err + } + if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil { + return nil, nil, err + } + if container, err = daemon.newContainer(name, config, img); err != nil { + return nil, nil, err + } + if err := daemon.Register(container); err != nil { + return nil, nil, err + } + if err := daemon.createRootfs(container, img); err != nil { + return nil, nil, err + } + if hostConfig != nil { + if err := daemon.setHostConfig(container, hostConfig); err != nil { + return nil, nil, err + } + } + if err := container.ToDisk(); err != nil { + return nil, nil, err + } + return container, warnings, nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/daemon_aufs.go docker.io-1.3.2~dfsg1/daemon/daemon_aufs.go --- docker.io-0.9.1~dfsg1/daemon/daemon_aufs.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/daemon_aufs.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,22 @@ +// +build !exclude_graphdriver_aufs + +package daemon + +import ( + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/aufs" + "github.com/docker/docker/graph" + "github.com/docker/docker/pkg/log" +) + +// Given the graphdriver ad, if it is aufs, then migrate it. +// If aufs driver is not built, this func is a noop. 
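[Reviewer note] daemon_aufs.go above, and the daemon_btrfs.go and daemon_devicemapper.go files that follow, wire optional graph drivers in through build-tag-guarded files whose only effect is an import: each driver package registers itself from init, so a blank import is enough to enable it, and building with `-tags exclude_graphdriver_btrfs` drops it. A minimal sketch of that registration-by-import pattern; the registry below is illustrative, a stand-in for the real graphdriver package:

    package main

    import (
        "fmt"
        "sort"
    )

    // drivers is the process-wide registry; the real graphdriver package keeps
    // an equivalent map populated by graphdriver.Register.
    var drivers = map[string]func() string{}

    // Register is called from a driver package's init function, so merely
    // importing the package (even blank-imported) makes the driver available.
    func Register(name string, factory func() string) {
        drivers[name] = factory
    }

    // A driver package excluded via build tags simply never runs its init:
    //
    //    // +build !exclude_graphdriver_btrfs
    //    package btrfs
    //    func init() { graphdriver.Register("btrfs", ...) }

    func init() { Register("vfs", func() string { return "vfs" }) }

    func main() {
        names := make([]string, 0, len(drivers))
        for name := range drivers {
            names = append(names, name)
        }
        sort.Strings(names)
        fmt.Println("registered graph drivers:", names)
    }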
+func migrateIfAufs(driver graphdriver.Driver, root string) error { + if ad, ok := driver.(*aufs.Driver); ok { + log.Debugf("Migrating existing containers") + if err := ad.Migrate(root, graph.SetupInitLayer); err != nil { + return err + } + } + return nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/daemon_btrfs.go docker.io-1.3.2~dfsg1/daemon/daemon_btrfs.go --- docker.io-0.9.1~dfsg1/daemon/daemon_btrfs.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/daemon_btrfs.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,7 @@ +// +build !exclude_graphdriver_btrfs + +package daemon + +import ( + _ "github.com/docker/docker/daemon/graphdriver/btrfs" +) diff -Nru docker.io-0.9.1~dfsg1/daemon/daemon_devicemapper.go docker.io-1.3.2~dfsg1/daemon/daemon_devicemapper.go --- docker.io-0.9.1~dfsg1/daemon/daemon_devicemapper.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/daemon_devicemapper.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,7 @@ +// +build !exclude_graphdriver_devicemapper + +package daemon + +import ( + _ "github.com/docker/docker/daemon/graphdriver/devmapper" +) diff -Nru docker.io-0.9.1~dfsg1/daemon/daemon.go docker.io-1.3.2~dfsg1/daemon/daemon.go --- docker.io-0.9.1~dfsg1/daemon/daemon.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/daemon.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,1126 @@ +package daemon + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path" + "regexp" + "runtime" + "strings" + "sync" + "time" + + "github.com/docker/libcontainer/label" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/execdriver/execdrivers" + "github.com/docker/docker/daemon/execdriver/lxc" + "github.com/docker/docker/daemon/graphdriver" + _ "github.com/docker/docker/daemon/graphdriver/vfs" + _ "github.com/docker/docker/daemon/networkdriver/bridge" + "github.com/docker/docker/daemon/networkdriver/portallocator" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/engine" + "github.com/docker/docker/graph" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/broadcastwriter" + "github.com/docker/docker/pkg/graphdb" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/namesgenerator" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/trust" + "github.com/docker/docker/utils" + "github.com/docker/docker/volumes" +) + +var ( + DefaultDns = []string{"8.8.8.8", "8.8.4.4"} + validContainerNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` + validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`) +) + +type contStore struct { + s map[string]*Container + sync.Mutex +} + +func (c *contStore) Add(id string, cont *Container) { + c.Lock() + c.s[id] = cont + c.Unlock() +} + +func (c *contStore) Get(id string) *Container { + c.Lock() + res := c.s[id] + c.Unlock() + return res +} + +func (c *contStore) Delete(id string) { + c.Lock() + delete(c.s, id) + c.Unlock() +} + +func (c *contStore) List() []*Container { + containers := new(History) + c.Lock() + for _, cont := range c.s { + containers.Add(cont) + } + c.Unlock() + containers.Sort() + return *containers +} + +type Daemon struct { + repository string + sysInitPath string + containers *contStore + execCommands *execStore + 
graph *graph.Graph + repositories *graph.TagStore + idIndex *truncindex.TruncIndex + sysInfo *sysinfo.SysInfo + volumes *volumes.Repository + eng *engine.Engine + config *Config + containerGraph *graphdb.Database + driver graphdriver.Driver + execDriver execdriver.Driver + trustStore *trust.TrustStore +} + +// Install installs daemon capabilities to eng. +func (daemon *Daemon) Install(eng *engine.Engine) error { + // FIXME: remove ImageDelete's dependency on Daemon, then move to graph/ + for name, method := range map[string]engine.Handler{ + "attach": daemon.ContainerAttach, + "commit": daemon.ContainerCommit, + "container_changes": daemon.ContainerChanges, + "container_copy": daemon.ContainerCopy, + "container_inspect": daemon.ContainerInspect, + "containers": daemon.Containers, + "create": daemon.ContainerCreate, + "rm": daemon.ContainerRm, + "export": daemon.ContainerExport, + "info": daemon.CmdInfo, + "kill": daemon.ContainerKill, + "logs": daemon.ContainerLogs, + "pause": daemon.ContainerPause, + "resize": daemon.ContainerResize, + "restart": daemon.ContainerRestart, + "start": daemon.ContainerStart, + "stop": daemon.ContainerStop, + "top": daemon.ContainerTop, + "unpause": daemon.ContainerUnpause, + "wait": daemon.ContainerWait, + "image_delete": daemon.ImageDelete, // FIXME: see above + "execCreate": daemon.ContainerExecCreate, + "execStart": daemon.ContainerExecStart, + "execResize": daemon.ContainerExecResize, + } { + if err := eng.Register(name, method); err != nil { + return err + } + } + if err := daemon.Repositories().Install(eng); err != nil { + return err + } + if err := daemon.trustStore.Install(eng); err != nil { + return err + } + // FIXME: this hack is necessary for legacy integration tests to access + // the daemon object. + eng.Hack_SetGlobalVar("httpapi.daemon", daemon) + return nil +} + +// Get looks for a container by the specified ID or name, and returns it. +// If the container is not found, or if an error occurs, nil is returned. +func (daemon *Daemon) Get(name string) *Container { + if id, err := daemon.idIndex.Get(name); err == nil { + return daemon.containers.Get(id) + } + if c, _ := daemon.GetByName(name); c != nil { + return c + } + return nil +} + +// Exists returns a true if a container of the specified ID or name exists, +// false otherwise. +func (daemon *Daemon) Exists(id string) bool { + return daemon.Get(id) != nil +} + +func (daemon *Daemon) containerRoot(id string) string { + return path.Join(daemon.repository, id) +} + +// Load reads the contents of a container from disk +// This is typically done at startup. 
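[Reviewer note] Get above tries its argument first as an ID or unambiguous ID prefix via the truncindex, then falls back to a name lookup. A self-contained sketch of that two-step resolution, with a naive linear scan standing in for truncindex.TruncIndex:

    package main

    import (
        "fmt"
        "strings"
    )

    type container struct{ ID, Name string }

    var (
        byID   = map[string]*container{}
        byName = map[string]*container{}
    )

    // resolve mimics daemon.Get: try the key as an ID or unambiguous ID prefix,
    // then fall back to a name lookup. A linear scan stands in for truncindex.
    func resolve(key string) *container {
        var match *container
        for id, c := range byID {
            if strings.HasPrefix(id, key) {
                if match != nil {
                    match = nil // ambiguous prefix: refuse to guess
                    break
                }
                match = c
            }
        }
        if match != nil {
            return match
        }
        return byName["/"+strings.TrimPrefix(key, "/")]
    }

    func main() {
        c := &container{ID: "4e1f7ab3c09d", Name: "/web"}
        byID[c.ID] = c
        byName[c.Name] = c

        fmt.Println(resolve("4e1f")) // resolved by ID prefix
        fmt.Println(resolve("web"))  // resolved by name
    }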
+func (daemon *Daemon) load(id string) (*Container, error) { + container := &Container{ + root: daemon.containerRoot(id), + State: NewState(), + execCommands: newExecStore(), + } + if err := container.FromDisk(); err != nil { + return nil, err + } + + if container.ID != id { + return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) + } + + container.readHostConfig() + + return container, nil +} + +// Register makes a container object usable by the daemon as +// This is a wrapper for register +func (daemon *Daemon) Register(container *Container) error { + return daemon.register(container, true) +} + +// register makes a container object usable by the daemon as +func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error { + if container.daemon != nil || daemon.Exists(container.ID) { + return fmt.Errorf("Container is already loaded") + } + if err := validateID(container.ID); err != nil { + return err + } + if err := daemon.ensureName(container); err != nil { + return err + } + + container.daemon = daemon + + // Attach to stdout and stderr + container.stderr = broadcastwriter.New() + container.stdout = broadcastwriter.New() + // Attach to stdin + if container.Config.OpenStdin { + container.stdin, container.stdinPipe = io.Pipe() + } else { + container.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin + } + // done + daemon.containers.Add(container.ID, container) + + // don't update the Suffixarray if we're starting up + // we'll waste time if we update it for every container + daemon.idIndex.Add(container.ID) + + // FIXME: if the container is supposed to be running but is not, auto restart it? + // if so, then we need to restart monitor and init a new lock + // If the container is supposed to be running, make sure of it + if container.IsRunning() { + log.Debugf("killing old running container %s", container.ID) + + existingPid := container.Pid + container.SetStopped(0) + + // We only have to handle this for lxc because the other drivers will ensure that + // no processes are left when docker dies + if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") { + lxc.KillLxc(container.ID, 9) + } else { + // use the current driver and ensure that the container is dead x.x + cmd := &execdriver.Command{ + ID: container.ID, + } + var err error + cmd.ProcessConfig.Process, err = os.FindProcess(existingPid) + if err != nil { + log.Debugf("cannot find existing process for %d", existingPid) + } + daemon.execDriver.Terminate(cmd) + } + + if err := container.Unmount(); err != nil { + log.Debugf("unmount error %s", err) + } + if err := container.ToDisk(); err != nil { + log.Debugf("saving stopped state to disk %s", err) + } + + info := daemon.execDriver.Info(container.ID) + if !info.IsRunning() { + log.Debugf("Container %s was supposed to be running but is not.", container.ID) + + log.Debugf("Marking as stopped") + + container.SetStopped(-127) + if err := container.ToDisk(); err != nil { + return err + } + } + } + return nil +} + +func (daemon *Daemon) ensureName(container *Container) error { + if container.Name == "" { + name, err := daemon.generateNewName(container.ID) + if err != nil { + return err + } + container.Name = name + + if err := container.ToDisk(); err != nil { + log.Debugf("Error saving container name %s", err) + } + } + return nil +} + +func (daemon *Daemon) LogToDisk(src *broadcastwriter.BroadcastWriter, dst, stream string) error { + log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600) 
+ if err != nil { + return err + } + src.AddWriter(log, stream) + return nil +} + +func (daemon *Daemon) restore() error { + var ( + debug = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "") + containers = make(map[string]*Container) + currentDriver = daemon.driver.String() + ) + + if !debug { + log.Infof("Loading containers: ") + } + dir, err := ioutil.ReadDir(daemon.repository) + if err != nil { + return err + } + + for _, v := range dir { + id := v.Name() + container, err := daemon.load(id) + if !debug { + fmt.Print(".") + } + if err != nil { + log.Errorf("Failed to load container %v: %v", id, err) + continue + } + + // Ignore the container if it does not support the current driver being used by the graph + if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver { + log.Debugf("Loaded container %v", container.ID) + + containers[container.ID] = container + } else { + log.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) + } + } + + registeredContainers := []*Container{} + + if entities := daemon.containerGraph.List("/", -1); entities != nil { + for _, p := range entities.Paths() { + if !debug { + fmt.Print(".") + } + + e := entities[p] + + if container, ok := containers[e.ID()]; ok { + if err := daemon.register(container, false); err != nil { + log.Debugf("Failed to register container %s: %s", container.ID, err) + } + + registeredContainers = append(registeredContainers, container) + + // delete from the map so that a new name is not automatically generated + delete(containers, e.ID()) + } + } + } + + // Any containers that are left over do not exist in the graph + for _, container := range containers { + // Try to set the default name for a container if it exists prior to links + container.Name, err = daemon.generateNewName(container.ID) + if err != nil { + log.Debugf("Setting default id - %s", err) + } + + if err := daemon.register(container, false); err != nil { + log.Debugf("Failed to register container %s: %s", container.ID, err) + } + + registeredContainers = append(registeredContainers, container) + } + + // check the restart policy on the containers and restart any container with + // the restart policy of "always" + if daemon.config.AutoRestart { + log.Debugf("Restarting containers...") + + for _, container := range registeredContainers { + if container.hostConfig.RestartPolicy.Name == "always" || + (container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0) { + log.Debugf("Starting container %s", container.ID) + + if err := container.Start(); err != nil { + log.Debugf("Failed to start container %s: %s", container.ID, err) + } + } + } + } + + for _, c := range registeredContainers { + c.registerVolumes() + } + + if !debug { + log.Infof(": done.") + } + + return nil +} + +func (daemon *Daemon) checkDeprecatedExpose(config *runconfig.Config) bool { + if config != nil { + if config.PortSpecs != nil { + for _, p := range config.PortSpecs { + if strings.Contains(p, ":") { + return true + } + } + } + } + return false +} + +func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image.Image) ([]string, error) { + warnings := []string{} + if daemon.checkDeprecatedExpose(img.Config) || daemon.checkDeprecatedExpose(config) { + warnings = append(warnings, "The mapping to public ports on your host via Dockerfile EXPOSE (host:port:port) has been deprecated. 
Use -p to publish the ports.") + } + if img.Config != nil { + if err := runconfig.Merge(config, img.Config); err != nil { + return nil, err + } + } + if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { + return nil, fmt.Errorf("No command specified") + } + return warnings, nil +} + +func (daemon *Daemon) generateIdAndName(name string) (string, string, error) { + var ( + err error + id = utils.GenerateRandomID() + ) + + if name == "" { + if name, err = daemon.generateNewName(id); err != nil { + return "", "", err + } + return id, name, nil + } + + if name, err = daemon.reserveName(id, name); err != nil { + return "", "", err + } + + return id, name, nil +} + +func (daemon *Daemon) reserveName(id, name string) (string, error) { + if !validContainerNamePattern.MatchString(name) { + return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) + } + + if name[0] != '/' { + name = "/" + name + } + + if _, err := daemon.containerGraph.Set(name, id); err != nil { + if !graphdb.IsNonUniqueNameError(err) { + return "", err + } + + conflictingContainer, err := daemon.GetByName(name) + if err != nil { + if strings.Contains(err.Error(), "Could not find entity") { + return "", err + } + + // Remove name and continue starting the container + if err := daemon.containerGraph.Delete(name); err != nil { + return "", err + } + } else { + nameAsKnownByUser := strings.TrimPrefix(name, "/") + return "", fmt.Errorf( + "Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", nameAsKnownByUser, + utils.TruncateID(conflictingContainer.ID), nameAsKnownByUser) + } + } + return name, nil +} + +func (daemon *Daemon) generateNewName(id string) (string, error) { + var name string + for i := 0; i < 6; i++ { + name = namesgenerator.GetRandomName(i) + if name[0] != '/' { + name = "/" + name + } + + if _, err := daemon.containerGraph.Set(name, id); err != nil { + if !graphdb.IsNonUniqueNameError(err) { + return "", err + } + continue + } + return name, nil + } + + name = "/" + utils.TruncateID(id) + if _, err := daemon.containerGraph.Set(name, id); err != nil { + return "", err + } + return name, nil +} + +func (daemon *Daemon) generateHostname(id string, config *runconfig.Config) { + // Generate default hostname + // FIXME: the lxc template no longer needs to set a default hostname + if config.Hostname == "" { + config.Hostname = id[:12] + } +} + +func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint, configCmd []string) (string, []string) { + var ( + entrypoint string + args []string + ) + if len(configEntrypoint) != 0 { + entrypoint = configEntrypoint[0] + args = append(configEntrypoint[1:], configCmd...) 
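+		// Illustrative example (not in the original source): with
+		// Entrypoint ["/bin/sh", "-c"] and Cmd ["echo", "hi"], this branch
+		// yields entrypoint = "/bin/sh" and args = ["-c", "echo", "hi"].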
+ } else { + entrypoint = configCmd[0] + args = configCmd[1:] + } + return entrypoint, args +} + +func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error { + var ( + labelOpts []string + err error + ) + + for _, opt := range config.SecurityOpt { + con := strings.SplitN(opt, ":", 2) + if len(con) == 1 { + return fmt.Errorf("Invalid --security-opt: %q", opt) + } + switch con[0] { + case "label": + labelOpts = append(labelOpts, con[1]) + case "apparmor": + container.AppArmorProfile = con[1] + default: + return fmt.Errorf("Invalid --security-opt: %q", opt) + } + } + + container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) + return err +} + +func (daemon *Daemon) newContainer(name string, config *runconfig.Config, img *image.Image) (*Container, error) { + var ( + id string + err error + ) + id, name, err = daemon.generateIdAndName(name) + if err != nil { + return nil, err + } + + daemon.generateHostname(id, config) + entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd) + + container := &Container{ + // FIXME: we should generate the ID here instead of receiving it as an argument + ID: id, + Created: time.Now().UTC(), + Path: entrypoint, + Args: args, //FIXME: de-duplicate from config + Config: config, + hostConfig: &runconfig.HostConfig{}, + Image: img.ID, // Always use the resolved image id + NetworkSettings: &NetworkSettings{}, + Name: name, + Driver: daemon.driver.String(), + ExecDriver: daemon.execDriver.Name(), + State: NewState(), + execCommands: newExecStore(), + } + container.root = daemon.containerRoot(container.ID) + return container, err +} + +func (daemon *Daemon) createRootfs(container *Container, img *image.Image) error { + // Step 1: create the container directory. + // This doubles as a barrier to avoid race conditions. 
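+	// os.Mkdir, unlike os.MkdirAll, fails with EEXIST when the directory
+	// already exists, so two racing creates of the same container ID cannot
+	// both get past this call.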
+ if err := os.Mkdir(container.root, 0700); err != nil { + return err + } + initID := fmt.Sprintf("%s-init", container.ID) + if err := daemon.driver.Create(initID, img.ID); err != nil { + return err + } + initPath, err := daemon.driver.Get(initID, "") + if err != nil { + return err + } + defer daemon.driver.Put(initID) + + if err := graph.SetupInitLayer(initPath); err != nil { + return err + } + + if err := daemon.driver.Create(container.ID, initID); err != nil { + return err + } + return nil +} + +func GetFullContainerName(name string) (string, error) { + if name == "" { + return "", fmt.Errorf("Container name cannot be empty") + } + if name[0] != '/' { + name = "/" + name + } + return name, nil +} + +func (daemon *Daemon) GetByName(name string) (*Container, error) { + fullName, err := GetFullContainerName(name) + if err != nil { + return nil, err + } + entity := daemon.containerGraph.Get(fullName) + if entity == nil { + return nil, fmt.Errorf("Could not find entity for %s", name) + } + e := daemon.containers.Get(entity.ID()) + if e == nil { + return nil, fmt.Errorf("Could not find container for entity id %s", entity.ID()) + } + return e, nil +} + +func (daemon *Daemon) Children(name string) (map[string]*Container, error) { + name, err := GetFullContainerName(name) + if err != nil { + return nil, err + } + children := make(map[string]*Container) + + err = daemon.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error { + c := daemon.Get(e.ID()) + if c == nil { + return fmt.Errorf("Could not get container for name %s and id %s", e.ID(), p) + } + children[p] = c + return nil + }, 0) + + if err != nil { + return nil, err + } + return children, nil +} + +func (daemon *Daemon) Parents(name string) ([]string, error) { + name, err := GetFullContainerName(name) + if err != nil { + return nil, err + } + + return daemon.containerGraph.Parents(name) +} + +func (daemon *Daemon) RegisterLink(parent, child *Container, alias string) error { + fullName := path.Join(parent.Name, alias) + if !daemon.containerGraph.Exists(fullName) { + _, err := daemon.containerGraph.Set(fullName, child.ID) + return err + } + return nil +} + +func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error { + if hostConfig != nil && hostConfig.Links != nil { + for _, l := range hostConfig.Links { + parts, err := parsers.PartParser("name:alias", l) + if err != nil { + return err + } + child, err := daemon.GetByName(parts["name"]) + if err != nil { + return err + } + if child == nil { + return fmt.Errorf("Could not get container for %s", parts["name"]) + } + if err := daemon.RegisterLink(container, child, parts["alias"]); err != nil { + return err + } + } + + // After we load all the links into the daemon + // set them to nil on the hostconfig + hostConfig.Links = nil + if err := container.WriteHostConfig(); err != nil { + return err + } + } + return nil +} + +// FIXME: harmonize with NewGraph() +func NewDaemon(config *Config, eng *engine.Engine) (*Daemon, error) { + daemon, err := NewDaemonFromDirectory(config, eng) + if err != nil { + return nil, err + } + return daemon, nil +} + +func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error) { + // Apply configuration defaults + if config.Mtu == 0 { + // FIXME: GetDefaultNetwork Mtu doesn't need to be public anymore + config.Mtu = GetDefaultNetworkMtu() + } + // Check for mutually incompatible config options + if config.BridgeIface != "" && config.BridgeIP != "" { + return nil, fmt.Errorf("You specified -b & 
--bip, mutually exclusive options. Please specify only one.") + } + if !config.EnableIptables && !config.InterContainerCommunication { + return nil, fmt.Errorf("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.") + } + if !config.EnableIptables && config.EnableIpMasq { + config.EnableIpMasq = false + } + config.DisableNetwork = config.BridgeIface == disableNetworkBridge + + // Claim the pidfile first, to avoid any and all unexpected race conditions. + // Some of the init doesn't need a pidfile lock - but let's not try to be smart. + if config.Pidfile != "" { + if err := utils.CreatePidFile(config.Pidfile); err != nil { + return nil, err + } + eng.OnShutdown(func() { + // Always release the pidfile last, just in case + utils.RemovePidFile(config.Pidfile) + }) + } + + // Check that the system is supported and we have sufficient privileges + if runtime.GOOS != "linux" { + return nil, fmt.Errorf("The Docker daemon is only supported on linux") + } + if os.Geteuid() != 0 { + return nil, fmt.Errorf("The Docker daemon needs to be run as root") + } + if err := checkKernelAndArch(); err != nil { + return nil, err + } + + // set up the TempDir to use a canonical path + tmp, err := utils.TempDir(config.Root) + if err != nil { + return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) + } + realTmp, err := utils.ReadSymlinkedDirectory(tmp) + if err != nil { + return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) + } + os.Setenv("TMPDIR", realTmp) + if !config.EnableSelinuxSupport { + selinuxSetDisabled() + } + + // get the canonical path to the Docker root directory + var realRoot string + if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { + realRoot = config.Root + } else { + realRoot, err = utils.ReadSymlinkedDirectory(config.Root) + if err != nil { + return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) + } + } + config.Root = realRoot + // Create the root directory if it doesn't exists + if err := os.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + + // Set the default driver + graphdriver.DefaultDriver = config.GraphDriver + + // Load storage driver + driver, err := graphdriver.New(config.Root, config.GraphOptions) + if err != nil { + return nil, err + } + log.Debugf("Using graph driver %s", driver) + + // As Docker on btrfs and SELinux are incompatible at present, error on both being enabled + if selinuxEnabled() && config.EnableSelinuxSupport && driver.String() == "btrfs" { + return nil, fmt.Errorf("SELinux is not supported with the BTRFS graph driver!") + } + + daemonRepo := path.Join(config.Root, "containers") + + if err := os.MkdirAll(daemonRepo, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + + // Migrate the container if it is aufs and aufs is enabled + if err = migrateIfAufs(driver, config.Root); err != nil { + return nil, err + } + + log.Debugf("Creating images graph") + g, err := graph.NewGraph(path.Join(config.Root, "graph"), driver) + if err != nil { + return nil, err + } + + volumesDriver, err := graphdriver.GetDriver("vfs", config.Root, config.GraphOptions) + if err != nil { + return nil, err + } + + volumes, err := volumes.NewRepository(path.Join(config.Root, "volumes"), volumesDriver) + if err != nil { + return nil, err + } + + log.Debugf("Creating repository list") + repositories, err := graph.NewTagStore(path.Join(config.Root, 
"repositories-"+driver.String()), g, config.Mirrors, config.InsecureRegistries) + if err != nil { + return nil, fmt.Errorf("Couldn't create Tag store: %s", err) + } + + trustDir := path.Join(config.Root, "trust") + if err := os.MkdirAll(trustDir, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + t, err := trust.NewTrustStore(trustDir) + if err != nil { + return nil, fmt.Errorf("could not create trust store: %s", err) + } + + if !config.DisableNetwork { + job := eng.Job("init_networkdriver") + + job.SetenvBool("EnableIptables", config.EnableIptables) + job.SetenvBool("InterContainerCommunication", config.InterContainerCommunication) + job.SetenvBool("EnableIpForward", config.EnableIpForward) + job.SetenvBool("EnableIpMasq", config.EnableIpMasq) + job.Setenv("BridgeIface", config.BridgeIface) + job.Setenv("BridgeIP", config.BridgeIP) + job.Setenv("FixedCIDR", config.FixedCIDR) + job.Setenv("DefaultBindingIP", config.DefaultIp.String()) + + if err := job.Run(); err != nil { + return nil, err + } + } + + graphdbPath := path.Join(config.Root, "linkgraph.db") + graph, err := graphdb.NewSqliteConn(graphdbPath) + if err != nil { + return nil, err + } + + localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION)) + sysInitPath := utils.DockerInitPath(localCopy) + if sysInitPath == "" { + return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See http://docs.docker.com/contributing/devenvironment for official build instructions.") + } + + if sysInitPath != localCopy { + // When we find a suitable dockerinit binary (even if it's our local binary), we copy it into config.Root at localCopy for future use (so that the original can go away without that being a problem, for example during a package upgrade). + if err := os.Mkdir(path.Dir(localCopy), 0700); err != nil && !os.IsExist(err) { + return nil, err + } + if _, err := utils.CopyFile(sysInitPath, localCopy); err != nil { + return nil, err + } + if err := os.Chmod(localCopy, 0700); err != nil { + return nil, err + } + sysInitPath = localCopy + } + + sysInfo := sysinfo.New(false) + ed, err := execdrivers.NewDriver(config.ExecDriver, config.Root, sysInitPath, sysInfo) + if err != nil { + return nil, err + } + + daemon := &Daemon{ + repository: daemonRepo, + containers: &contStore{s: make(map[string]*Container)}, + execCommands: newExecStore(), + graph: g, + repositories: repositories, + idIndex: truncindex.NewTruncIndex([]string{}), + sysInfo: sysInfo, + volumes: volumes, + config: config, + containerGraph: graph, + driver: driver, + sysInitPath: sysInitPath, + execDriver: ed, + eng: eng, + trustStore: t, + } + if err := daemon.restore(); err != nil { + return nil, err + } + // Setup shutdown handlers + // FIXME: can these shutdown handlers be registered closer to their source? 
+ eng.OnShutdown(func() { + // FIXME: if these cleanup steps can be called concurrently, register + // them as separate handlers to speed up total shutdown time + // FIXME: use engine logging instead of log.Errorf + if err := daemon.shutdown(); err != nil { + log.Errorf("daemon.shutdown(): %s", err) + } + if err := portallocator.ReleaseAll(); err != nil { + log.Errorf("portallocator.ReleaseAll(): %s", err) + } + if err := daemon.driver.Cleanup(); err != nil { + log.Errorf("daemon.driver.Cleanup(): %s", err.Error()) + } + if err := daemon.containerGraph.Close(); err != nil { + log.Errorf("daemon.containerGraph.Close(): %s", err.Error()) + } + }) + + return daemon, nil +} + +func (daemon *Daemon) shutdown() error { + group := sync.WaitGroup{} + log.Debugf("starting clean shutdown of all containers...") + for _, container := range daemon.List() { + c := container + if c.IsRunning() { + log.Debugf("stopping %s", c.ID) + group.Add(1) + + go func() { + defer group.Done() + if err := c.KillSig(15); err != nil { + log.Debugf("kill 15 error for %s - %s", c.ID, err) + } + c.WaitStop(-1 * time.Second) + log.Debugf("container stopped %s", c.ID) + }() + } + } + group.Wait() + + return nil +} + +func (daemon *Daemon) Mount(container *Container) error { + dir, err := daemon.driver.Get(container.ID, container.GetMountLabel()) + if err != nil { + return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err) + } + if container.basefs == "" { + container.basefs = dir + } else if container.basefs != dir { + return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", + daemon.driver, container.ID, container.basefs, dir) + } + return nil +} + +func (daemon *Daemon) Unmount(container *Container) error { + daemon.driver.Put(container.ID) + return nil +} + +func (daemon *Daemon) Changes(container *Container) ([]archive.Change, error) { + initID := fmt.Sprintf("%s-init", container.ID) + return daemon.driver.Changes(container.ID, initID) +} + +func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) { + initID := fmt.Sprintf("%s-init", container.ID) + return daemon.driver.Diff(container.ID, initID) +} + +func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + return daemon.execDriver.Run(c.command, pipes, startCallback) +} + +func (daemon *Daemon) Pause(c *Container) error { + if err := daemon.execDriver.Pause(c.command); err != nil { + return err + } + c.SetPaused() + return nil +} + +func (daemon *Daemon) Unpause(c *Container) error { + if err := daemon.execDriver.Unpause(c.command); err != nil { + return err + } + c.SetUnpaused() + return nil +} + +func (daemon *Daemon) Kill(c *Container, sig int) error { + return daemon.execDriver.Kill(c.command, sig) +} + +// Nuke kills all containers then removes all content +// from the content root, including images, volumes and +// container filesystems. +// Again: this will remove your entire docker daemon! +// FIXME: this is deprecated, and only used in legacy +// tests. Please remove. +func (daemon *Daemon) Nuke() error { + var wg sync.WaitGroup + for _, container := range daemon.List() { + wg.Add(1) + go func(c *Container) { + c.Kill() + wg.Done() + }(container) + } + wg.Wait() + + return os.RemoveAll(daemon.config.Root) +} + +// FIXME: this is a convenience function for integration tests +// which need direct access to daemon.graph. 
+// Once the tests switch to using engine and jobs, this method +// can go away. +func (daemon *Daemon) Graph() *graph.Graph { + return daemon.graph +} + +func (daemon *Daemon) Repositories() *graph.TagStore { + return daemon.repositories +} + +func (daemon *Daemon) Config() *Config { + return daemon.config +} + +func (daemon *Daemon) SystemConfig() *sysinfo.SysInfo { + return daemon.sysInfo +} + +func (daemon *Daemon) SystemInitPath() string { + return daemon.sysInitPath +} + +func (daemon *Daemon) GraphDriver() graphdriver.Driver { + return daemon.driver +} + +func (daemon *Daemon) ExecutionDriver() execdriver.Driver { + return daemon.execDriver +} + +func (daemon *Daemon) ContainerGraph() *graphdb.Database { + return daemon.containerGraph +} + +func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) { + // Retrieve all images + images, err := daemon.Graph().Map() + if err != nil { + return nil, err + } + + // Store the tree in a map of map (map[parentId][childId]) + imageMap := make(map[string]map[string]struct{}) + for _, img := range images { + if _, exists := imageMap[img.Parent]; !exists { + imageMap[img.Parent] = make(map[string]struct{}) + } + imageMap[img.Parent][img.ID] = struct{}{} + } + + // Loop on the children of the given image and check the config + var match *image.Image + for elem := range imageMap[imgID] { + img, err := daemon.Graph().Get(elem) + if err != nil { + return nil, err + } + if runconfig.Compare(&img.ContainerConfig, config) { + if match == nil || match.Created.Before(img.Created) { + match = img + } + } + } + return match, nil +} + +func checkKernelAndArch() error { + // Check for unsupported architectures + if runtime.GOARCH != "amd64" { + return fmt.Errorf("The Docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH) + } + // Check for unsupported kernel versions + // FIXME: it would be cleaner to not test for specific versions, but rather + // test for specific functionalities. + // Unfortunately we can't test for the feature "does not cause a kernel panic" + // without actually causing a kernel panic, so we need this workaround until + // the circumstances of pre-3.8 crashes are clearer. + // For details see http://github.com/docker/docker/issues/407 + if k, err := kernel.GetKernelVersion(); err != nil { + log.Infof("WARNING: %s", err) + } else { + if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 { + if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { + log.Infof("WARNING: You are running linux kernel version %s, which might be unstable running docker. 
Please upgrade your kernel to 3.8.0.", k.String()) + } + } + } + return nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/daemon_no_aufs.go docker.io-1.3.2~dfsg1/daemon/daemon_no_aufs.go --- docker.io-0.9.1~dfsg1/daemon/daemon_no_aufs.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/daemon_no_aufs.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,11 @@ +// +build exclude_graphdriver_aufs + +package daemon + +import ( + "github.com/docker/docker/daemon/graphdriver" +) + +func migrateIfAufs(driver graphdriver.Driver, root string) error { + return nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/daemon_unit_test.go docker.io-1.3.2~dfsg1/daemon/daemon_unit_test.go --- docker.io-0.9.1~dfsg1/daemon/daemon_unit_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/daemon_unit_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,39 @@ +package daemon + +import ( + "testing" + + "github.com/docker/docker/runconfig" +) + +func TestParseSecurityOpt(t *testing.T) { + container := &Container{} + config := &runconfig.HostConfig{} + + // test apparmor + config.SecurityOpt = []string{"apparmor:test_profile"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.AppArmorProfile != "test_profile" { + t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile) + } + + // test valid label + config.SecurityOpt = []string{"label:user:USER"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + + // test invalid label + config.SecurityOpt = []string{"label"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } + + // test invalid opt + config.SecurityOpt = []string{"test"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } +} diff -Nru docker.io-0.9.1~dfsg1/daemon/delete.go docker.io-1.3.2~dfsg1/daemon/delete.go --- docker.io-0.9.1~dfsg1/daemon/delete.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/delete.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,125 @@ +package daemon + +import ( + "fmt" + "os" + "path" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/log" +) + +func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER\n", job.Name)
+	}
+	name := job.Args[0]
+	removeVolume := job.GetenvBool("removeVolume")
+	removeLink := job.GetenvBool("removeLink")
+	forceRemove := job.GetenvBool("forceRemove")
+	container := daemon.Get(name)
+
+	if container == nil {
+		return job.Errorf("No such container: %s", name)
+	}
+
+	if removeLink {
+		name, err := GetFullContainerName(name)
+		if err != nil {
+			return job.Error(err)
+		}
+		parent, n := path.Split(name)
+		if parent == "/" {
+			return job.Errorf("Conflict, cannot remove the default name of the container")
+		}
+		pe := daemon.ContainerGraph().Get(parent)
+		if pe == nil {
+			return job.Errorf("Cannot get parent %s for name %s", parent, name)
+		}
+		parentContainer := daemon.Get(pe.ID())
+
+		if parentContainer != nil {
+			parentContainer.DisableLink(n)
+		}
+
+		if err := daemon.ContainerGraph().Delete(name); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	}
+
+	if container != nil {
+		if container.IsRunning() {
+			if forceRemove {
+				if err := container.Kill(); err != nil {
+					return job.Errorf("Could not kill running container, cannot remove - %v", err)
+				}
+			} else {
+				return job.Errorf("You cannot remove a running container. Stop the container before attempting removal or use -f")
+			}
+		}
+		if err := daemon.Destroy(container); err != nil {
+			return job.Errorf("Cannot destroy container %s: %s", name, err)
+		}
+		container.LogEvent("destroy")
+		if removeVolume {
+			daemon.DeleteVolumes(container.VolumePaths())
+		}
+	}
+	return engine.StatusOK
+}
+
+func (daemon *Daemon) DeleteVolumes(volumeIDs map[string]struct{}) {
+	for id := range volumeIDs {
+		if err := daemon.volumes.Delete(id); err != nil {
+			log.Infof("%s", err)
+			continue
+		}
+	}
+}
+
+// Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
+// FIXME: rename to Rm for consistency with the CLI command
+func (daemon *Daemon) Destroy(container *Container) error {
+	if container == nil {
+		return fmt.Errorf("The given container is <nil>")
+	}
+
+	element := daemon.containers.Get(container.ID)
+	if element == nil {
+		return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID)
+	}
+
+	if err := container.Stop(3); err != nil {
+		return err
+	}
+
+	// Deregister the container before removing its directory, to avoid race conditions
+	daemon.idIndex.Delete(container.ID)
+	daemon.containers.Delete(container.ID)
+	container.derefVolumes()
+	if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
+		log.Debugf("Unable to remove container from link graph: %s", err)
+	}
+
+	if err := daemon.driver.Remove(container.ID); err != nil {
+		return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err)
+	}
+
+	initID := fmt.Sprintf("%s-init", container.ID)
+	if err := daemon.driver.Remove(initID); err != nil {
+		return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err)
+	}
+
+	if err := os.RemoveAll(container.root); err != nil {
+		return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
+	}
+
+	if err := daemon.execDriver.Clean(container.ID); err != nil {
+		return fmt.Errorf("Unable to remove execdriver data for %s: %s", container.ID, err)
+	}
+
+	selinuxFreeLxcContexts(container.ProcessLabel)
+
+	return nil
+}
diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/driver.go docker.io-1.3.2~dfsg1/daemon/execdriver/driver.go
--- docker.io-0.9.1~dfsg1/daemon/execdriver/driver.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/daemon/execdriver/driver.go	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,121 @@
+package execdriver
+
+import (
+	"errors"
+	"io"
+	"os"
+	"os/exec"
+
+	"github.com/docker/libcontainer/devices"
+)
+
+// Context is a generic key value pair that allows
+// arbitrary data to be sent
+type Context map[string]string
+
+var (
+	ErrNotRunning              = errors.New("Process could not be started")
+	ErrWaitTimeoutReached      = errors.New("Wait timeout reached")
+	ErrDriverAlreadyRegistered = errors.New("A driver already registered this docker init function")
+	ErrDriverNotFound          = errors.New("The requested docker init has not been found")
+)
+
+type StartCallback func(*ProcessConfig, int)
+
+// Driver specific information based on
+// processes registered with the driver
+type Info interface {
+	IsRunning() bool
+}
+
+// Terminal is an interface for drivers to implement
+// if they want to support Close and Resize calls from
+// the core
+type Terminal interface {
+	io.Closer
+	Resize(height, width int) error
+}
+
+type TtyTerminal interface {
+	Master() *os.File
+}
+
+type Driver interface {
+	Run(c *Command, pipes *Pipes, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code
+	// Exec executes the process in an existing container, blocks until the process exits and returns the exit code
+	Exec(c *Command, processConfig *ProcessConfig, pipes *Pipes, startCallback StartCallback) (int, error)
+	Kill(c *Command, sig int) error
+	Pause(c *Command) error
+	Unpause(c *Command) error
+	Name() string        // Driver name
+	Info(id string) Info // "temporary" hack (until we move state from core to plugins)
+	GetPidsForContainer(id string) ([]int, error) // Returns a list of pids for the given container.
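+	// Terminate is the hard-stop path used, for example, when the daemon
+	// reaps stale containers at startup (see daemon.register); the lxc
+	// implementation simply sends SIGKILL via KillLxc.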
+	Terminate(c *Command) error // kill it with fire
+	Clean(id string) error      // clean all traces of container exec
+}
+
+// Network settings of the container
+type Network struct {
+	Interface      *NetworkInterface `json:"interface"` // if interface is nil then networking is disabled
+	Mtu            int               `json:"mtu"`
+	ContainerID    string            `json:"container_id"` // id of the container to join network.
+	HostNetworking bool              `json:"host_networking"`
+}
+
+type NetworkInterface struct {
+	Gateway     string `json:"gateway"`
+	IPAddress   string `json:"ip"`
+	IPPrefixLen int    `json:"ip_prefix_len"`
+	MacAddress  string `json:"mac_address"`
+	Bridge      string `json:"bridge"`
+}
+
+type Resources struct {
+	Memory     int64  `json:"memory"`
+	MemorySwap int64  `json:"memory_swap"`
+	CpuShares  int64  `json:"cpu_shares"`
+	Cpuset     string `json:"cpuset"`
+}
+
+type Mount struct {
+	Source      string `json:"source"`
+	Destination string `json:"destination"`
+	Writable    bool   `json:"writable"`
+	Private     bool   `json:"private"`
+	Slave       bool   `json:"slave"`
+}
+
+// Describes a process that will be run inside a container.
+type ProcessConfig struct {
+	exec.Cmd `json:"-"`
+
+	Privileged bool     `json:"privileged"`
+	User       string   `json:"user"`
+	Tty        bool     `json:"tty"`
+	Entrypoint string   `json:"entrypoint"`
+	Arguments  []string `json:"arguments"`
+	Terminal   Terminal `json:"-"` // standard or tty terminal
+	Console    string   `json:"-"` // dev/console path
+}
+
+// Command wraps an os/exec.Cmd to add more metadata
+type Command struct {
+	ID                 string            `json:"id"`
+	Rootfs             string            `json:"rootfs"`   // root fs of the container
+	InitPath           string            `json:"initpath"` // dockerinit
+	WorkingDir         string            `json:"working_dir"`
+	ConfigPath         string            `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver
+	Network            *Network          `json:"network"`
+	Resources          *Resources        `json:"resources"`
+	Mounts             []Mount           `json:"mounts"`
+	AllowedDevices     []*devices.Device `json:"allowed_devices"`
+	AutoCreatedDevices []*devices.Device `json:"autocreated_devices"`
+	CapAdd             []string          `json:"cap_add"`
+	CapDrop            []string          `json:"cap_drop"`
+	ContainerPid       int               `json:"container_pid"`  // the pid for the process inside a container
+	ProcessConfig      ProcessConfig     `json:"process_config"` // Describes the init process of the container.
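+	// ProcessLabel and MountLabel below are the SELinux labels computed by
+	// label.InitLabels (see parseSecurityOpt in the daemon package).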
+ ProcessLabel string `json:"process_label"` + MountLabel string `json:"mount_label"` + LxcConfig []string `json:"lxc_config"` + AppArmorProfile string `json:"apparmor_profile"` +} diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/execdrivers/execdrivers.go docker.io-1.3.2~dfsg1/daemon/execdriver/execdrivers/execdrivers.go --- docker.io-0.9.1~dfsg1/daemon/execdriver/execdrivers/execdrivers.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/execdriver/execdrivers/execdrivers.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,23 @@ +package execdrivers + +import ( + "fmt" + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/execdriver/lxc" + "github.com/docker/docker/daemon/execdriver/native" + "github.com/docker/docker/pkg/sysinfo" + "path" +) + +func NewDriver(name, root, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) { + switch name { + case "lxc": + // we want to give the lxc driver the full docker root because it needs + // to access and write config and template files in /var/lib/docker/containers/* + // to be backwards compatible + return lxc.NewDriver(root, initPath, sysInfo.AppArmor) + case "native": + return native.NewDriver(path.Join(root, "execdriver", "native"), initPath) + } + return nil, fmt.Errorf("unknown exec driver %s", name) +} diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/lxc/driver.go docker.io-1.3.2~dfsg1/daemon/execdriver/lxc/driver.go --- docker.io-0.9.1~dfsg1/daemon/execdriver/lxc/driver.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/execdriver/lxc/driver.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,527 @@ +package lxc + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + "github.com/kr/pty" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/utils" + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/mount/nodes" +) + +const DriverName = "lxc" + +var ErrExec = errors.New("Unsupported: Exec is not supported by the lxc driver") + +type driver struct { + root string // root path for the driver to use + initPath string + apparmor bool + sharedRoot bool +} + +func NewDriver(root, initPath string, apparmor bool) (*driver, error) { + // setup unconfined symlink + if err := linkLxcStart(root); err != nil { + return nil, err + } + + return &driver{ + apparmor: apparmor, + root: root, + initPath: initPath, + sharedRoot: rootIsShared(), + }, nil +} + +func (d *driver) Name() string { + version := d.version() + return fmt.Sprintf("%s-%s", DriverName, version) +} + +func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + var ( + term execdriver.Terminal + err error + ) + + if c.ProcessConfig.Tty { + term, err = NewTtyConsole(&c.ProcessConfig, pipes) + } else { + term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes) + } + c.ProcessConfig.Terminal = term + + c.Mounts = append(c.Mounts, execdriver.Mount{ + Source: d.initPath, + Destination: c.InitPath, + Writable: false, + Private: true, + }) + + if err := d.generateEnvConfig(c); err != nil { + return -1, err + } + configPath, err := d.generateLXCConfig(c) + if err != nil { + return -1, err + } + params := []string{ + "lxc-start", + "-n", c.ID, + "-f", configPath, + "--", + c.InitPath, + } + + if c.Network.Interface != 
nil { + params = append(params, + "-g", c.Network.Interface.Gateway, + "-i", fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen), + ) + } + params = append(params, + "-mtu", strconv.Itoa(c.Network.Mtu), + ) + + if c.ProcessConfig.User != "" { + params = append(params, "-u", c.ProcessConfig.User) + } + + if c.ProcessConfig.Privileged { + if d.apparmor { + params[0] = path.Join(d.root, "lxc-start-unconfined") + + } + params = append(params, "-privileged") + } + + if c.WorkingDir != "" { + params = append(params, "-w", c.WorkingDir) + } + + if len(c.CapAdd) > 0 { + params = append(params, fmt.Sprintf("-cap-add=%s", strings.Join(c.CapAdd, ":"))) + } + + if len(c.CapDrop) > 0 { + params = append(params, fmt.Sprintf("-cap-drop=%s", strings.Join(c.CapDrop, ":"))) + } + + params = append(params, "--", c.ProcessConfig.Entrypoint) + params = append(params, c.ProcessConfig.Arguments...) + + if d.sharedRoot { + // lxc-start really needs / to be non-shared, or all kinds of stuff break + // when lxc-start unmount things and those unmounts propagate to the main + // mount namespace. + // What we really want is to clone into a new namespace and then + // mount / MS_REC|MS_SLAVE, but since we can't really clone or fork + // without exec in go we have to do this horrible shell hack... + shellString := + "mount --make-rslave /; exec " + + utils.ShellQuoteArguments(params) + + params = []string{ + "unshare", "-m", "--", "/bin/sh", "-c", shellString, + } + } + + var ( + name = params[0] + arg = params[1:] + ) + aname, err := exec.LookPath(name) + if err != nil { + aname = name + } + c.ProcessConfig.Path = aname + c.ProcessConfig.Args = append([]string{name}, arg...) + + if err := nodes.CreateDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil { + return -1, err + } + + if err := c.ProcessConfig.Start(); err != nil { + return -1, err + } + + var ( + waitErr error + waitLock = make(chan struct{}) + ) + + go func() { + if err := c.ProcessConfig.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0 + waitErr = err + } + } + close(waitLock) + }() + + // Poll lxc for RUNNING status + pid, err := d.waitForStart(c, waitLock) + if err != nil { + if c.ProcessConfig.Process != nil { + c.ProcessConfig.Process.Kill() + c.ProcessConfig.Wait() + } + return -1, err + } + + c.ContainerPid = pid + + if startCallback != nil { + startCallback(&c.ProcessConfig, pid) + } + + <-waitLock + + return getExitCode(c), waitErr +} + +/// Return the exit code of the process +// if the process has not exited -1 will be returned +func getExitCode(c *execdriver.Command) int { + if c.ProcessConfig.ProcessState == nil { + return -1 + } + return c.ProcessConfig.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() +} + +func (d *driver) Kill(c *execdriver.Command, sig int) error { + return KillLxc(c.ID, sig) +} + +func (d *driver) Pause(c *execdriver.Command) error { + _, err := exec.LookPath("lxc-freeze") + if err == nil { + output, errExec := exec.Command("lxc-freeze", "-n", c.ID).CombinedOutput() + if errExec != nil { + return fmt.Errorf("Err: %s Output: %s", errExec, output) + } + } + + return err +} + +func (d *driver) Unpause(c *execdriver.Command) error { + _, err := exec.LookPath("lxc-unfreeze") + if err == nil { + output, errExec := exec.Command("lxc-unfreeze", "-n", c.ID).CombinedOutput() + if errExec != nil { + return fmt.Errorf("Err: %s Output: %s", errExec, output) + } + } + + return err +} + +func (d *driver) Terminate(c 
*execdriver.Command) error { + return KillLxc(c.ID, 9) +} + +func (d *driver) version() string { + var ( + version string + output []byte + err error + ) + if _, errPath := exec.LookPath("lxc-version"); errPath == nil { + output, err = exec.Command("lxc-version").CombinedOutput() + } else { + output, err = exec.Command("lxc-start", "--version").CombinedOutput() + } + if err == nil { + version = strings.TrimSpace(string(output)) + if parts := strings.SplitN(version, ":", 2); len(parts) == 2 { + version = strings.TrimSpace(parts[1]) + } + } + return version +} + +func KillLxc(id string, sig int) error { + var ( + err error + output []byte + ) + _, err = exec.LookPath("lxc-kill") + if err == nil { + output, err = exec.Command("lxc-kill", "-n", id, strconv.Itoa(sig)).CombinedOutput() + } else { + output, err = exec.Command("lxc-stop", "-k", "-n", id, strconv.Itoa(sig)).CombinedOutput() + } + if err != nil { + return fmt.Errorf("Err: %s Output: %s", err, output) + } + return nil +} + +// wait for the process to start and return the pid for the process +func (d *driver) waitForStart(c *execdriver.Command, waitLock chan struct{}) (int, error) { + var ( + err error + output []byte + ) + // We wait for the container to be fully running. + // Timeout after 5 seconds. In case of broken pipe, just retry. + // Note: The container can run and finish correctly before + // the end of this loop + for now := time.Now(); time.Since(now) < 5*time.Second; { + select { + case <-waitLock: + // If the process dies while waiting for it, just return + return -1, nil + default: + } + + output, err = d.getInfo(c.ID) + if err == nil { + info, err := parseLxcInfo(string(output)) + if err != nil { + return -1, err + } + if info.Running { + return info.Pid, nil + } + } + time.Sleep(50 * time.Millisecond) + } + return -1, execdriver.ErrNotRunning +} + +func (d *driver) getInfo(id string) ([]byte, error) { + return exec.Command("lxc-info", "-n", id).CombinedOutput() +} + +type info struct { + ID string + driver *driver +} + +func (i *info) IsRunning() bool { + var running bool + + output, err := i.driver.getInfo(i.ID) + if err != nil { + log.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output) + return false + } + if strings.Contains(string(output), "RUNNING") { + running = true + } + return running +} + +func (d *driver) Info(id string) execdriver.Info { + return &info{ + ID: id, + driver: d, + } +} + +func (d *driver) GetPidsForContainer(id string) ([]int, error) { + pids := []int{} + + // cpu is chosen because it is the only non optional subsystem in cgroups + subsystem := "cpu" + cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem) + if err != nil { + return pids, err + } + + cgroupDir, err := cgroups.GetThisCgroupDir(subsystem) + if err != nil { + return pids, err + } + + filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks") + if _, err := os.Stat(filename); os.IsNotExist(err) { + // With more recent lxc versions use, cgroup will be in lxc/ + filename = filepath.Join(cgroupRoot, cgroupDir, "lxc", id, "tasks") + } + + output, err := ioutil.ReadFile(filename) + if err != nil { + return pids, err + } + for _, p := range strings.Split(string(output), "\n") { + if len(p) == 0 { + continue + } + pid, err := strconv.Atoi(p) + if err != nil { + return pids, fmt.Errorf("Invalid pid '%s': %s", p, err) + } + pids = append(pids, pid) + } + return pids, nil +} + +func linkLxcStart(root string) error { + sourcePath, err := exec.LookPath("lxc-start") + if err != nil { + return err + } + 
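+	// The unconfined symlink is removed and re-created on every daemon start
+	// (NewDriver calls this) so it always tracks the lxc-start binary
+	// currently on the PATH.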
targetPath := path.Join(root, "lxc-start-unconfined") + + if _, err := os.Lstat(targetPath); err != nil && !os.IsNotExist(err) { + return err + } else if err == nil { + if err := os.Remove(targetPath); err != nil { + return err + } + } + return os.Symlink(sourcePath, targetPath) +} + +// TODO: This can be moved to the mountinfo reader in the mount pkg +func rootIsShared() bool { + if data, err := ioutil.ReadFile("/proc/self/mountinfo"); err == nil { + for _, line := range strings.Split(string(data), "\n") { + cols := strings.Split(line, " ") + if len(cols) >= 6 && cols[4] == "/" { + return strings.HasPrefix(cols[6], "shared") + } + } + } + + // No idea, probably safe to assume so + return true +} + +func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) { + root := path.Join(d.root, "containers", c.ID, "config.lxc") + + fo, err := os.Create(root) + if err != nil { + return "", err + } + defer fo.Close() + + if err := LxcTemplateCompiled.Execute(fo, struct { + *execdriver.Command + AppArmor bool + }{ + Command: c, + AppArmor: d.apparmor, + }); err != nil { + return "", err + } + + return root, nil +} + +func (d *driver) generateEnvConfig(c *execdriver.Command) error { + data, err := json.Marshal(c.ProcessConfig.Env) + if err != nil { + return err + } + p := path.Join(d.root, "containers", c.ID, "config.env") + c.Mounts = append(c.Mounts, execdriver.Mount{ + Source: p, + Destination: "/.dockerenv", + Writable: false, + Private: true, + }) + + return ioutil.WriteFile(p, data, 0600) +} + +// Clean not implemented for lxc +func (d *driver) Clean(id string) error { + return nil +} + +type TtyConsole struct { + MasterPty *os.File + SlavePty *os.File +} + +func NewTtyConsole(processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes) (*TtyConsole, error) { + // lxc is special in that we cannot create the master outside of the container without + // opening the slave because we have nothing to provide to the cmd. We have to open both then do + // the crazy setup on command right now instead of passing the console path to lxc and telling it + // to open up that console. we save a couple of openfiles in the native driver because we can do + // this. 
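+	// pty.Open (github.com/kr/pty) allocates a new pseudo-terminal and
+	// returns its master and slave ends as *os.File values.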
+ ptyMaster, ptySlave, err := pty.Open() + if err != nil { + return nil, err + } + + tty := &TtyConsole{ + MasterPty: ptyMaster, + SlavePty: ptySlave, + } + + if err := tty.AttachPipes(&processConfig.Cmd, pipes); err != nil { + tty.Close() + return nil, err + } + + processConfig.Console = tty.SlavePty.Name() + + return tty, nil +} + +func (t *TtyConsole) Master() *os.File { + return t.MasterPty +} + +func (t *TtyConsole) Resize(h, w int) error { + return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) +} + +func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *execdriver.Pipes) error { + command.Stdout = t.SlavePty + command.Stderr = t.SlavePty + + go func() { + if wb, ok := pipes.Stdout.(interface { + CloseWriters() error + }); ok { + defer wb.CloseWriters() + } + + io.Copy(pipes.Stdout, t.MasterPty) + }() + + if pipes.Stdin != nil { + command.Stdin = t.SlavePty + command.SysProcAttr.Setctty = true + + go func() { + io.Copy(t.MasterPty, pipes.Stdin) + + pipes.Stdin.Close() + }() + } + return nil +} + +func (t *TtyConsole) Close() error { + t.SlavePty.Close() + return t.MasterPty.Close() +} + +func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + return -1, ErrExec +} diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/lxc/info.go docker.io-1.3.2~dfsg1/daemon/execdriver/lxc/info.go --- docker.io-0.9.1~dfsg1/daemon/execdriver/lxc/info.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/execdriver/lxc/info.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,50 @@ +package lxc + +import ( + "bufio" + "errors" + "strconv" + "strings" +) + +var ( + ErrCannotParse = errors.New("cannot parse raw input") +) + +type lxcInfo struct { + Running bool + Pid int +} + +func parseLxcInfo(raw string) (*lxcInfo, error) { + if raw == "" { + return nil, ErrCannotParse + } + var ( + err error + s = bufio.NewScanner(strings.NewReader(raw)) + info = &lxcInfo{} + ) + for s.Scan() { + text := s.Text() + + if s.Err() != nil { + return nil, s.Err() + } + + parts := strings.Split(text, ":") + if len(parts) < 2 { + continue + } + switch strings.ToLower(strings.TrimSpace(parts[0])) { + case "state": + info.Running = strings.TrimSpace(parts[1]) == "RUNNING" + case "pid": + info.Pid, err = strconv.Atoi(strings.TrimSpace(parts[1])) + if err != nil { + return nil, err + } + } + } + return info, nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/lxc/info_test.go docker.io-1.3.2~dfsg1/daemon/execdriver/lxc/info_test.go --- docker.io-0.9.1~dfsg1/daemon/execdriver/lxc/info_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/execdriver/lxc/info_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,36 @@ +package lxc + +import ( + "testing" +) + +func TestParseRunningInfo(t *testing.T) { + raw := ` + state: RUNNING + pid: 50` + + info, err := parseLxcInfo(raw) + if err != nil { + t.Fatal(err) + } + if !info.Running { + t.Fatal("info should return a running state") + } + if info.Pid != 50 { + t.Fatalf("info should have pid 50 got %d", info.Pid) + } +} + +func TestEmptyInfo(t *testing.T) { + _, err := parseLxcInfo("") + if err == nil { + t.Fatal("error should not be nil") + } +} + +func TestBadInfo(t *testing.T) { + _, err := parseLxcInfo("state") + if err != nil { + t.Fatal(err) + } +} diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/lxc/init.go docker.io-1.3.2~dfsg1/daemon/execdriver/lxc/init.go --- 
docker.io-0.9.1~dfsg1/daemon/execdriver/lxc/init.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/execdriver/lxc/init.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,213 @@ +package lxc + +import ( + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "os/exec" + "runtime" + "strings" + "syscall" + + "github.com/docker/docker/pkg/reexec" + "github.com/docker/libcontainer/netlink" +) + +// Args provided to the init function for a driver +type InitArgs struct { + User string + Gateway string + Ip string + WorkDir string + Privileged bool + Env []string + Args []string + Mtu int + Console string + Pipe int + Root string + CapAdd string + CapDrop string +} + +func init() { + // like always lxc requires a hack to get this to work + reexec.Register("/.dockerinit", dockerInititalizer) +} + +func dockerInititalizer() { + initializer() +} + +// initializer is the lxc driver's init function that is run inside the namespace to setup +// additional configurations +func initializer() { + runtime.LockOSThread() + + args := getArgs() + + if err := setupNamespace(args); err != nil { + log.Fatal(err) + } +} + +func setupNamespace(args *InitArgs) error { + if err := setupEnv(args); err != nil { + return err + } + if err := setupHostname(args); err != nil { + return err + } + if err := setupNetworking(args); err != nil { + return err + } + if err := finalizeNamespace(args); err != nil { + return err + } + + path, err := exec.LookPath(args.Args[0]) + if err != nil { + log.Printf("Unable to locate %v", args.Args[0]) + os.Exit(127) + } + + if err := syscall.Exec(path, args.Args, os.Environ()); err != nil { + return fmt.Errorf("dockerinit unable to execute %s - %s", path, err) + } + + return nil +} + +func getArgs() *InitArgs { + var ( + // Get cmdline arguments + user = flag.String("u", "", "username or uid") + gateway = flag.String("g", "", "gateway address") + ip = flag.String("i", "", "ip address") + workDir = flag.String("w", "", "workdir") + privileged = flag.Bool("privileged", false, "privileged mode") + mtu = flag.Int("mtu", 1500, "interface mtu") + capAdd = flag.String("cap-add", "", "capabilities to add") + capDrop = flag.String("cap-drop", "", "capabilities to drop") + ) + + flag.Parse() + + return &InitArgs{ + User: *user, + Gateway: *gateway, + Ip: *ip, + WorkDir: *workDir, + Privileged: *privileged, + Args: flag.Args(), + Mtu: *mtu, + CapAdd: *capAdd, + CapDrop: *capDrop, + } +} + +// Clear environment pollution introduced by lxc-start +func setupEnv(args *InitArgs) error { + // Get env + var env []string + content, err := ioutil.ReadFile(".dockerenv") + if err != nil { + return fmt.Errorf("Unable to load environment variables: %v", err) + } + if err := json.Unmarshal(content, &env); err != nil { + return fmt.Errorf("Unable to unmarshal environment variables: %v", err) + } + // Propagate the plugin-specific container env variable + env = append(env, "container="+os.Getenv("container")) + + args.Env = env + + os.Clearenv() + for _, kv := range args.Env { + parts := strings.SplitN(kv, "=", 2) + if len(parts) == 1 { + parts = append(parts, "") + } + os.Setenv(parts[0], parts[1]) + } + + return nil +} + +func setupHostname(args *InitArgs) error { + hostname := getEnv(args, "HOSTNAME") + if hostname == "" { + return nil + } + return setHostname(hostname) +} + +// Setup networking +func setupNetworking(args *InitArgs) error { + if args.Ip != "" { + // eth0 + iface, err := net.InterfaceByName("eth0") + if err != nil { + return fmt.Errorf("Unable to set up 
networking: %v", err) + } + ip, ipNet, err := net.ParseCIDR(args.Ip) + if err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + if err := netlink.NetworkLinkAddIp(iface, ip, ipNet); err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + if err := netlink.NetworkSetMTU(iface, args.Mtu); err != nil { + return fmt.Errorf("Unable to set MTU: %v", err) + } + if err := netlink.NetworkLinkUp(iface); err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + + // loopback + iface, err = net.InterfaceByName("lo") + if err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + if err := netlink.NetworkLinkUp(iface); err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + } + if args.Gateway != "" { + gw := net.ParseIP(args.Gateway) + if gw == nil { + return fmt.Errorf("Unable to set up networking, %s is not a valid gateway IP", args.Gateway) + } + + if err := netlink.AddDefaultGw(gw.String(), "eth0"); err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + } + + return nil +} + +// Setup working directory +func setupWorkingDirectory(args *InitArgs) error { + if args.WorkDir == "" { + return nil + } + if err := syscall.Chdir(args.WorkDir); err != nil { + return fmt.Errorf("Unable to change dir to %v: %v", args.WorkDir, err) + } + return nil +} + +func getEnv(args *InitArgs, key string) string { + for _, kv := range args.Env { + parts := strings.SplitN(kv, "=", 2) + if parts[0] == key && len(parts) == 2 { + return parts[1] + } + } + return "" +} diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/lxc/lxc_init_linux.go docker.io-1.3.2~dfsg1/daemon/execdriver/lxc/lxc_init_linux.go --- docker.io-0.9.1~dfsg1/daemon/execdriver/lxc/lxc_init_linux.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/execdriver/lxc/lxc_init_linux.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,78 @@ +package lxc + +import ( + "fmt" + "strings" + "syscall" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/execdriver/native/template" + "github.com/docker/libcontainer/namespaces" + "github.com/docker/libcontainer/security/capabilities" + "github.com/docker/libcontainer/system" + "github.com/docker/libcontainer/utils" +) + +func setHostname(hostname string) error { + return syscall.Sethostname([]byte(hostname)) +} + +func finalizeNamespace(args *InitArgs) error { + if err := utils.CloseExecFrom(3); err != nil { + return err + } + + // We use the native drivers default template so that things like caps are consistent + // across both drivers + container := template.New() + + if !args.Privileged { + // drop capabilities in bounding set before changing user + if err := capabilities.DropBoundingSet(container.Capabilities); err != nil { + return fmt.Errorf("drop bounding set %s", err) + } + + // preserve existing capabilities while we change users + if err := system.SetKeepCaps(); err != nil { + return fmt.Errorf("set keep caps %s", err) + } + } + + if err := namespaces.SetupUser(args.User); err != nil { + return fmt.Errorf("setup user %s", err) + } + + if !args.Privileged { + if err := system.ClearKeepCaps(); err != nil { + return fmt.Errorf("clear keep caps %s", err) + } + + var ( + adds []string + drops []string + ) + + if args.CapAdd != "" { + adds = strings.Split(args.CapAdd, ":") + } + if args.CapDrop != "" { + drops = strings.Split(args.CapDrop, ":") + } + + caps, err := execdriver.TweakCapabilities(container.Capabilities, adds, drops) + if 
err != nil {
+			return err
+		}
+
+		// drop all other capabilities
+		if err := capabilities.DropCapabilities(caps); err != nil {
+			return fmt.Errorf("drop capabilities %s", err)
+		}
+	}
+
+	if err := setupWorkingDirectory(args); err != nil {
+		return err
+	}
+
+	return nil
+}
diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/lxc/lxc_init_unsupported.go docker.io-1.3.2~dfsg1/daemon/execdriver/lxc/lxc_init_unsupported.go
--- docker.io-0.9.1~dfsg1/daemon/execdriver/lxc/lxc_init_unsupported.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/daemon/execdriver/lxc/lxc_init_unsupported.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,11 @@
+// +build !linux
+
+package lxc
+
+func setHostname(hostname string) error {
+	panic("not supported on non-linux platforms")
+}
+
+func finalizeNamespace(args *InitArgs) error {
+	panic("not supported on non-linux platforms")
+}
diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/lxc/lxc_template.go docker.io-1.3.2~dfsg1/daemon/execdriver/lxc/lxc_template.go
--- docker.io-0.9.1~dfsg1/daemon/execdriver/lxc/lxc_template.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/daemon/execdriver/lxc/lxc_template.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,151 @@
+package lxc
+
+import (
+	"strings"
+	"text/template"
+
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/libcontainer/label"
+)
+
+const LxcTemplate = `
+{{if .Network.Interface}}
+# network configuration
+lxc.network.type = veth
+lxc.network.link = {{.Network.Interface.Bridge}}
+lxc.network.name = eth0
+lxc.network.mtu = {{.Network.Mtu}}
+{{else if .Network.HostNetworking}}
+lxc.network.type = none
+{{else}}
+# network is disabled (-n=false)
+lxc.network.type = empty
+lxc.network.flags = up
+lxc.network.mtu = {{.Network.Mtu}}
+{{end}}
+
+# root filesystem
+{{$ROOTFS := .Rootfs}}
+lxc.rootfs = {{$ROOTFS}}
+
+# use a dedicated pts for the container (and limit the number of pseudo terminals
+# available)
+lxc.pts = 1024
+
+# disable the main console
+lxc.console = none
+
+# no controlling tty at all
+lxc.tty = 1
+
+{{if .ProcessConfig.Privileged}}
+lxc.cgroup.devices.allow = a
+{{else}}
+# no implicit access to devices
+lxc.cgroup.devices.deny = a
+# Allow the devices passed to us in the AllowedDevices list.
+{{range $allowedDevice := .AllowedDevices}}
+lxc.cgroup.devices.allow = {{$allowedDevice.GetCgroupAllowString}}
+{{end}}
+{{end}}
+
+# standard mount point
+# Use mnt.putold as per https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/986385
+lxc.pivotdir = lxc_putold
+
+# NOTICE: These mounts must be applied within the namespace
+
+# WARNING: mounting procfs and/or sysfs read-write is a known attack vector.
+# See e.g. http://blog.zx2c4.com/749 and http://bit.ly/T9CkqJ
+# We mount them read-write here, but later, dockerinit will call the Restrict() function to remount them read-only.
+# We cannot mount them directly read-only, because that would prevent loading AppArmor profiles.
+lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc nosuid,nodev,noexec 0 0 +lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0 + +{{if .ProcessConfig.Tty}} +lxc.mount.entry = {{.ProcessConfig.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 0 +{{end}} + +lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts {{formatMountLabel "newinstance,ptmxmode=0666,nosuid,noexec" ""}} 0 0 +lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs {{formatMountLabel "size=65536k,nosuid,nodev,noexec" ""}} 0 0 + +{{range $value := .Mounts}} +{{if $value.Writable}} +lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,rw 0 0 +{{else}} +lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,ro 0 0 +{{end}} +{{end}} + +{{if .ProcessConfig.Privileged}} +{{if .AppArmor}} +lxc.aa_profile = unconfined +{{else}} +# Let AppArmor normal confinement take place (i.e., not unconfined) +{{end}} +{{end}} + +# limits +{{if .Resources}} +{{if .Resources.Memory}} +lxc.cgroup.memory.limit_in_bytes = {{.Resources.Memory}} +lxc.cgroup.memory.soft_limit_in_bytes = {{.Resources.Memory}} +{{with $memSwap := getMemorySwap .Resources}} +lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}} +{{end}} +{{end}} +{{if .Resources.CpuShares}} +lxc.cgroup.cpu.shares = {{.Resources.CpuShares}} +{{end}} +{{if .Resources.Cpuset}} +lxc.cgroup.cpuset.cpus = {{.Resources.Cpuset}} +{{end}} +{{end}} + +{{if .LxcConfig}} +{{range $value := .LxcConfig}} +lxc.{{$value}} +{{end}} +{{end}} +` + +var LxcTemplateCompiled *template.Template + +// Escape spaces in strings according to the fstab documentation, which is the +// format for "lxc.mount.entry" lines in lxc.conf. See also "man 5 fstab". +func escapeFstabSpaces(field string) string { + return strings.Replace(field, " ", "\\040", -1) +} + +func getMemorySwap(v *execdriver.Resources) int64 { + // By default, MemorySwap is set to twice the size of RAM. + // If you want to omit MemorySwap, set it to `-1'. 
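+	// Returning 0 causes the template's {{with $memSwap := getMemorySwap .Resources}}
+	// block above to skip the memsw entry entirely. Worked example (illustrative
+	// values): Memory = 536870912 (512 MB) with MemorySwap unset gives
+	// lxc.cgroup.memory.memsw.limit_in_bytes = 1073741824.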
+ if v.MemorySwap < 0 { + return 0 + } + return v.Memory * 2 +} + +func getLabel(c map[string][]string, name string) string { + label := c["label"] + for _, l := range label { + parts := strings.SplitN(l, "=", 2) + if strings.TrimSpace(parts[0]) == name { + return strings.TrimSpace(parts[1]) + } + } + return "" +} + +func init() { + var err error + funcMap := template.FuncMap{ + "getMemorySwap": getMemorySwap, + "escapeFstabSpaces": escapeFstabSpaces, + "formatMountLabel": label.FormatMountLabel, + } + LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate) + if err != nil { + panic(err) + } +} diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/lxc/lxc_template_unit_test.go docker.io-1.3.2~dfsg1/daemon/execdriver/lxc/lxc_template_unit_test.go --- docker.io-0.9.1~dfsg1/daemon/execdriver/lxc/lxc_template_unit_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/execdriver/lxc/lxc_template_unit_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,142 @@ +// +build linux + +package lxc + +import ( + "bufio" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path" + "strings" + "testing" + "time" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/libcontainer/devices" +) + +func TestLXCConfig(t *testing.T) { + root, err := ioutil.TempDir("", "TestLXCConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + + os.MkdirAll(path.Join(root, "containers", "1"), 0777) + + // Memory is allocated randomly for testing + rand.Seed(time.Now().UTC().UnixNano()) + var ( + memMin = 33554432 + memMax = 536870912 + mem = memMin + rand.Intn(memMax-memMin) + cpuMin = 100 + cpuMax = 10000 + cpu = cpuMin + rand.Intn(cpuMax-cpuMin) + ) + + driver, err := NewDriver(root, "", false) + if err != nil { + t.Fatal(err) + } + command := &execdriver.Command{ + ID: "1", + Resources: &execdriver.Resources{ + Memory: int64(mem), + CpuShares: int64(cpu), + }, + Network: &execdriver.Network{ + Mtu: 1500, + Interface: nil, + }, + AllowedDevices: make([]*devices.Device, 0), + ProcessConfig: execdriver.ProcessConfig{}, + } + p, err := driver.generateLXCConfig(command) + if err != nil { + t.Fatal(err) + } + grepFile(t, p, + fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem)) + + grepFile(t, p, + fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2)) +} + +func TestCustomLxcConfig(t *testing.T) { + root, err := ioutil.TempDir("", "TestCustomLxcConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + + os.MkdirAll(path.Join(root, "containers", "1"), 0777) + + driver, err := NewDriver(root, "", false) + if err != nil { + t.Fatal(err) + } + processConfig := execdriver.ProcessConfig{ + Privileged: false, + } + command := &execdriver.Command{ + ID: "1", + LxcConfig: []string{ + "lxc.utsname = docker", + "lxc.cgroup.cpuset.cpus = 0,1", + }, + Network: &execdriver.Network{ + Mtu: 1500, + Interface: nil, + }, + ProcessConfig: processConfig, + } + + p, err := driver.generateLXCConfig(command) + if err != nil { + t.Fatal(err) + } + + grepFile(t, p, "lxc.utsname = docker") + grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") +} + +func grepFile(t *testing.T, path string, pattern string) { + f, err := os.Open(path) + if err != nil { + t.Fatal(err) + } + defer f.Close() + r := bufio.NewReader(f) + var ( + line string + ) + err = nil + for err == nil { + line, err = r.ReadString('\n') + if strings.Contains(line, pattern) == true { + return + } + } + t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path) +} + +func 
TestEscapeFstabSpaces(t *testing.T) { + var testInputs = map[string]string{ + " ": "\\040", + "": "", + "/double space": "/double\\040\\040space", + "/some long test string": "/some\\040long\\040test\\040string", + "/var/lib/docker": "/var/lib/docker", + " leading": "\\040leading", + "trailing ": "trailing\\040", + } + for in, exp := range testInputs { + if out := escapeFstabSpaces(in); exp != out { + t.Logf("Expected %s got %s", exp, out) + t.Fail() + } + } +} diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/lxc/MAINTAINERS docker.io-1.3.2~dfsg1/daemon/execdriver/lxc/MAINTAINERS --- docker.io-0.9.1~dfsg1/daemon/execdriver/lxc/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/execdriver/lxc/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +Dinesh Subhraveti (@dineshs-altiscale) diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/MAINTAINERS docker.io-1.3.2~dfsg1/daemon/execdriver/MAINTAINERS --- docker.io-0.9.1~dfsg1/daemon/execdriver/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/execdriver/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Victor Vieux (@vieux) diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/native/create.go docker.io-1.3.2~dfsg1/daemon/execdriver/native/create.go --- docker.io-0.9.1~dfsg1/daemon/execdriver/native/create.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/execdriver/native/create.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,183 @@ +// +build linux,cgo + +package native + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/execdriver/native/template" + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/apparmor" + "github.com/docker/libcontainer/devices" + "github.com/docker/libcontainer/mount" + "github.com/docker/libcontainer/security/capabilities" +) + +// createContainer populates and configures the container type with the +// data provided by the execdriver.Command +func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, error) { + container := template.New() + + container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env) + container.Tty = c.ProcessConfig.Tty + container.User = c.ProcessConfig.User + container.WorkingDir = c.WorkingDir + container.Env = c.ProcessConfig.Env + container.Cgroups.Name = c.ID + container.Cgroups.AllowedDevices = c.AllowedDevices + container.MountConfig.DeviceNodes = c.AutoCreatedDevices + container.RootFs = c.Rootfs + + // check to see if we are running in ramdisk to disable pivot root + container.MountConfig.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" + container.RestrictSys = true + + if err := d.createNetwork(container, c); err != nil { + return nil, err + } + + if c.ProcessConfig.Privileged { + if err := d.setPrivileged(container); err != nil { + return nil, err + } + } else { + if err := d.setCapabilities(container, c); err != nil { + return nil, err + } + } + + if c.AppArmorProfile != "" { + container.AppArmorProfile = c.AppArmorProfile + } + + if err := d.setupCgroups(container, c); err != nil { + return nil, err + } + + if err := d.setupMounts(container, c); err != nil { + return nil, err + } + + if err := d.setupLabels(container, c); err != nil { + return nil, err + } + + cmds := make(map[string]*exec.Cmd) + d.Lock() + for k, v := range d.activeContainers { + cmds[k] = v.cmd + } + d.Unlock() + + return container, nil +} + +func (d 
*driver) createNetwork(container *libcontainer.Config, c *execdriver.Command) error { + if c.Network.HostNetworking { + container.Namespaces["NEWNET"] = false + return nil + } + + container.Networks = []*libcontainer.Network{ + { + Mtu: c.Network.Mtu, + Address: fmt.Sprintf("%s/%d", "127.0.0.1", 0), + Gateway: "localhost", + Type: "loopback", + }, + } + + if c.Network.Interface != nil { + vethNetwork := libcontainer.Network{ + Mtu: c.Network.Mtu, + Address: fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen), + MacAddress: c.Network.Interface.MacAddress, + Gateway: c.Network.Interface.Gateway, + Type: "veth", + Bridge: c.Network.Interface.Bridge, + VethPrefix: "veth", + } + container.Networks = append(container.Networks, &vethNetwork) + } + + if c.Network.ContainerID != "" { + d.Lock() + active := d.activeContainers[c.Network.ContainerID] + d.Unlock() + + if active == nil || active.cmd.Process == nil { + return fmt.Errorf("%s is not a valid running container to join", c.Network.ContainerID) + } + cmd := active.cmd + + nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net") + container.Networks = append(container.Networks, &libcontainer.Network{ + Type: "netns", + NsPath: nspath, + }) + } + + return nil +} + +func (d *driver) setPrivileged(container *libcontainer.Config) (err error) { + container.Capabilities = capabilities.GetAllCapabilities() + container.Cgroups.AllowAllDevices = true + + hostDeviceNodes, err := devices.GetHostDeviceNodes() + if err != nil { + return err + } + container.MountConfig.DeviceNodes = hostDeviceNodes + + container.RestrictSys = false + + if apparmor.IsEnabled() { + container.AppArmorProfile = "unconfined" + } + + return nil +} + +func (d *driver) setCapabilities(container *libcontainer.Config, c *execdriver.Command) (err error) { + container.Capabilities, err = execdriver.TweakCapabilities(container.Capabilities, c.CapAdd, c.CapDrop) + return err +} + +func (d *driver) setupCgroups(container *libcontainer.Config, c *execdriver.Command) error { + if c.Resources != nil { + container.Cgroups.CpuShares = c.Resources.CpuShares + container.Cgroups.Memory = c.Resources.Memory + container.Cgroups.MemoryReservation = c.Resources.Memory + container.Cgroups.MemorySwap = c.Resources.MemorySwap + container.Cgroups.CpusetCpus = c.Resources.Cpuset + } + + return nil +} + +func (d *driver) setupMounts(container *libcontainer.Config, c *execdriver.Command) error { + for _, m := range c.Mounts { + container.MountConfig.Mounts = append(container.MountConfig.Mounts, &mount.Mount{ + Type: "bind", + Source: m.Source, + Destination: m.Destination, + Writable: m.Writable, + Private: m.Private, + Slave: m.Slave, + }) + } + + return nil +} + +func (d *driver) setupLabels(container *libcontainer.Config, c *execdriver.Command) error { + container.ProcessLabel = c.ProcessLabel + container.MountConfig.MountLabel = c.MountLabel + + return nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/native/driver.go docker.io-1.3.2~dfsg1/daemon/execdriver/native/driver.go --- docker.io-0.9.1~dfsg1/daemon/execdriver/native/driver.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/execdriver/native/driver.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,311 @@ +// +build linux,cgo + +package native + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "syscall" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/term" + 
"github.com/docker/libcontainer" + "github.com/docker/libcontainer/apparmor" + "github.com/docker/libcontainer/cgroups/fs" + "github.com/docker/libcontainer/cgroups/systemd" + consolepkg "github.com/docker/libcontainer/console" + "github.com/docker/libcontainer/namespaces" + _ "github.com/docker/libcontainer/namespaces/nsenter" + "github.com/docker/libcontainer/system" +) + +const ( + DriverName = "native" + Version = "0.2" +) + +type activeContainer struct { + container *libcontainer.Config + cmd *exec.Cmd +} + +type driver struct { + root string + initPath string + activeContainers map[string]*activeContainer + sync.Mutex +} + +func NewDriver(root, initPath string) (*driver, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + + // native driver root is at docker_root/execdriver/native. Put apparmor at docker_root + if err := apparmor.InstallDefaultProfile(); err != nil { + return nil, err + } + + return &driver{ + root: root, + initPath: initPath, + activeContainers: make(map[string]*activeContainer), + }, nil +} + +func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + // take the Command and populate the libcontainer.Config from it + container, err := d.createContainer(c) + if err != nil { + return -1, err + } + + var term execdriver.Terminal + + if c.ProcessConfig.Tty { + term, err = NewTtyConsole(&c.ProcessConfig, pipes) + } else { + term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes) + } + if err != nil { + return -1, err + } + c.ProcessConfig.Terminal = term + + d.Lock() + d.activeContainers[c.ID] = &activeContainer{ + container: container, + cmd: &c.ProcessConfig.Cmd, + } + d.Unlock() + + var ( + dataPath = filepath.Join(d.root, c.ID) + args = append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...) + ) + + if err := d.createContainerRoot(c.ID); err != nil { + return -1, err + } + defer d.cleanContainer(c.ID) + + if err := d.writeContainerFile(container, c.ID); err != nil { + return -1, err + } + + return namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, dataPath, args, func(container *libcontainer.Config, console, dataPath, init string, child *os.File, args []string) *exec.Cmd { + c.ProcessConfig.Path = d.initPath + c.ProcessConfig.Args = append([]string{ + DriverName, + "-console", console, + "-pipe", "3", + "-root", filepath.Join(d.root, c.ID), + "--", + }, args...) 
+ + // set this to nil so that when we set the clone flags anything else is reset + c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{ + Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)), + } + c.ProcessConfig.ExtraFiles = []*os.File{child} + + c.ProcessConfig.Env = container.Env + c.ProcessConfig.Dir = container.RootFs + + return &c.ProcessConfig.Cmd + }, func() { + if startCallback != nil { + c.ContainerPid = c.ProcessConfig.Process.Pid + startCallback(&c.ProcessConfig, c.ContainerPid) + } + }) +} + +func (d *driver) Kill(p *execdriver.Command, sig int) error { + return syscall.Kill(p.ProcessConfig.Process.Pid, syscall.Signal(sig)) +} + +func (d *driver) Pause(c *execdriver.Command) error { + active := d.activeContainers[c.ID] + if active == nil { + return fmt.Errorf("active container for %s does not exist", c.ID) + } + active.container.Cgroups.Freezer = "FROZEN" + if systemd.UseSystemd() { + return systemd.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer) + } + return fs.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer) +} + +func (d *driver) Unpause(c *execdriver.Command) error { + active := d.activeContainers[c.ID] + if active == nil { + return fmt.Errorf("active container for %s does not exist", c.ID) + } + active.container.Cgroups.Freezer = "THAWED" + if systemd.UseSystemd() { + return systemd.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer) + } + return fs.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer) +} + +func (d *driver) Terminate(p *execdriver.Command) error { + // lets check the start time for the process + state, err := libcontainer.GetState(filepath.Join(d.root, p.ID)) + if err != nil { + if !os.IsNotExist(err) { + return err + } + // TODO: Remove this part for version 1.2.0 + // This is added only to ensure smooth upgrades from pre 1.1.0 to 1.1.0 + data, err := ioutil.ReadFile(filepath.Join(d.root, p.ID, "start")) + if err != nil { + // if we don't have the data on disk then we can assume the process is gone + // because this is only removed after we know the process has stopped + if os.IsNotExist(err) { + return nil + } + return err + } + state = &libcontainer.State{InitStartTime: string(data)} + } + + currentStartTime, err := system.GetProcessStartTime(p.ProcessConfig.Process.Pid) + if err != nil { + return err + } + + if state.InitStartTime == currentStartTime { + err = syscall.Kill(p.ProcessConfig.Process.Pid, 9) + syscall.Wait4(p.ProcessConfig.Process.Pid, nil, 0, nil) + } + d.cleanContainer(p.ID) + + return err + +} + +func (d *driver) Info(id string) execdriver.Info { + return &info{ + ID: id, + driver: d, + } +} + +func (d *driver) Name() string { + return fmt.Sprintf("%s-%s", DriverName, Version) +} + +func (d *driver) GetPidsForContainer(id string) ([]int, error) { + d.Lock() + active := d.activeContainers[id] + d.Unlock() + + if active == nil { + return nil, fmt.Errorf("active container for %s does not exist", id) + } + c := active.container.Cgroups + + if systemd.UseSystemd() { + return systemd.GetPids(c) + } + return fs.GetPids(c) +} + +func (d *driver) writeContainerFile(container *libcontainer.Config, id string) error { + data, err := json.Marshal(container) + if err != nil { + return err + } + return ioutil.WriteFile(filepath.Join(d.root, id, "container.json"), data, 0655) +} + +func (d *driver) cleanContainer(id string) error { + d.Lock() + delete(d.activeContainers, id) + d.Unlock() + return os.RemoveAll(filepath.Join(d.root, id, "container.json")) +} + +func (d 
*driver) createContainerRoot(id string) error { + return os.MkdirAll(filepath.Join(d.root, id), 0655) +} + +func (d *driver) Clean(id string) error { + return os.RemoveAll(filepath.Join(d.root, id)) +} + +func getEnv(key string, env []string) string { + for _, pair := range env { + parts := strings.Split(pair, "=") + if parts[0] == key { + return parts[1] + } + } + return "" +} + +type TtyConsole struct { + MasterPty *os.File +} + +func NewTtyConsole(processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes) (*TtyConsole, error) { + ptyMaster, console, err := consolepkg.CreateMasterAndConsole() + if err != nil { + return nil, err + } + + tty := &TtyConsole{ + MasterPty: ptyMaster, + } + + if err := tty.AttachPipes(&processConfig.Cmd, pipes); err != nil { + tty.Close() + return nil, err + } + + processConfig.Console = console + + return tty, nil +} + +func (t *TtyConsole) Master() *os.File { + return t.MasterPty +} + +func (t *TtyConsole) Resize(h, w int) error { + return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) +} + +func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *execdriver.Pipes) error { + go func() { + if wb, ok := pipes.Stdout.(interface { + CloseWriters() error + }); ok { + defer wb.CloseWriters() + } + + io.Copy(pipes.Stdout, t.MasterPty) + }() + + if pipes.Stdin != nil { + go func() { + io.Copy(t.MasterPty, pipes.Stdin) + + pipes.Stdin.Close() + }() + } + + return nil +} + +func (t *TtyConsole) Close() error { + return t.MasterPty.Close() +} diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/native/driver_unsupported.go docker.io-1.3.2~dfsg1/daemon/execdriver/native/driver_unsupported.go --- docker.io-0.9.1~dfsg1/daemon/execdriver/native/driver_unsupported.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/execdriver/native/driver_unsupported.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,13 @@ +// +build !linux + +package native + +import ( + "fmt" + + "github.com/docker/docker/daemon/execdriver" +) + +func NewDriver(root, initPath string) (execdriver.Driver, error) { + return nil, fmt.Errorf("native driver not supported on non-linux") +} diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/native/driver_unsupported_nocgo.go docker.io-1.3.2~dfsg1/daemon/execdriver/native/driver_unsupported_nocgo.go --- docker.io-0.9.1~dfsg1/daemon/execdriver/native/driver_unsupported_nocgo.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/execdriver/native/driver_unsupported_nocgo.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,13 @@ +// +build linux,!cgo + +package native + +import ( + "fmt" + + "github.com/docker/docker/daemon/execdriver" +) + +func NewDriver(root, initPath string) (execdriver.Driver, error) { + return nil, fmt.Errorf("native driver not supported on non-linux") +} diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/native/exec.go docker.io-1.3.2~dfsg1/daemon/execdriver/native/exec.go --- docker.io-0.9.1~dfsg1/daemon/execdriver/native/exec.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/execdriver/native/exec.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,70 @@ +// +build linux + +package native + +import ( + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/namespaces" +) + +const execCommandName = "nsenter-exec" + +func init() { + reexec.Register(execCommandName, nsenterExec) +} + 
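+// nsenterExec is the reexec entry point for "nsenter-exec". By the time it runs,
+// the nsenter package (imported for its side effects in driver.go) has already
+// entered the target container's namespaces from its C constructor, so all that
+// is left to do in Go is load the config from the sync pipe and exec the user
+// command found after "--".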
+func nsenterExec() { + runtime.LockOSThread() + + // User args are passed after '--' in the command line. + userArgs := findUserArgs() + + config, err := loadConfigFromFd() + if err != nil { + log.Fatalf("docker-exec: unable to receive config from sync pipe: %s", err) + } + + if err := namespaces.FinalizeSetns(config, userArgs); err != nil { + log.Fatalf("docker-exec: failed to exec: %s", err) + } +} + +// TODO(vishh): Add support for running in priviledged mode and running as a different user. +func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + active := d.activeContainers[c.ID] + if active == nil { + return -1, fmt.Errorf("No active container exists with ID %s", c.ID) + } + state, err := libcontainer.GetState(filepath.Join(d.root, c.ID)) + if err != nil { + return -1, fmt.Errorf("State unavailable for container with ID %s. The container may have been cleaned up already. Error: %s", c.ID, err) + } + + var term execdriver.Terminal + + if processConfig.Tty { + term, err = NewTtyConsole(processConfig, pipes) + } else { + term, err = execdriver.NewStdConsole(processConfig, pipes) + } + + processConfig.Terminal = term + + args := append([]string{processConfig.Entrypoint}, processConfig.Arguments...) + + return namespaces.ExecIn(active.container, state, args, os.Args[0], "exec", processConfig.Stdin, processConfig.Stdout, processConfig.Stderr, processConfig.Console, + func(cmd *exec.Cmd) { + if startCallback != nil { + startCallback(&c.ProcessConfig, cmd.Process.Pid) + } + }) +} diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/native/info.go docker.io-1.3.2~dfsg1/daemon/execdriver/native/info.go --- docker.io-0.9.1~dfsg1/daemon/execdriver/native/info.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/execdriver/native/info.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,30 @@ +// +build linux,cgo + +package native + +import ( + "os" + "path/filepath" + + "github.com/docker/libcontainer" +) + +type info struct { + ID string + driver *driver +} + +// IsRunning is determined by looking for the +// pid file for a container. 
If the file exists then the +// container is currently running +func (i *info) IsRunning() bool { + if _, err := libcontainer.GetState(filepath.Join(i.driver.root, i.ID)); err == nil { + return true + } + // TODO: Remove this part for version 1.2.0 + // This is added only to ensure smooth upgrades from pre 1.1.0 to 1.1.0 + if _, err := os.Stat(filepath.Join(i.driver.root, i.ID, "pid")); err == nil { + return true + } + return false +} diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/native/init.go docker.io-1.3.2~dfsg1/daemon/execdriver/native/init.go --- docker.io-0.9.1~dfsg1/daemon/execdriver/native/init.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/execdriver/native/init.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,66 @@ +// +build linux + +package native + +import ( + "encoding/json" + "flag" + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/pkg/reexec" + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/namespaces" + "github.com/docker/libcontainer/syncpipe" +) + +func init() { + reexec.Register(DriverName, initializer) +} + +func initializer() { + runtime.LockOSThread() + + var ( + pipe = flag.Int("pipe", 0, "sync pipe fd") + console = flag.String("console", "", "console (pty slave) path") + root = flag.String("root", ".", "root path for configuration files") + ) + + flag.Parse() + + var container *libcontainer.Config + f, err := os.Open(filepath.Join(*root, "container.json")) + if err != nil { + writeError(err) + } + + if err := json.NewDecoder(f).Decode(&container); err != nil { + f.Close() + writeError(err) + } + f.Close() + + rootfs, err := os.Getwd() + if err != nil { + writeError(err) + } + + syncPipe, err := syncpipe.NewSyncPipeFromFd(0, uintptr(*pipe)) + if err != nil { + writeError(err) + } + + if err := namespaces.Init(container, rootfs, *console, syncPipe, flag.Args()); err != nil { + writeError(err) + } + + panic("Unreachable") +} + +func writeError(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/native/template/default_template.go docker.io-1.3.2~dfsg1/daemon/execdriver/native/template/default_template.go --- docker.io-0.9.1~dfsg1/daemon/execdriver/native/template/default_template.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/execdriver/native/template/default_template.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,47 @@ +package template + +import ( + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/apparmor" + "github.com/docker/libcontainer/cgroups" +) + +// New returns the docker default configuration for libcontainer +func New() *libcontainer.Config { + container := &libcontainer.Config{ + Capabilities: []string{ + "CHOWN", + "DAC_OVERRIDE", + "FSETID", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL", + "AUDIT_WRITE", + }, + Namespaces: map[string]bool{ + "NEWNS": true, + "NEWUTS": true, + "NEWIPC": true, + "NEWPID": true, + "NEWNET": true, + }, + Cgroups: &cgroups.Cgroup{ + Parent: "docker", + AllowAllDevices: false, + }, + MountConfig: &libcontainer.MountConfig{}, + } + + if apparmor.IsEnabled() { + container.AppArmorProfile = "docker-default" + } + + return container +} diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/native/utils.go docker.io-1.3.2~dfsg1/daemon/execdriver/native/utils.go --- docker.io-0.9.1~dfsg1/daemon/execdriver/native/utils.go 1970-01-01 00:00:00.000000000 +0000 +++ 
docker.io-1.3.2~dfsg1/daemon/execdriver/native/utils.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,35 @@
+// +build linux
+
+package native
+
+import (
+	"os"
+
+	"github.com/docker/libcontainer"
+	"github.com/docker/libcontainer/syncpipe"
+)
+
+func findUserArgs() []string {
+	for i, a := range os.Args {
+		if a == "--" {
+			return os.Args[i+1:]
+		}
+	}
+	return []string{}
+}
+
+// loadConfigFromFd loads a container's config from the sync pipe that is provided by
+// fd 3 when running a process
+func loadConfigFromFd() (*libcontainer.Config, error) {
+	syncPipe, err := syncpipe.NewSyncPipeFromFd(0, 3)
+	if err != nil {
+		return nil, err
+	}
+
+	var config *libcontainer.Config
+	if err := syncPipe.ReadFromParent(&config); err != nil {
+		return nil, err
+	}
+
+	return config, nil
+}
diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/pipes.go docker.io-1.3.2~dfsg1/daemon/execdriver/pipes.go
--- docker.io-0.9.1~dfsg1/daemon/execdriver/pipes.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/daemon/execdriver/pipes.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,23 @@
+package execdriver
+
+import (
+	"io"
+)
+
+// Pipes is a wrapper around a container's standard streams for
+// stdin, stdout, stderr
+type Pipes struct {
+	Stdin          io.ReadCloser
+	Stdout, Stderr io.Writer
+}
+
+func NewPipes(stdin io.ReadCloser, stdout, stderr io.Writer, useStdin bool) *Pipes {
+	p := &Pipes{
+		Stdout: stdout,
+		Stderr: stderr,
+	}
+	if useStdin {
+		p.Stdin = stdin
+	}
+	return p
+}
diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/termconsole.go docker.io-1.3.2~dfsg1/daemon/execdriver/termconsole.go
--- docker.io-0.9.1~dfsg1/daemon/execdriver/termconsole.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/daemon/execdriver/termconsole.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,46 @@
+package execdriver
+
+import (
+	"io"
+	"os/exec"
+)
+
+type StdConsole struct {
+}
+
+func NewStdConsole(processConfig *ProcessConfig, pipes *Pipes) (*StdConsole, error) {
+	std := &StdConsole{}
+
+	if err := std.AttachPipes(&processConfig.Cmd, pipes); err != nil {
+		return nil, err
+	}
+	return std, nil
+}
+
+func (s *StdConsole) AttachPipes(command *exec.Cmd, pipes *Pipes) error {
+	command.Stdout = pipes.Stdout
+	command.Stderr = pipes.Stderr
+
+	if pipes.Stdin != nil {
+		stdin, err := command.StdinPipe()
+		if err != nil {
+			return err
+		}
+
+		go func() {
+			defer stdin.Close()
+			io.Copy(stdin, pipes.Stdin)
+		}()
+	}
+	return nil
+}
+
+func (s *StdConsole) Resize(h, w int) error {
+	// we do not need to resize a non tty
+	return nil
+}
+
+func (s *StdConsole) Close() error {
+	// nothing to close here
+	return nil
+}
diff -Nru docker.io-0.9.1~dfsg1/daemon/execdriver/utils.go docker.io-1.3.2~dfsg1/daemon/execdriver/utils.go
--- docker.io-0.9.1~dfsg1/daemon/execdriver/utils.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/daemon/execdriver/utils.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,63 @@
+package execdriver
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/utils"
+	"github.com/docker/libcontainer/security/capabilities"
+)
+
+func TweakCapabilities(basics, adds, drops []string) ([]string, error) {
+	var (
+		newCaps []string
+		allCaps = capabilities.GetAllCapabilities()
+	)
+
+	// look for invalid cap in the drop list
+	for _, cap := range drops {
+		if strings.ToLower(cap) == "all" {
+			continue
+		}
+		if !utils.StringsContainsNoCase(allCaps, cap) {
+			return nil, fmt.Errorf("Unknown capability drop: %q", cap)
+		}
+	}
+
+	// handle --cap-add=all
+	if 
utils.StringsContainsNoCase(adds, "all") {
+		basics = capabilities.GetAllCapabilities()
+	}
+
+	if !utils.StringsContainsNoCase(drops, "all") {
+		for _, cap := range basics {
+			// skip `all` already handled above
+			if strings.ToLower(cap) == "all" {
+				continue
+			}
+
+			// if we don't drop `all`, add back all the non-dropped caps
+			if !utils.StringsContainsNoCase(drops, cap) {
+				newCaps = append(newCaps, strings.ToUpper(cap))
+			}
+		}
+	}
+
+	for _, cap := range adds {
+		// skip `all` already handled above
+		if strings.ToLower(cap) == "all" {
+			continue
+		}
+
+		if !utils.StringsContainsNoCase(allCaps, cap) {
+			return nil, fmt.Errorf("Unknown capability to add: %q", cap)
+		}
+
+		// add cap if not already in the list
+		if !utils.StringsContainsNoCase(newCaps, cap) {
+			newCaps = append(newCaps, strings.ToUpper(cap))
+		}
+	}
+
+	return newCaps, nil
+}
diff -Nru docker.io-0.9.1~dfsg1/daemon/exec.go docker.io-1.3.2~dfsg1/daemon/exec.go
--- docker.io-0.9.1~dfsg1/daemon/exec.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/daemon/exec.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,301 @@
+// +build linux
+
+package daemon
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strings"
+	"sync"
+
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/daemon/execdriver/lxc"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/broadcastwriter"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/promise"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
+)
+
+type execConfig struct {
+	sync.Mutex
+	ID            string
+	Running       bool
+	ProcessConfig execdriver.ProcessConfig
+	StreamConfig
+	OpenStdin  bool
+	OpenStderr bool
+	OpenStdout bool
+	Container  *Container
+}
+
+type execStore struct {
+	s map[string]*execConfig
+	sync.Mutex
+}
+
+func newExecStore() *execStore {
+	return &execStore{s: make(map[string]*execConfig, 0)}
+}
+
+func (e *execStore) Add(id string, execConfig *execConfig) {
+	e.Lock()
+	e.s[id] = execConfig
+	e.Unlock()
+}
+
+func (e *execStore) Get(id string) *execConfig {
+	e.Lock()
+	res := e.s[id]
+	e.Unlock()
+	return res
+}
+
+func (e *execStore) Delete(id string) {
+	e.Lock()
+	delete(e.s, id)
+	e.Unlock()
+}
+
+func (execConfig *execConfig) Resize(h, w int) error {
+	return execConfig.ProcessConfig.Terminal.Resize(h, w)
+}
+
+func (d *Daemon) registerExecCommand(execConfig *execConfig) {
+	// Storing execs in container in order to kill them gracefully whenever the container is stopped or removed.
+	execConfig.Container.execCommands.Add(execConfig.ID, execConfig)
+	// Storing execs in daemon for easy access via remote API.
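+	// Registering in both stores lets the remote API resolve an exec ID directly,
+	// while getExecConfig below can still check that the owning container is
+	// running before the exec is started.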
+ d.execCommands.Add(execConfig.ID, execConfig) +} + +func (d *Daemon) getExecConfig(name string) (*execConfig, error) { + if execConfig := d.execCommands.Get(name); execConfig != nil { + if !execConfig.Container.IsRunning() { + return nil, fmt.Errorf("Container %s is not running", execConfig.Container.ID) + } + return execConfig, nil + } + + return nil, fmt.Errorf("No such exec instance '%s' found in daemon", name) +} + +func (d *Daemon) unregisterExecCommand(execConfig *execConfig) { + execConfig.Container.execCommands.Delete(execConfig.ID) + d.execCommands.Delete(execConfig.ID) +} + +func (d *Daemon) getActiveContainer(name string) (*Container, error) { + container := d.Get(name) + + if container == nil { + return nil, fmt.Errorf("No such container: %s", name) + } + + if !container.IsRunning() { + return nil, fmt.Errorf("Container %s is not running", name) + } + + return container, nil +} + +func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s [options] container command [args]", job.Name) + } + + if strings.HasPrefix(d.execDriver.Name(), lxc.DriverName) { + return job.Error(lxc.ErrExec) + } + + var name = job.Args[0] + + container, err := d.getActiveContainer(name) + if err != nil { + return job.Error(err) + } + + config := runconfig.ExecConfigFromJob(job) + + entrypoint, args := d.getEntrypointAndArgs(nil, config.Cmd) + + processConfig := execdriver.ProcessConfig{ + Privileged: config.Privileged, + User: config.User, + Tty: config.Tty, + Entrypoint: entrypoint, + Arguments: args, + } + + execConfig := &execConfig{ + ID: utils.GenerateRandomID(), + OpenStdin: config.AttachStdin, + OpenStdout: config.AttachStdout, + OpenStderr: config.AttachStderr, + StreamConfig: StreamConfig{}, + ProcessConfig: processConfig, + Container: container, + Running: false, + } + + d.registerExecCommand(execConfig) + + job.Printf("%s\n", execConfig.ID) + + return engine.StatusOK +} + +func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s [options] exec", job.Name) + } + + var ( + cStdin io.ReadCloser + cStdout, cStderr io.Writer + cStdinCloser io.Closer + execName = job.Args[0] + ) + + execConfig, err := d.getExecConfig(execName) + if err != nil { + return job.Error(err) + } + + func() { + execConfig.Lock() + defer execConfig.Unlock() + if execConfig.Running { + err = fmt.Errorf("Error: Exec command %s is already running", execName) + } + execConfig.Running = true + }() + if err != nil { + return job.Error(err) + } + + log.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID) + container := execConfig.Container + + if execConfig.OpenStdin { + r, w := io.Pipe() + go func() { + defer w.Close() + io.Copy(w, job.Stdin) + }() + cStdin = r + cStdinCloser = job.Stdin + } + if execConfig.OpenStdout { + cStdout = job.Stdout + } + if execConfig.OpenStderr { + cStderr = job.Stderr + } + + execConfig.StreamConfig.stderr = broadcastwriter.New() + execConfig.StreamConfig.stdout = broadcastwriter.New() + // Attach to stdin + if execConfig.OpenStdin { + execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdinPipe = io.Pipe() + } else { + execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin + } + + attachErr := d.Attach(&execConfig.StreamConfig, execConfig.OpenStdin, false, execConfig.ProcessConfig.Tty, cStdin, cStdinCloser, cStdout, cStderr) + + execErr := make(chan error) + + // Remove exec from 
daemon and container.
+	defer d.unregisterExecCommand(execConfig)
+
+	go func() {
+		err := container.Exec(execConfig)
+		if err != nil {
+			execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err)
+		}
+	}()
+
+	select {
+	case err := <-attachErr:
+		if err != nil {
+			return job.Errorf("attach failed with error: %s", err)
+		}
+		break
+	case err := <-execErr:
+		return job.Error(err)
+	}
+
+	return engine.StatusOK
+}
+
+func (d *Daemon) Exec(c *Container, execConfig *execConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
+	return d.execDriver.Exec(c.command, &execConfig.ProcessConfig, pipes, startCallback)
+}
+
+func (container *Container) Exec(execConfig *execConfig) error {
+	container.Lock()
+	defer container.Unlock()
+
+	waitStart := make(chan struct{})
+
+	callback := func(processConfig *execdriver.ProcessConfig, pid int) {
+		if processConfig.Tty {
+			// The callback is called after the process Start()
+			// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
+			// which we close here.
+			if c, ok := processConfig.Stdout.(io.Closer); ok {
+				c.Close()
+			}
+		}
+		close(waitStart)
+	}
+
+	// We use a callback here instead of a goroutine and a chan for
+	// synchronization purposes
+	cErr := promise.Go(func() error { return container.monitorExec(execConfig, callback) })
+
+	// Exec should not return until the process is actually running
+	select {
+	case <-waitStart:
+	case err := <-cErr:
+		return err
+	}
+
+	return nil
+}
+
+func (container *Container) monitorExec(execConfig *execConfig, callback execdriver.StartCallback) error {
+	var (
+		err      error
+		exitCode int
+	)
+
+	pipes := execdriver.NewPipes(execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdout, execConfig.StreamConfig.stderr, execConfig.OpenStdin)
+	exitCode, err = container.daemon.Exec(container, execConfig, pipes, callback)
+	if err != nil {
+		log.Errorf("Error running command in existing container %s: %s", container.ID, err)
+	}
+
+	log.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
+	if execConfig.OpenStdin {
+		if err := execConfig.StreamConfig.stdin.Close(); err != nil {
+			log.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
+		}
+	}
+	if err := execConfig.StreamConfig.stdout.Clean(); err != nil {
+		log.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
+	}
+	if err := execConfig.StreamConfig.stderr.Clean(); err != nil {
+		log.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
+	}
+	if execConfig.ProcessConfig.Terminal != nil {
+		if err := execConfig.ProcessConfig.Terminal.Close(); err != nil {
+			log.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
+		}
+	}
+
+	return err
+}
diff -Nru docker.io-0.9.1~dfsg1/daemon/export.go docker.io-1.3.2~dfsg1/daemon/export.go
--- docker.io-0.9.1~dfsg1/daemon/export.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/daemon/export.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,30 @@
+package daemon
+
+import (
+	"io"
+
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerExport(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Usage: %s container_id", job.Name)
+	}
+	name := job.Args[0]
+	if container := daemon.Get(name); container != nil {
+		data, err := container.Export()
+		if err != nil {
+			return job.Errorf("%s: %s", name, err)
+		}
+		defer data.Close()
+
+		// Stream the entire 
contents of the container (basically a volatile snapshot) + if _, err := io.Copy(job.Stdout, data); err != nil { + return job.Errorf("%s: %s", name, err) + } + // FIXME: factor job-specific LogEvent to engine.Job.Run() + container.LogEvent("export") + return engine.StatusOK + } + return job.Errorf("No such container: %s", name) +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/aufs/aufs.go docker.io-1.3.2~dfsg1/daemon/graphdriver/aufs/aufs.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/aufs/aufs.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/aufs/aufs.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,450 @@ +/* + +aufs driver directory structure + +. +├── layers // Metadata of layers +│   ├── 1 +│   ├── 2 +│   └── 3 +├── diff // Content of the layer +│   ├── 1 // Contains layers that need to be mounted for the id +│   ├── 2 +│   └── 3 +└── mnt // Mount points for the rw layers to be mounted + ├── 1 + ├── 2 + └── 3 + +*/ + +package aufs + +import ( + "bufio" + "fmt" + "os" + "os/exec" + "path" + "strings" + "sync" + "syscall" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/log" + mountpk "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/utils" + "github.com/docker/libcontainer/label" +) + +var ( + ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") + incompatibleFsMagic = []graphdriver.FsMagic{ + graphdriver.FsMagicBtrfs, + graphdriver.FsMagicAufs, + } +) + +func init() { + graphdriver.Register("aufs", Init) +} + +type Driver struct { + root string + sync.Mutex // Protects concurrent modification to active + active map[string]int +} + +// New returns a new AUFS driver. +// An error is returned if AUFS is not supported. 
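+// The root passed in is the driver's own directory under the docker root,
+// typically /var/lib/docker/aufs (illustrative), which Init populates with the
+// mnt, diff and layers tree pictured at the top of this file.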
+func Init(root string, options []string) (graphdriver.Driver, error) { + // Try to load the aufs kernel module + if err := supportsAufs(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + rootdir := path.Dir(root) + + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return nil, fmt.Errorf("Couldn't stat the root directory: %s", err) + } + + for _, magic := range incompatibleFsMagic { + if graphdriver.FsMagic(buf.Type) == magic { + return nil, graphdriver.ErrIncompatibleFS + } + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + a := &Driver{ + root: root, + active: make(map[string]int), + } + + // Create the root aufs driver dir and return + // if it already exists + // If not populate the dir structure + if err := os.MkdirAll(root, 0755); err != nil { + if os.IsExist(err) { + return a, nil + } + return nil, err + } + + if err := graphdriver.MakePrivate(root); err != nil { + return nil, err + } + + for _, p := range paths { + if err := os.MkdirAll(path.Join(root, p), 0755); err != nil { + return nil, err + } + } + return a, nil +} + +// Return a nil error if the kernel supports aufs +// We cannot modprobe because inside dind modprobe fails +// to run +func supportsAufs() error { + // We can try to modprobe aufs first before looking at + // proc/filesystems for when aufs is supported + exec.Command("modprobe", "aufs").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.Contains(s.Text(), "aufs") { + return nil + } + } + return ErrAufsNotSupported +} + +func (a Driver) rootPath() string { + return a.root +} + +func (Driver) String() string { + return "aufs" +} + +func (a Driver) Status() [][2]string { + ids, _ := loadIds(path.Join(a.rootPath(), "layers")) + return [][2]string{ + {"Root Dir", a.rootPath()}, + {"Dirs", fmt.Sprintf("%d", len(ids))}, + } +} + +// Exists returns true if the given id is registered with +// this driver +func (a Driver) Exists(id string) bool { + if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + return false + } + return true +} + +// Three folders are created for each id +// mnt, layers, and diff +func (a *Driver) Create(id, parent string) error { + if err := a.createDirsFor(id); err != nil { + return err + } + // Write the layers metadata + f, err := os.Create(path.Join(a.rootPath(), "layers", id)) + if err != nil { + return err + } + defer f.Close() + + if parent != "" { + ids, err := getParentIds(a.rootPath(), parent) + if err != nil { + return err + } + + if _, err := fmt.Fprintln(f, parent); err != nil { + return err + } + for _, i := range ids { + if _, err := fmt.Fprintln(f, i); err != nil { + return err + } + } + } + return nil +} + +func (a *Driver) createDirsFor(id string) error { + paths := []string{ + "mnt", + "diff", + } + + for _, p := range paths { + if err := os.MkdirAll(path.Join(a.rootPath(), p, id), 0755); err != nil { + return err + } + } + return nil +} + +// Unmount and remove the dir information +func (a *Driver) Remove(id string) error { + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + if a.active[id] != 0 { + log.Errorf("Warning: removing active id %s", id) + } + + // Make sure the dir is umounted first + if err := a.unmount(id); err != nil { + return err + } + tmpDirs := []string{ + "mnt", + "diff", + } + + // Atomically remove each directory in turn by first moving it out of the + // way (so that docker doesn't 
find it anymore) before doing removal of + // the whole tree. + for _, p := range tmpDirs { + + realPath := path.Join(a.rootPath(), p, id) + tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id)) + if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) { + return err + } + defer os.RemoveAll(tmpPath) + } + + // Remove the layers file for the id + if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Return the rootfs path for the id +// This will mount the dir at it's given path +func (a *Driver) Get(id, mountLabel string) (string, error) { + ids, err := getParentIds(a.rootPath(), id) + if err != nil { + if !os.IsNotExist(err) { + return "", err + } + ids = []string{} + } + + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + count := a.active[id] + + // If a dir does not have a parent ( no layers )do not try to mount + // just return the diff path to the data + out := path.Join(a.rootPath(), "diff", id) + if len(ids) > 0 { + out = path.Join(a.rootPath(), "mnt", id) + + if count == 0 { + if err := a.mount(id, mountLabel); err != nil { + return "", err + } + } + } + + a.active[id] = count + 1 + + return out, nil +} + +func (a *Driver) Put(id string) { + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + if count := a.active[id]; count > 1 { + a.active[id] = count - 1 + } else { + ids, _ := getParentIds(a.rootPath(), id) + // We only mounted if there are any parents + if ids != nil && len(ids) > 0 { + a.unmount(id) + } + delete(a.active, id) + } +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (a *Driver) Diff(id, parent string) (archive.Archive, error) { + // AUFS doesn't need the parent layer to produce a diff. + return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + Compression: archive.Uncompressed, + }) +} + +func (a *Driver) applyDiff(id string, diff archive.ArchiveReader) error { + return chrootarchive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil) +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (a *Driver) DiffSize(id, parent string) (bytes int64, err error) { + // AUFS doesn't need the parent layer to calculate the diff size. + return utils.TreeSize(path.Join(a.rootPath(), "diff", id)) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (a *Driver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error) { + // AUFS doesn't need the parent id to apply the diff. + if err = a.applyDiff(id, diff); err != nil { + return + } + + return a.DiffSize(id, parent) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { + // AUFS doesn't have snapshots, so we need to get changes from all parent + // layers. 
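+	// archive.Changes receives the "diff" directory of every parent layer and
+	// compares them against this layer's own diff directory.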
+ layers, err := a.getParentLayerPaths(id) + if err != nil { + return nil, err + } + return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) +} + +func (a *Driver) getParentLayerPaths(id string) ([]string, error) { + parentIds, err := getParentIds(a.rootPath(), id) + if err != nil { + return nil, err + } + layers := make([]string, len(parentIds)) + + // Get the diff paths for all the parent ids + for i, p := range parentIds { + layers[i] = path.Join(a.rootPath(), "diff", p) + } + return layers, nil +} + +func (a *Driver) mount(id, mountLabel string) error { + // If the id is mounted or we get an error return + if mounted, err := a.mounted(id); err != nil || mounted { + return err + } + + var ( + target = path.Join(a.rootPath(), "mnt", id) + rw = path.Join(a.rootPath(), "diff", id) + ) + + layers, err := a.getParentLayerPaths(id) + if err != nil { + return err + } + + if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { + return err + } + return nil +} + +func (a *Driver) unmount(id string) error { + if mounted, err := a.mounted(id); err != nil || !mounted { + return err + } + target := path.Join(a.rootPath(), "mnt", id) + return Unmount(target) +} + +func (a *Driver) mounted(id string) (bool, error) { + target := path.Join(a.rootPath(), "mnt", id) + return mountpk.Mounted(target) +} + +// During cleanup aufs needs to unmount all mountpoints +func (a *Driver) Cleanup() error { + ids, err := loadIds(path.Join(a.rootPath(), "layers")) + if err != nil { + return err + } + + for _, id := range ids { + if err := a.unmount(id); err != nil { + log.Errorf("Unmounting %s: %s", utils.TruncateID(id), err) + } + } + + return mountpk.Unmount(a.root) +} + +func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { + defer func() { + if err != nil { + Unmount(target) + } + }() + + if err = a.tryMount(ro, rw, target, mountLabel); err != nil { + if err = a.mountRw(rw, target, mountLabel); err != nil { + return + } + + for _, layer := range ro { + data := label.FormatMountLabel(fmt.Sprintf("append:%s=ro+wh", layer), mountLabel) + if err = mount("none", target, "aufs", MsRemount, data); err != nil { + return + } + } + } + return +} + +// Try to mount using the aufs fast path, if this fails then +// append ro layers. 
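+// For a layer with a single parent the resulting branch spec looks like
+// (illustrative paths):
+//   br:/var/lib/docker/aufs/diff/<id>=rw:/var/lib/docker/aufs/diff/<parent>=ro+wh:,xino=/dev/shm/aufs.xino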
+func (a *Driver) tryMount(ro []string, rw, target, mountLabel string) (err error) { + var ( + rwBranch = fmt.Sprintf("%s=rw", rw) + roBranches = fmt.Sprintf("%s=ro+wh:", strings.Join(ro, "=ro+wh:")) + data = label.FormatMountLabel(fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches), mountLabel) + ) + return mount("none", target, "aufs", 0, data) +} + +func (a *Driver) mountRw(rw, target, mountLabel string) error { + data := label.FormatMountLabel(fmt.Sprintf("br:%s,xino=/dev/shm/aufs.xino", rw), mountLabel) + return mount("none", target, "aufs", 0, data) +} + +func rollbackMount(target string, err error) { + if err != nil { + Unmount(target) + } +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/aufs/aufs_test.go docker.io-1.3.2~dfsg1/daemon/graphdriver/aufs/aufs_test.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/aufs/aufs_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/aufs/aufs_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,703 @@ +package aufs + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io/ioutil" + "os" + "path" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +var ( + tmp = path.Join(os.TempDir(), "aufs-tests", "aufs") +) + +func init() { + reexec.Init() +} + +func testInit(dir string, t *testing.T) graphdriver.Driver { + d, err := Init(dir, nil) + if err != nil { + if err == graphdriver.ErrNotSupported { + t.Skip(err) + } else { + t.Fatal(err) + } + } + return d +} + +func newDriver(t *testing.T) *Driver { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + return d.(*Driver) +} + +func TestNewDriver(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + defer os.RemoveAll(tmp) + if d == nil { + t.Fatalf("Driver should not be nil") + } +} + +func TestAufsString(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if d.String() != "aufs" { + t.Fatalf("Expected aufs got %s", d.String()) + } +} + +func TestCreateDirStructure(t *testing.T) { + newDriver(t) + defer os.RemoveAll(tmp) + + paths := []string{ + "mnt", + "layers", + "diff", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p)); err != nil { + t.Fatal(err) + } + } +} + +// We should be able to create two drivers with the same dir structure +func TestNewDriverFromExistingDir(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + testInit(tmp, t) + testInit(tmp, t) + os.RemoveAll(tmp) +} + +func TestCreateNewDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } +} + +func TestCreateNewDirStructure(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p, "1")); err != nil { + t.Fatal(err) + } + } +} + +func TestRemoveImage(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + if err := d.Remove("1"); err != nil { + t.Fatal(err) + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil { + t.Fatalf("Error should not be nil because dirs with id 1 
should be deleted: %s", p) + } + } +} + +func TestGetWithoutParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + expected := path.Join(tmp, "diff", "1") + if diffPath != expected { + t.Fatalf("Expected path %s got %s", expected, diffPath) + } +} + +func TestCleanupWithNoDirs(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } +} + +func TestCleanupWithDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } +} + +func TestMountedFalseResponse(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + response, err := d.mounted("1") + if err != nil { + t.Fatal(err) + } + + if response != false { + t.Fatalf("Response if dir id 1 is mounted should be false") + } +} + +func TestMountedTrueResponse(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + _, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + + response, err := d.mounted("2") + if err != nil { + t.Fatal(err) + } + + if response != true { + t.Fatalf("Response if dir id 2 is mounted should be true") + } +} + +func TestMountWithParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPath, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + if mntPath == "" { + t.Fatal("mntPath should not be empty string") + } + + expected := path.Join(tmp, "mnt", "2") + if mntPath != expected { + t.Fatalf("Expected %s got %s", expected, mntPath) + } +} + +func TestRemoveMountedDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPath, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + if mntPath == "" { + t.Fatal("mntPath should not be empty string") + } + + mounted, err := d.mounted("2") + if err != nil { + t.Fatal(err) + } + + if !mounted { + t.Fatalf("Dir id 2 should be mounted") + } + + if err := d.Remove("2"); err != nil { + t.Fatal(err) + } +} + +func TestCreateWithInvalidParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "docker"); err == nil { + t.Fatalf("Error should not be nil when the parent does not exist") + } +} + +func TestGetDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + a, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + if 
a == nil { + t.Fatalf("Archive should not be nil") + } +} + +func TestChanges(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPoint, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err := os.Create(path.Join(mntPoint, "test.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err := d.Changes("2", "") + if err != nil { + t.Fatal(err) + } + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change := changes[0] + + expectedPath := "/test.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } + + if err := d.Create("3", "2"); err != nil { + t.Fatal(err) + } + mntPoint, err = d.Get("3", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err = os.Create(path.Join(mntPoint, "test2.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err = d.Changes("3", "") + if err != nil { + t.Fatal(err) + } + + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change = changes[0] + + expectedPath = "/test2.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } +} + +func TestDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } +} + +func TestChildDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } + + if err := 
d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + diffSize, err = d.DiffSize("2", "") + if err != nil { + t.Fatal(err) + } + // The diff size for the child should be zero + if diffSize != 0 { + t.Fatalf("Expected size to be %d got %d", 0, diffSize) + } +} + +func TestExists(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + if d.Exists("none") { + t.Fatal("id name should not exist in the driver") + } + + if !d.Exists("1") { + t.Fatal("id 1 should exist in the driver") + } +} + +func TestStatus(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + status := d.Status() + if status == nil || len(status) == 0 { + t.Fatal("Status should not be nil or empty") + } + rootDir := status[0] + dirs := status[1] + if rootDir[0] != "Root Dir" { + t.Fatalf("Expected Root Dir got %s", rootDir[0]) + } + if rootDir[1] != d.rootPath() { + t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1]) + } + if dirs[0] != "Dirs" { + t.Fatalf("Expected Dirs got %s", dirs[0]) + } + if dirs[1] != "1" { + t.Fatalf("Expected 1 got %s", dirs[1]) + } +} + +func TestApplyDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + diff, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + + if err := d.Create("2", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("3", "2"); err != nil { + t.Fatal(err) + } + + if err := d.applyDiff("3", diff); err != nil { + t.Fatal(err) + } + + // Ensure that the file is in the mount point for id 3 + + mountPoint, err := d.Get("3", "") + if err != nil { + t.Fatal(err) + } + if _, err := os.Stat(path.Join(mountPoint, "test_file")); err != nil { + t.Fatal(err) + } +} + +func hash(c string) string { + h := sha256.New() + fmt.Fprint(h, c) + return hex.EncodeToString(h.Sum(nil)) +} + +func TestMountMoreThan42Layers(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + var last string + var expected int + + for i := 1; i < 127; i++ { + expected++ + var ( + parent = fmt.Sprintf("%d", i-1) + current = fmt.Sprintf("%d", i) + ) + + if parent == "0" { + parent = "" + } else { + parent = hash(parent) + } + current = hash(current) + + if err := d.Create(current, parent); err != nil { + t.Logf("Current layer %d", i) + t.Fatal(err) + } + point, err := d.Get(current, "") + if err != nil { + t.Logf("Current layer %d", i) + t.Fatal(err) + } + f, err := os.Create(path.Join(point, current)) + if err != nil { + t.Logf("Current layer %d", i) + t.Fatal(err) + } + f.Close() + + if i%10 == 0 { + if err := os.Remove(path.Join(point, parent)); err != nil { + t.Logf("Current layer %d", i) + t.Fatal(err) + } + expected-- + } + last = current + } + + // Perform the actual mount for the top most image + point, err := d.Get(last, "") + if err != nil { + t.Fatal(err) + } + files, err := ioutil.ReadDir(point) + if err != nil { + t.Fatal(err) + } + if len(files) != expected { + t.Fatalf("Expected %d got %d", expected, len(files)) + } +} diff -Nru 
docker.io-0.9.1~dfsg1/daemon/graphdriver/aufs/dirs.go docker.io-1.3.2~dfsg1/daemon/graphdriver/aufs/dirs.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/aufs/dirs.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/aufs/dirs.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,46 @@ +package aufs + +import ( + "bufio" + "io/ioutil" + "os" + "path" +) + +// Return the ids of all layers under root (stored as non-directory entries) +func loadIds(root string) ([]string, error) { + dirs, err := ioutil.ReadDir(root) + if err != nil { + return nil, err + } + out := []string{} + for _, d := range dirs { + if !d.IsDir() { + out = append(out, d.Name()) + } + } + return out, nil +} + +// Read the layers file for the current id and return all the +// layers represented by new lines in the file +// +// If there are no lines in the file then the id has no parent +// and an empty slice is returned. +func getParentIds(root, id string) ([]string, error) { + f, err := os.Open(path.Join(root, "layers", id)) + if err != nil { + return nil, err + } + defer f.Close() + + out := []string{} + s := bufio.NewScanner(f) + + for s.Scan() { + if t := s.Text(); t != "" { + out = append(out, s.Text()) + } + } + return out, s.Err() +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/aufs/migrate.go docker.io-1.3.2~dfsg1/daemon/graphdriver/aufs/migrate.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/aufs/migrate.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/aufs/migrate.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,194 @@ +package aufs + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path" +) + +type metadata struct { + ID string `json:"id"` + ParentID string `json:"parent,omitempty"` + Image string `json:"Image,omitempty"` + + parent *metadata +} + +func pathExists(pth string) bool { + if _, err := os.Stat(pth); err != nil { + return false + } + return true +} + +// Migrate existing images and containers from docker < 0.7.x +// +// The format pre 0.7 is for docker to store the metadata and filesystem +// content in the same directory. For the migration to work we need to move Image layer +// data from /var/lib/docker/graph/<id>/layers to the diff of the registered id. +// +// Next we need to migrate the container's rw layer to the diff of the driver. After the +// contents are migrated we need to register the image and container ids with the +// driver. +// +// For the migration we try to move the folder containing the layer files, if that +// fails because the data is currently mounted we will fall back to creating a +// symlink. 
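The rename-with-symlink-fallback strategy described above is easiest to see in isolation. A minimal standalone sketch of the pattern follows (hypothetical paths and the helper name relocate are illustrative, not part of this patch):

package main

import (
	"fmt"
	"os"
)

// relocate mirrors the strategy described above: try an atomic rename
// first, and if that fails (for example, because the source is still
// mounted), leave the data in place and point a symlink at it instead.
func relocate(oldPath, newPath string) error {
	if err := os.Rename(oldPath, newPath); err != nil {
		if sErr := os.Symlink(oldPath, newPath); sErr != nil {
			return fmt.Errorf("unable to relocate %s to %s: rename err %v, symlink err %v", oldPath, newPath, err, sErr)
		}
	}
	return nil
}

func main() {
	// Hypothetical example paths.
	fmt.Println(relocate("/var/lib/docker/graph/abc/layer", "/var/lib/docker/aufs/diff/abc"))
}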
+func (a *Driver) Migrate(pth string, setupInit func(p string) error) error { + if pathExists(path.Join(pth, "graph")) { + if err := a.migrateRepositories(pth); err != nil { + return err + } + if err := a.migrateImages(path.Join(pth, "graph")); err != nil { + return err + } + return a.migrateContainers(path.Join(pth, "containers"), setupInit) + } + return nil +} + +func (a *Driver) migrateRepositories(pth string) error { + name := path.Join(pth, "repositories") + if err := os.Rename(name, name+"-aufs"); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) error { + fis, err := ioutil.ReadDir(pth) + if err != nil { + return err + } + + for _, fi := range fis { + if id := fi.Name(); fi.IsDir() && pathExists(path.Join(pth, id, "rw")) { + if err := tryRelocate(path.Join(pth, id, "rw"), path.Join(a.rootPath(), "diff", id)); err != nil { + return err + } + + if !a.Exists(id) { + + metadata, err := loadMetadata(path.Join(pth, id, "config.json")) + if err != nil { + return err + } + + initID := fmt.Sprintf("%s-init", id) + if err := a.Create(initID, metadata.Image); err != nil { + return err + } + + initPath, err := a.Get(initID, "") + if err != nil { + return err + } + // setup init layer + if err := setupInit(initPath); err != nil { + return err + } + + if err := a.Create(id, initID); err != nil { + return err + } + } + } + } + return nil +} + +func (a *Driver) migrateImages(pth string) error { + fis, err := ioutil.ReadDir(pth) + if err != nil { + return err + } + var ( + m = make(map[string]*metadata) + current *metadata + exists bool + ) + + for _, fi := range fis { + if id := fi.Name(); fi.IsDir() && pathExists(path.Join(pth, id, "layer")) { + if current, exists = m[id]; !exists { + current, err = loadMetadata(path.Join(pth, id, "json")) + if err != nil { + return err + } + m[id] = current + } + } + } + + for _, v := range m { + v.parent = m[v.ParentID] + } + + migrated := make(map[string]bool) + for _, v := range m { + if err := a.migrateImage(v, pth, migrated); err != nil { + return err + } + } + return nil +} + +func (a *Driver) migrateImage(m *metadata, pth string, migrated map[string]bool) error { + if !migrated[m.ID] { + if m.parent != nil { + a.migrateImage(m.parent, pth, migrated) + } + if err := tryRelocate(path.Join(pth, m.ID, "layer"), path.Join(a.rootPath(), "diff", m.ID)); err != nil { + return err + } + if !a.Exists(m.ID) { + if err := a.Create(m.ID, m.ParentID); err != nil { + return err + } + } + migrated[m.ID] = true + } + return nil +} + +// tryRelocate will try to rename the old path to the new path and if +// the operation fails, it will fall back to a symlink +func tryRelocate(oldPath, newPath string) error { + s, err := os.Lstat(newPath) + if err != nil && !os.IsNotExist(err) { + return err + } + // If the destination is a symlink then we already tried to relocate once before + // and it failed, so we delete it and retry the rename + if s != nil && s.Mode()&os.ModeSymlink == os.ModeSymlink { + if err := os.RemoveAll(newPath); err != nil { + return err + } + } + if err := os.Rename(oldPath, newPath); err != nil { + if sErr := os.Symlink(oldPath, newPath); sErr != nil { + return fmt.Errorf("Unable to relocate %s to %s: Rename err %s Symlink err %s", oldPath, newPath, err, sErr) + } + } + return nil +} + +func loadMetadata(pth string) (*metadata, error) { + f, err := os.Open(pth) + if err != nil { + return nil, err + } + defer f.Close() + + var ( + out = &metadata{} + dec = 
json.NewDecoder(f) + ) + + if err := dec.Decode(out); err != nil { + return nil, err + } + return out, nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/aufs/mount.go docker.io-1.3.2~dfsg1/daemon/graphdriver/aufs/mount.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/aufs/mount.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/aufs/mount.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,18 @@ +package aufs + +import ( + "os/exec" + "syscall" + + "github.com/docker/docker/pkg/log" +) + +func Unmount(target string) error { + if err := exec.Command("auplink", target, "flush").Run(); err != nil { + log.Errorf("[warning]: couldn't run auplink before unmount: %s", err) + } + if err := syscall.Unmount(target, 0); err != nil { + return err + } + return nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/aufs/mount_linux.go docker.io-1.3.2~dfsg1/daemon/graphdriver/aufs/mount_linux.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/aufs/mount_linux.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/aufs/mount_linux.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,9 @@ +package aufs + +import "syscall" + +const MsRemount = syscall.MS_REMOUNT + +func mount(source string, target string, fstype string, flags uintptr, data string) error { + return syscall.Mount(source, target, fstype, flags, data) +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/aufs/mount_unsupported.go docker.io-1.3.2~dfsg1/daemon/graphdriver/aufs/mount_unsupported.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/aufs/mount_unsupported.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/aufs/mount_unsupported.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,11 @@ +// +build !linux + +package aufs + +import "errors" + +const MsRemount = 0 + +func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + return errors.New("mount is not implemented on this platform") +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/btrfs/btrfs.go docker.io-1.3.2~dfsg1/daemon/graphdriver/btrfs/btrfs.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/btrfs/btrfs.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/btrfs/btrfs.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,225 @@ +// +build linux + +package btrfs + +/* +#include <stdlib.h> +#include <dirent.h> +#include <btrfs/ioctl.h> +*/ +import "C" + +import ( + "fmt" + "os" + "path" + "syscall" + "unsafe" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/mount" +) + +func init() { + graphdriver.Register("btrfs", Init) +} + +func Init(home string, options []string) (graphdriver.Driver, error) { + rootdir := path.Dir(home) + + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return nil, err + } + + if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicBtrfs { + return nil, graphdriver.ErrPrerequisites + } + + if err := os.MkdirAll(home, 0700); err != nil { + return nil, err + } + + if err := graphdriver.MakePrivate(home); err != nil { + return nil, err + } + + driver := &Driver{ + home: home, + } + + return graphdriver.NaiveDiffDriver(driver), nil +} + +type Driver struct { + home string +} + +func (d *Driver) String() string { + return "btrfs" +} + +func (d *Driver) Status() [][2]string { + return nil +} + +func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := 
C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func subvolDelete(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to destroy btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirId(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +func (d *Driver) Create(id string, parent string) error { + subvolumes := path.Join(d.home, "subvolumes") + if err := os.MkdirAll(subvolumes, 0700); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + } else { + parentDir, err := d.Get(parent, "") + if err != nil { + return err + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + return nil +} + +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirId(id) + if _, err := os.Stat(dir); err != nil { + return err + } + if err := subvolDelete(d.subvolumesDir(), id); err != nil { + return err + } + return os.RemoveAll(dir) +} + +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.subvolumesDirId(id) + st, err := os.Stat(dir) + if err != nil { + return "", err + } + + if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + + return dir, nil +} + +func (d *Driver) Put(id string) { + // Get() creates no runtime resources (like e.g. mounts) + // so this doesn't need to do anything. 
+} + +func (d *Driver) Exists(id string) bool { + dir := d.subvolumesDirId(id) + _, err := os.Stat(dir) + return err == nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/btrfs/btrfs_test.go docker.io-1.3.2~dfsg1/daemon/graphdriver/btrfs/btrfs_test.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/btrfs/btrfs_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/btrfs/btrfs_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,28 @@ +package btrfs + +import ( + "github.com/docker/docker/daemon/graphdriver/graphtest" + "testing" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestBtrfsSetup and TestBtrfsTeardown +func TestBtrfsSetup(t *testing.T) { + graphtest.GetDriver(t, "btrfs") +} + +func TestBtrfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "btrfs") +} + +func TestBtrfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "btrfs") +} + +func TestBtrfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "btrfs") +} + +func TestBtrfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/btrfs/dummy_unsupported.go docker.io-1.3.2~dfsg1/daemon/graphdriver/btrfs/dummy_unsupported.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/btrfs/dummy_unsupported.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/btrfs/dummy_unsupported.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,3 @@ +// +build !linux !cgo + +package btrfs diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/btrfs/MAINTAINERS docker.io-1.3.2~dfsg1/daemon/graphdriver/btrfs/MAINTAINERS --- docker.io-0.9.1~dfsg1/daemon/graphdriver/btrfs/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/btrfs/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +Alexander Larsson (@alexlarsson) diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/attach_loopback.go docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/attach_loopback.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/attach_loopback.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/attach_loopback.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,129 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "os" + "syscall" + + "github.com/docker/docker/pkg/log" +) + +func stringToLoopName(src string) [LoNameSize]uint8 { + var dst [LoNameSize]uint8 + copy(dst[:], src[:]) + return dst +} + +func getNextFreeLoopbackIndex() (int, error) { + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) + if err != nil { + return 0, err + } + defer f.Close() + + index, err := ioctlLoopCtlGetFree(f.Fd()) + if index < 0 { + index = 0 + } + return index, err +} + +func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) { + // Start looking for a free /dev/loop + for { + target := fmt.Sprintf("/dev/loop%d", index) + index++ + + fi, err := os.Stat(target) + if err != nil { + if os.IsNotExist(err) { + log.Errorf("There are no more loopback devices available.") + } + return nil, ErrAttachLoopbackDevice + } + + if fi.Mode()&os.ModeDevice != os.ModeDevice { + log.Errorf("Loopback device %s is not a block device.", target) + continue + } + + // OpenFile adds O_CLOEXEC + loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) + if err != nil { + log.Errorf("Error opening loopback device: %s", err) + return nil, 
ErrAttachLoopbackDevice + } + + // Try to attach to the loop file + if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { + loopFile.Close() + + // If the error is EBUSY, then try the next loopback + if err != syscall.EBUSY { + log.Errorf("Cannot set up loopback device %s: %s", target, err) + return nil, ErrAttachLoopbackDevice + } + + // Otherwise, we keep going with the loop + continue + } + // In case of success, we finished. Break the loop. + break + } + + // This can't happen, but let's be sure + if loopFile == nil { + log.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} + +// attachLoopDevice attaches the given sparse file to the next +// available loopback device. It returns an opened *os.File. +func attachLoopDevice(sparseName string) (loop *os.File, err error) { + + // Try to retrieve the next available loopback device via syscall. + // If it fails, we discard the error and start looking for a + // loopback from index 0. + startIndex, err := getNextFreeLoopbackIndex() + if err != nil { + log.Debugf("Error retrieving the next available loopback: %s", err) + } + + // OpenFile adds O_CLOEXEC + sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) + if err != nil { + log.Errorf("Error opening sparse file %s: %s", sparseName, err) + return nil, ErrAttachLoopbackDevice + } + defer sparseFile.Close() + + loopFile, err := openNextAvailableLoopback(startIndex, sparseFile) + if err != nil { + return nil, err + } + + // Set the status of the loopback device + loopInfo := &LoopInfo64{ + loFileName: stringToLoopName(loopFile.Name()), + loOffset: 0, + loFlags: LoFlagsAutoClear, + } + + if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { + log.Errorf("Cannot set up loopback device info: %s", err) + + // If the call failed, then free the loopback device + if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { + log.Errorf("Error while cleaning up the loopback device") + } + loopFile.Close() + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/deviceset.go docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/deviceset.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/deviceset.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/deviceset.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,1253 @@ +// +build linux + +package devmapper + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/units" + "github.com/docker/libcontainer/label" +) + +var ( + DefaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 + DefaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 + DefaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 + DefaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors +) + +type DevInfo struct { + Hash string `json:"-"` + DeviceId int `json:"device_id"` + Size uint64 `json:"size"` + TransactionId uint64 `json:"transaction_id"` + Initialized bool `json:"initialized"` + devices *DeviceSet `json:"-"` + + mountCount int `json:"-"` + mountPath string `json:"-"` + + // The global DeviceSet lock guarantees that we serialize all + // the calls to 
libdevmapper (which is not threadsafe), but we + // sometimes release that lock while sleeping. In that case + // this per-device lock is still held, protecting against + // other accesses to the device that we're doing the wait on. + // + // WARNING: In order to avoid AB-BA deadlocks when releasing + // the global lock while holding the per-device locks all + // device locks must be acquired *before* the global lock, and + // multiple device locks should be acquired parent before child. + lock sync.Mutex `json:"-"` +} + +type MetaData struct { + Devices map[string]*DevInfo `json:"Devices"` + devicesLock sync.Mutex `json:"-"` // Protects all read/writes to Devices map +} + +type DeviceSet struct { + MetaData + sync.Mutex // Protects Devices map and serializes calls into libdevmapper + root string + devicePrefix string + TransactionId uint64 + NewTransactionId uint64 + nextDeviceId int + + // Options + dataLoopbackSize int64 + metaDataLoopbackSize int64 + baseFsSize uint64 + filesystem string + mountOptions string + mkfsArgs []string + dataDevice string + metadataDevice string + doBlkDiscard bool + thinpBlockSize uint32 +} + +type DiskUsage struct { + Used uint64 + Total uint64 +} + +type Status struct { + PoolName string + DataLoopback string + MetadataLoopback string + Data DiskUsage + Metadata DiskUsage + SectorSize uint64 +} + +type DevStatus struct { + DeviceId int + Size uint64 + TransactionId uint64 + SizeInSectors uint64 + MappedSectors uint64 + HighestMappedSector uint64 +} + +func getDevName(name string) string { + return "/dev/mapper/" + name +} + +func (info *DevInfo) Name() string { + hash := info.Hash + if hash == "" { + hash = "base" + } + return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) +} + +func (info *DevInfo) DevName() string { + return getDevName(info.Name()) +} + +func (devices *DeviceSet) loopbackDir() string { + return path.Join(devices.root, "devicemapper") +} + +func (devices *DeviceSet) metadataDir() string { + return path.Join(devices.root, "metadata") +} + +func (devices *DeviceSet) metadataFile(info *DevInfo) string { + file := info.Hash + if file == "" { + file = "base" + } + return path.Join(devices.metadataDir(), file) +} + +func (devices *DeviceSet) oldMetadataFile() string { + return path.Join(devices.loopbackDir(), "json") +} + +func (devices *DeviceSet) getPoolName() string { + return devices.devicePrefix + "-pool" +} + +func (devices *DeviceSet) getPoolDevName() string { + return getDevName(devices.getPoolName()) +} + +func (devices *DeviceSet) hasImage(name string) bool { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + _, err := os.Stat(filename) + return err == nil +} + +// ensureImage creates a sparse file of <size> bytes at the path +// <root>/devicemapper/<name>. +// If the file already exists, it does nothing. +// Either way it returns the full path. 
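The doc comment above leans on the fact that Truncate on a freshly created file yields a sparse file: the size is extended without allocating data blocks until they are written. A standalone sketch of the same trick (hypothetical path and size, not part of this patch):

package main

import (
	"log"
	"os"
)

func main() {
	// Open or create the backing file, then grow it to 100 GB.
	// On filesystems with sparse-file support this consumes almost
	// no disk space until blocks are actually written.
	f, err := os.OpenFile("/tmp/data.img", os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if err := f.Truncate(100 * 1024 * 1024 * 1024); err != nil {
		log.Fatal(err)
	}
}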
+func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + if err := os.MkdirAll(dirname, 0700); err != nil && !os.IsExist(err) { + return "", err + } + + if _, err := os.Stat(filename); err != nil { + if !os.IsNotExist(err) { + return "", err + } + log.Debugf("Creating loopback file %s for device-mapper use", filename) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + + if err = file.Truncate(size); err != nil { + return "", err + } + } + return filename, nil +} + +func (devices *DeviceSet) allocateTransactionId() uint64 { + devices.NewTransactionId = devices.NewTransactionId + 1 + return devices.NewTransactionId +} + +func (devices *DeviceSet) removeMetadata(info *DevInfo) error { + if err := os.RemoveAll(devices.metadataFile(info)); err != nil { + return fmt.Errorf("Error removing metadata file %s: %s", devices.metadataFile(info), err) + } + return nil +} + +func (devices *DeviceSet) saveMetadata(info *DevInfo) error { + jsonData, err := json.Marshal(info) + if err != nil { + return fmt.Errorf("Error encoding metadata to json: %s", err) + } + tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") + if err != nil { + return fmt.Errorf("Error creating metadata file: %s", err) + } + + n, err := tmpFile.Write(jsonData) + if err != nil { + return fmt.Errorf("Error writing metadata to %s: %s", tmpFile.Name(), err) + } + if n < len(jsonData) { + return io.ErrShortWrite + } + if err := tmpFile.Sync(); err != nil { + return fmt.Errorf("Error syncing metadata file %s: %s", tmpFile.Name(), err) + } + if err := tmpFile.Close(); err != nil { + return fmt.Errorf("Error closing metadata file %s: %s", tmpFile.Name(), err) + } + if err := os.Rename(tmpFile.Name(), devices.metadataFile(info)); err != nil { + return fmt.Errorf("Error committing metadata file %s: %s", tmpFile.Name(), err) + } + + if devices.NewTransactionId != devices.TransactionId { + if err = setTransactionId(devices.getPoolDevName(), devices.TransactionId, devices.NewTransactionId); err != nil { + return fmt.Errorf("Error setting devmapper transaction ID: %s", err) + } + devices.TransactionId = devices.NewTransactionId + } + return nil +} + +func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) { + devices.devicesLock.Lock() + defer devices.devicesLock.Unlock() + info := devices.Devices[hash] + if info == nil { + info = devices.loadMetadata(hash) + if info == nil { + return nil, fmt.Errorf("Unknown device %s", hash) + } + + devices.Devices[hash] = info + } + return info, nil +} + +func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*DevInfo, error) { + log.Debugf("registerDevice(%v, %v)", id, hash) + info := &DevInfo{ + Hash: hash, + DeviceId: id, + Size: size, + TransactionId: devices.allocateTransactionId(), + Initialized: false, + devices: devices, + } + + devices.devicesLock.Lock() + devices.Devices[hash] = info + devices.devicesLock.Unlock() + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + devices.devicesLock.Lock() + delete(devices.Devices, hash) + devices.devicesLock.Unlock() + return nil, err + } + + return info, nil +} + +func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error { + log.Debugf("activateDeviceIfNeeded(%v)", info.Hash) + + if devinfo, _ := getInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { + return nil + } + + return 
activateDevice(devices.getPoolDevName(), info.Name(), info.DeviceId, info.Size) +} + +func (devices *DeviceSet) createFilesystem(info *DevInfo) error { + devname := info.DevName() + + args := []string{} + for _, arg := range devices.mkfsArgs { + args = append(args, arg) + } + + args = append(args, devname) + + var err error + switch devices.filesystem { + case "xfs": + err = exec.Command("mkfs.xfs", args...).Run() + case "ext4": + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() + if err != nil { + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() + } + if err != nil { + return err + } + err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() + default: + err = fmt.Errorf("Unsupported filesystem type %s", devices.filesystem) + } + if err != nil { + return err + } + + return nil +} + +func (devices *DeviceSet) initMetaData() error { + _, _, _, params, err := getStatus(devices.getPoolName()) + if err != nil { + return err + } + + if _, err := fmt.Sscanf(params, "%d", &devices.TransactionId); err != nil { + return err + } + devices.NewTransactionId = devices.TransactionId + + // Migrate old metadatafile + + jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + if err != nil && !os.IsNotExist(err) { + return err + } + + if jsonData != nil { + m := MetaData{Devices: make(map[string]*DevInfo)} + + if err := json.Unmarshal(jsonData, &m); err != nil { + return err + } + + for hash, info := range m.Devices { + info.Hash = hash + + // If the transaction id is larger than the actual one we lost the device due to some crash + if info.TransactionId <= devices.TransactionId { + devices.saveMetadata(info) + } + } + if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { + return err + } + + } + + return nil +} + +func (devices *DeviceSet) loadMetadata(hash string) *DevInfo { + info := &DevInfo{Hash: hash, devices: devices} + + jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + if err != nil { + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + return nil + } + + // If the transaction id is larger than the actual one we lost the device due to some crash + if info.TransactionId > devices.TransactionId { + return nil + } + + return info +} + +func (devices *DeviceSet) setupBaseImage() error { + oldInfo, _ := devices.lookupDevice("") + if oldInfo != nil && oldInfo.Initialized { + return nil + } + + if oldInfo != nil && !oldInfo.Initialized { + log.Debugf("Removing uninitialized base image") + if err := devices.deleteDevice(oldInfo); err != nil { + return err + } + } + + log.Debugf("Initializing base device-manager snapshot") + + id := devices.nextDeviceId + + // Create initial device + if err := createDevice(devices.getPoolDevName(), &id); err != nil { + return err + } + + // Ids are 24bit, so wrap around + devices.nextDeviceId = (id + 1) & 0xffffff + + log.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize) + info, err := devices.registerDevice(id, "", devices.baseFsSize) + if err != nil { + _ = deleteDevice(devices.getPoolDevName(), id) + return err + } + + log.Debugf("Creating filesystem on base device-manager snapshot") + + if err = devices.activateDeviceIfNeeded(info); err != nil { + return err + } + + if err := devices.createFilesystem(info); err != nil { + return err + } + + info.Initialized = true + if err = 
devices.saveMetadata(info); err != nil { + info.Initialized = false + return err + } + + return nil +} + +func setCloseOnExec(name string) { + if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { + for _, i := range fileInfos { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + syscall.CloseOnExec(fd) + } + } + } + } +} + +func (devices *DeviceSet) log(level int, file string, line int, dmError int, message string) { + if level >= 7 { + return // Ignore _LOG_DEBUG + } + + log.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +func (devices *DeviceSet) ResizePool(size int64) error { + dirname := devices.loopbackDir() + datafilename := path.Join(dirname, "data") + if len(devices.dataDevice) > 0 { + datafilename = devices.dataDevice + } + metadatafilename := path.Join(dirname, "metadata") + if len(devices.metadataDevice) > 0 { + metadatafilename = devices.metadataDevice + } + + datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) + if datafile == nil { + return err + } + defer datafile.Close() + + fi, err := datafile.Stat() + if fi == nil { + return err + } + + if fi.Size() > size { + return fmt.Errorf("Can't shrink file") + } + + dataloopback := FindLoopDeviceFor(datafile) + if dataloopback == nil { + return fmt.Errorf("Unable to find loopback mount for: %s", datafilename) + } + defer dataloopback.Close() + + metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) + if metadatafile == nil { + return err + } + defer metadatafile.Close() + + metadataloopback := FindLoopDeviceFor(metadatafile) + if metadataloopback == nil { + return fmt.Errorf("Unable to find loopback mount for: %s", metadatafilename) + } + defer metadataloopback.Close() + + // Grow loopback file + if err := datafile.Truncate(size); err != nil { + return fmt.Errorf("Unable to grow loopback file: %s", err) + } + + // Reload size for loopback device + if err := LoopbackSetCapacity(dataloopback); err != nil { + return fmt.Errorf("Unable to update loopback capacity: %s", err) + } + + // Suspend the pool + if err := suspendDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("Unable to suspend pool: %s", err) + } + + // Reload with the new block sizes + if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { + return fmt.Errorf("Unable to reload pool: %s", err) + } + + // Resume the pool + if err := resumeDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("Unable to resume pool: %s", err) + } + + return nil +} + +func (devices *DeviceSet) initDevmapper(doInit bool) error { + logInit(devices) + + _, err := getDriverVersion() + if err != nil { + // Can't even get driver version, assume not supported + return graphdriver.ErrNotSupported + } + + if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) { + return err + } + + // Set the device prefix from the device id and inode of the docker root dir + + st, err := os.Stat(devices.root) + if err != nil { + return fmt.Errorf("Error looking up dir %s: %s", devices.root, err) + } + sysSt := st.Sys().(*syscall.Stat_t) + // "reg-" stands for "regular file". + // In the future we might use "dev-" for "device file", etc. 
+ // docker-maj,min[-inode] stands for: + // - Managed by docker + // - The target of this device is at major <maj> and minor <min> + // - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself. + devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) + log.Debugf("Generated prefix: %s", devices.devicePrefix) + + // Check for the existence of the device <prefix>-pool + log.Debugf("Checking for existence of the pool '%s'", devices.getPoolName()) + info, err := getInfo(devices.getPoolName()) + if info == nil { + log.Debugf("Error device getInfo: %s", err) + return err + } + + // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files + // that are not Close-on-exec, and lxc-start will die if it inherits any unexpected files, + // so we add this bad hack to make sure it closes itself + setCloseOnExec("/dev/mapper/control") + + // Make sure the sparse images exist in <root>/devicemapper/data and + // <root>/devicemapper/metadata + + createdLoopback := false + + // If the pool doesn't exist, create it + if info.Exists == 0 { + log.Debugf("Pool doesn't exist. Creating it.") + + var ( + dataFile *os.File + metadataFile *os.File + ) + + if devices.dataDevice == "" { + // Make sure the sparse images exist in <root>/devicemapper/data + + hasData := devices.hasImage("data") + + if !doInit && !hasData { + return errors.New("Loopback data file not found") + } + + if !hasData { + createdLoopback = true + } + + data, err := devices.ensureImage("data", devices.dataLoopbackSize) + if err != nil { + log.Debugf("Error device ensureImage (data): %s", err) + return err + } + + dataFile, err = attachLoopDevice(data) + if err != nil { + return err + } + } else { + dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer dataFile.Close() + + if devices.metadataDevice == "" { + // Make sure the sparse images exist in <root>/devicemapper/metadata + + hasMetadata := devices.hasImage("metadata") + + if !doInit && !hasMetadata { + return errors.New("Loopback metadata file not found") + } + + if !hasMetadata { + createdLoopback = true + } + + metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) + if err != nil { + log.Debugf("Error device ensureImage (metadata): %s", err) + return err + } + + metadataFile, err = attachLoopDevice(metadata) + if err != nil { + return err + } + } else { + metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer metadataFile.Close() + + if err := createPool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { + return err + } + } + + // If we didn't just create the data or metadata image, we need to + // load the transaction id and migrate old metadata + if !createdLoopback { + if err = devices.initMetaData(); err != nil { + return err + } + } + + // Setup the base image + if doInit { + if err := devices.setupBaseImage(); err != nil { + log.Debugf("Error device setupBaseImage: %s", err) + return err + } + } + + return nil +} + +func (devices *DeviceSet) AddDevice(hash, baseHash string) error { + baseInfo, err := devices.lookupDevice(baseHash) + if err != nil { + return err + } + + baseInfo.lock.Lock() + defer baseInfo.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + if info, _ := devices.lookupDevice(hash); info != nil { + return fmt.Errorf("device %s already exists", hash) + } + + deviceId := devices.nextDeviceId + + if err := 
createSnapDevice(devices.getPoolDevName(), &deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { + log.Debugf("Error creating snap device: %s", err) + return err + } + + // Ids are 24bit, so wrap around + devices.nextDeviceId = (deviceId + 1) & 0xffffff + + if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil { + deleteDevice(devices.getPoolDevName(), deviceId) + log.Debugf("Error registering device: %s", err) + return err + } + return nil +} + +func (devices *DeviceSet) deleteDevice(info *DevInfo) error { + if devices.doBlkDiscard { + // This is a workaround for the kernel not discarding blocks + // on the thin pool when we remove a thinp device, so we do it + // manually + if err := devices.activateDeviceIfNeeded(info); err == nil { + if err := BlockDeviceDiscard(info.DevName()); err != nil { + log.Debugf("Error discarding block on device: %s (ignoring)", err) + } + } + } + + devinfo, _ := getInfo(info.Name()) + if devinfo != nil && devinfo.Exists != 0 { + if err := devices.removeDeviceAndWait(info.Name()); err != nil { + log.Debugf("Error removing device: %s", err) + return err + } + } + + if err := deleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil { + log.Debugf("Error deleting device: %s", err) + return err + } + + devices.allocateTransactionId() + devices.devicesLock.Lock() + delete(devices.Devices, info.Hash) + devices.devicesLock.Unlock() + + if err := devices.removeMetadata(info); err != nil { + devices.devicesLock.Lock() + devices.Devices[info.Hash] = info + devices.devicesLock.Unlock() + log.Debugf("Error removing meta data: %s", err) + return err + } + + return nil +} + +func (devices *DeviceSet) DeleteDevice(hash string) error { + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + return devices.deleteDevice(info) +} + +func (devices *DeviceSet) deactivatePool() error { + log.Debugf("[devmapper] deactivatePool()") + defer log.Debugf("[devmapper] deactivatePool END") + devname := devices.getPoolDevName() + devinfo, err := getInfo(devname) + if err != nil { + return err + } + if devinfo.Exists != 0 { + return removeDevice(devname) + } + + return nil +} + +func (devices *DeviceSet) deactivateDevice(info *DevInfo) error { + log.Debugf("[devmapper] deactivateDevice(%s)", info.Hash) + defer log.Debugf("[devmapper] deactivateDevice END") + + // Wait for the unmount to be effective, + // by watching the value of Info.OpenCount for the device + if err := devices.waitClose(info); err != nil { + log.Errorf("Warning: error waiting for device %s to close: %s", info.Hash, err) + } + + devinfo, err := getInfo(info.Name()) + if err != nil { + return err + } + if devinfo.Exists != 0 { + if err := devices.removeDeviceAndWait(info.Name()); err != nil { + return err + } + } + + return nil +} + +// Issues the underlying dm remove operation and then waits +// for it to finish. +func (devices *DeviceSet) removeDeviceAndWait(devname string) error { + var err error + + for i := 0; i < 1000; i++ { + err = removeDevice(devname) + if err == nil { + break + } + if err != ErrBusy { + return err + } + + // If we see EBUSY it may be a transient error, + // sleep a bit and retry a few times. 
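+ // Note: the Unlock/Lock pair below drops the global DeviceSet lock for the + // duration of the sleep so other operations can make progress; per the + // locking rules documented on DevInfo, the caller's per-device lock stays held.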
+ devices.Unlock() + time.Sleep(10 * time.Millisecond) + devices.Lock() + } + if err != nil { + return err + } + + if err := devices.waitRemove(devname); err != nil { + return err + } + return nil +} + +// waitRemove blocks until either: +// a) the device registered at <prefix>-<hash> is removed, +// or b) the 10 second timeout expires. +func (devices *DeviceSet) waitRemove(devname string) error { + log.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname) + defer log.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname) + i := 0 + for ; i < 1000; i++ { + devinfo, err := getInfo(devname) + if err != nil { + // If there is an error we assume the device doesn't exist. + // The error might actually be something else, but we can't differentiate. + return nil + } + if i%100 == 0 { + log.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists) + } + if devinfo.Exists == 0 { + break + } + + devices.Unlock() + time.Sleep(10 * time.Millisecond) + devices.Lock() + } + if i == 1000 { + return fmt.Errorf("Timeout while waiting for device %s to be removed", devname) + } + return nil +} + +// waitClose blocks until either: +// a) the device registered at <prefix>-<hash> is closed, +// or b) the 10 second timeout expires. +func (devices *DeviceSet) waitClose(info *DevInfo) error { + i := 0 + for ; i < 1000; i++ { + devinfo, err := getInfo(info.Name()) + if err != nil { + return err + } + if i%100 == 0 { + log.Debugf("Waiting for unmount of %s: opencount=%d", info.Hash, devinfo.OpenCount) + } + if devinfo.OpenCount == 0 { + break + } + devices.Unlock() + time.Sleep(10 * time.Millisecond) + devices.Lock() + } + if i == 1000 { + return fmt.Errorf("Timeout while waiting for device %s to close", info.Hash) + } + return nil +} + +func (devices *DeviceSet) Shutdown() error { + + log.Debugf("[deviceset %s] shutdown()", devices.devicePrefix) + log.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root) + defer log.Debugf("[deviceset %s] shutdown END", devices.devicePrefix) + + var devs []*DevInfo + + devices.devicesLock.Lock() + for _, info := range devices.Devices { + devs = append(devs, info) + } + devices.devicesLock.Unlock() + + for _, info := range devs { + info.lock.Lock() + if info.mountCount > 0 { + // We use MNT_DETACH here in case it is still busy in some running + // container. This means it'll go away from the global scope directly, + // and the device will be released when that container dies. 
+ if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil { + log.Debugf("Shutdown unmounting %s, error: %s", info.mountPath, err) + } + + devices.Lock() + if err := devices.deactivateDevice(info); err != nil { + log.Debugf("Shutdown deactivate %s , error: %s", info.Hash, err) + } + devices.Unlock() + } + info.lock.Unlock() + } + + info, _ := devices.lookupDevice("") + if info != nil { + info.lock.Lock() + devices.Lock() + if err := devices.deactivateDevice(info); err != nil { + log.Debugf("Shutdown deactivate base , error: %s", err) + } + devices.Unlock() + info.lock.Unlock() + } + + devices.Lock() + if err := devices.deactivatePool(); err != nil { + log.Debugf("Shutdown deactivate pool , error: %s", err) + } + devices.Unlock() + + return nil +} + +func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + if info.mountCount > 0 { + if path != info.mountPath { + return fmt.Errorf("Trying to mount devmapper device in multiple places (%s, %s)", info.mountPath, path) + } + + info.mountCount++ + return nil + } + + if err := devices.activateDeviceIfNeeded(info); err != nil { + return fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) + } + + var flags uintptr = syscall.MS_MGC_VAL + + fstype, err := ProbeFsType(info.DevName()) + if err != nil { + return err + } + + options := "" + + if fstype == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same fs UUID + options = joinMountOptions(options, "nouuid") + } + + options = joinMountOptions(options, devices.mountOptions) + options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) + + err = syscall.Mount(info.DevName(), path, fstype, flags, joinMountOptions("discard", options)) + if err != nil && err == syscall.EINVAL { + err = syscall.Mount(info.DevName(), path, fstype, flags, options) + } + if err != nil { + return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err) + } + + info.mountCount = 1 + info.mountPath = path + + return nil +} + +func (devices *DeviceSet) UnmountDevice(hash string) error { + log.Debugf("[devmapper] UnmountDevice(hash=%s)", hash) + defer log.Debugf("[devmapper] UnmountDevice END") + + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + if info.mountCount == 0 { + return fmt.Errorf("UnmountDevice: device not-mounted id %s\n", hash) + } + + info.mountCount-- + if info.mountCount > 0 { + return nil + } + + log.Debugf("[devmapper] Unmount(%s)", info.mountPath) + if err := syscall.Unmount(info.mountPath, 0); err != nil { + return err + } + log.Debugf("[devmapper] Unmount done") + + if err := devices.deactivateDevice(info); err != nil { + return err + } + + info.mountPath = "" + + return nil +} + +func (devices *DeviceSet) HasDevice(hash string) bool { + devices.Lock() + defer devices.Unlock() + + info, _ := devices.lookupDevice(hash) + return info != nil +} + +func (devices *DeviceSet) HasActivatedDevice(hash string) bool { + info, _ := devices.lookupDevice(hash) + if info == nil { + return false + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + devinfo, _ := getInfo(info.Name()) + return devinfo != nil && devinfo.Exists != 0 +} + +func (devices *DeviceSet) List() []string { 
devices.Lock() + defer devices.Unlock() + + devices.devicesLock.Lock() + ids := make([]string, len(devices.Devices)) + i := 0 + for k := range devices.Devices { + ids[i] = k + i++ + } + devices.devicesLock.Unlock() + + return ids +} + +func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { + var params string + _, sizeInSectors, _, params, err = getStatus(devName) + if err != nil { + return + } + if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { + return + } + return +} + +func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { + info, err := devices.lookupDevice(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + status := &DevStatus{ + DeviceId: info.DeviceId, + Size: info.Size, + TransactionId: info.TransactionId, + } + + if err := devices.activateDeviceIfNeeded(info); err != nil { + return nil, fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) + } + + if sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()); err != nil { + return nil, err + } else { + status.SizeInSectors = sizeInSectors + status.MappedSectors = mappedSectors + status.HighestMappedSector = highestMappedSector + } + + return status, nil +} + +func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionId, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { + var params string + if _, totalSizeInSectors, _, params, err = getStatus(devices.getPoolName()); err == nil { + _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionId, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) + } + return +} + +func (devices *DeviceSet) Status() *Status { + devices.Lock() + defer devices.Unlock() + + status := &Status{} + + status.PoolName = devices.getPoolName() + if len(devices.dataDevice) > 0 { + status.DataLoopback = devices.dataDevice + } else { + status.DataLoopback = path.Join(devices.loopbackDir(), "data") + } + if len(devices.metadataDevice) > 0 { + status.MetadataLoopback = devices.metadataDevice + } else { + status.MetadataLoopback = path.Join(devices.loopbackDir(), "metadata") + } + + totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err == nil { + // Convert from blocks to bytes + blockSizeInSectors := totalSizeInSectors / dataTotal + + status.Data.Used = dataUsed * blockSizeInSectors * 512 + status.Data.Total = dataTotal * blockSizeInSectors * 512 + + // metadata blocks are always 4k + status.Metadata.Used = metadataUsed * 4096 + status.Metadata.Total = metadataTotal * 4096 + + status.SectorSize = blockSizeInSectors * 512 + } + + return status +} + +func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error) { + SetDevDir("/dev") + + devices := &DeviceSet{ + root: root, + MetaData: MetaData{Devices: make(map[string]*DevInfo)}, + dataLoopbackSize: DefaultDataLoopbackSize, + metaDataLoopbackSize: DefaultMetaDataLoopbackSize, + baseFsSize: DefaultBaseFsSize, + filesystem: "ext4", + doBlkDiscard: true, + thinpBlockSize: DefaultThinpBlockSize, + } + + foundBlkDiscard := false + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "dm.basesize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, 
err + } + devices.baseFsSize = uint64(size) + case "dm.loopdatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.dataLoopbackSize = size + case "dm.loopmetadatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.metaDataLoopbackSize = size + case "dm.fs": + if val != "ext4" && val != "xfs" { + return nil, fmt.Errorf("Unsupported filesystem %s\n", val) + } + devices.filesystem = val + case "dm.mkfsarg": + devices.mkfsArgs = append(devices.mkfsArgs, val) + case "dm.mountopt": + devices.mountOptions = joinMountOptions(devices.mountOptions, val) + case "dm.metadatadev": + devices.metadataDevice = val + case "dm.datadev": + devices.dataDevice = val + case "dm.blkdiscard": + foundBlkDiscard = true + devices.doBlkDiscard, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.blocksize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + // convert to 512b sectors + devices.thinpBlockSize = uint32(size) >> 9 + default: + return nil, fmt.Errorf("Unknown option %s\n", key) + } + } + + // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive + if !foundBlkDiscard && devices.dataDevice != "" { + devices.doBlkDiscard = false + } + + if err := devices.initDevmapper(doInit); err != nil { + return nil, err + } + + return devices, nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/devmapper_doc.go docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/devmapper_doc.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/devmapper_doc.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/devmapper_doc.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,106 @@ +package devmapper + +// Definition of struct dm_task and sub structures (from lvm2) +// +// struct dm_ioctl { +// /* +// * The version number is made up of three parts: +// * major - no backward or forward compatibility, +// * minor - only backwards compatible, +// * patch - both backwards and forwards compatible. +// * +// * All clients of the ioctl interface should fill in the +// * version number of the interface that they were +// * compiled with. +// * +// * All recognised ioctl commands (ie. those that don't +// * return -ENOTTY) fill out this field, even if the +// * command failed. +// */ +// uint32_t version[3]; /* in/out */ +// uint32_t data_size; /* total size of data passed in +// * including this struct */ + +// uint32_t data_start; /* offset to start of data +// * relative to start of this struct */ + +// uint32_t target_count; /* in/out */ +// int32_t open_count; /* out */ +// uint32_t flags; /* in/out */ + +// /* +// * event_nr holds either the event number (input and output) or the +// * udev cookie value (input only). +// * The DM_DEV_WAIT ioctl takes an event number as input. +// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls +// * use the field as a cookie to return in the DM_COOKIE +// * variable with the uevents they issue. +// * For output, the ioctls return the event number, not the cookie. 
+// */ +// uint32_t event_nr; /* in/out */ +// uint32_t padding; + +// uint64_t dev; /* in/out */ + +// char name[DM_NAME_LEN]; /* device name */ +// char uuid[DM_UUID_LEN]; /* unique identifier for +// * the block device */ +// char data[7]; /* padding or data */ +// }; + +// struct target { +// uint64_t start; +// uint64_t length; +// char *type; +// char *params; + +// struct target *next; +// }; + +// typedef enum { +// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ +// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ +// } dm_add_node_t; + +// struct dm_task { +// int type; +// char *dev_name; +// char *mangled_dev_name; + +// struct target *head, *tail; + +// int read_only; +// uint32_t event_nr; +// int major; +// int minor; +// int allow_default_major_fallback; +// uid_t uid; +// gid_t gid; +// mode_t mode; +// uint32_t read_ahead; +// uint32_t read_ahead_flags; +// union { +// struct dm_ioctl *v4; +// } dmi; +// char *newname; +// char *message; +// char *geometry; +// uint64_t sector; +// int no_flush; +// int no_open_count; +// int skip_lockfs; +// int query_inactive_table; +// int suppress_identical_reload; +// dm_add_node_t add_node; +// uint64_t existing_table_size; +// int cookie_set; +// int new_uuid; +// int secure_data; +// int retry_remove; +// int enable_checks; +// int expected_errno; + +// char *uuid; +// char *mangled_uuid; +// }; +// diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/devmapper.go docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/devmapper.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/devmapper.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/devmapper.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,646 @@ +// +build linux + +package devmapper + +import ( + "errors" + "fmt" + "os" + "runtime" + "syscall" + + "github.com/docker/docker/pkg/log" +) + +type DevmapperLogger interface { + log(level int, file string, line int, dmError int, message string) +} + +const ( + DeviceCreate TaskType = iota + DeviceReload + DeviceRemove + DeviceRemoveAll + DeviceSuspend + DeviceResume + DeviceInfo + DeviceDeps + DeviceRename + DeviceVersion + DeviceStatus + DeviceTable + DeviceWaitevent + DeviceList + DeviceClear + DeviceMknodes + DeviceListVersions + DeviceTargetMsg + DeviceSetGeometry +) + +const ( + AddNodeOnResume AddNodeType = iota + AddNodeOnCreate +) + +var ( + ErrTaskRun = errors.New("dm_task_run failed") + ErrTaskSetName = errors.New("dm_task_set_name failed") + ErrTaskSetMessage = errors.New("dm_task_set_message failed") + ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") + ErrTaskSetRo = errors.New("dm_task_set_ro failed") + ErrTaskAddTarget = errors.New("dm_task_add_target failed") + ErrTaskSetSector = errors.New("dm_task_set_sector failed") + ErrTaskGetInfo = errors.New("dm_task_get_info failed") + ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") + ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") + ErrNilCookie = errors.New("cookie ptr can't be nil") + ErrAttachLoopbackDevice = errors.New("loopback mounting failed") + ErrGetBlockSize = errors.New("Can't get block size") + ErrUdevWait = errors.New("wait on udev cookie failed") + ErrSetDevDir = errors.New("dm_set_dev_dir failed") + ErrGetLibraryVersion = errors.New("dm_get_library_version failed") + ErrCreateRemoveTask = errors.New("Can't create task of type DeviceRemove") + ErrRunRemoveDevice = errors.New("running removeDevice failed") + 
ErrInvalidAddNode = errors.New("Invalid AddNode type")
+	ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file")
+	ErrLoopbackSetCapacity = errors.New("Unable to set loopback capacity")
+	ErrBusy = errors.New("Device is Busy")
+
+	dmSawBusy bool
+	dmSawExist bool
+)
+
+type (
+	Task struct {
+		unmanaged *CDmTask
+	}
+	Info struct {
+		Exists int
+		Suspended int
+		LiveTable int
+		InactiveTable int
+		OpenCount int32
+		EventNr uint32
+		Major uint32
+		Minor uint32
+		ReadOnly int
+		TargetCount int32
+	}
+	TaskType int
+	AddNodeType int
+)
+
+func (t *Task) destroy() {
+	if t != nil {
+		DmTaskDestroy(t.unmanaged)
+		runtime.SetFinalizer(t, nil)
+	}
+}
+
+func TaskCreate(tasktype TaskType) *Task {
+	Ctask := DmTaskCreate(int(tasktype))
+	if Ctask == nil {
+		return nil
+	}
+	task := &Task{unmanaged: Ctask}
+	runtime.SetFinalizer(task, (*Task).destroy)
+	return task
+}
+
+func (t *Task) Run() error {
+	if res := DmTaskRun(t.unmanaged); res != 1 {
+		return ErrTaskRun
+	}
+	return nil
+}
+
+func (t *Task) SetName(name string) error {
+	if res := DmTaskSetName(t.unmanaged, name); res != 1 {
+		return ErrTaskSetName
+	}
+	return nil
+}
+
+func (t *Task) SetMessage(message string) error {
+	if res := DmTaskSetMessage(t.unmanaged, message); res != 1 {
+		return ErrTaskSetMessage
+	}
+	return nil
+}
+
+func (t *Task) SetSector(sector uint64) error {
+	if res := DmTaskSetSector(t.unmanaged, sector); res != 1 {
+		return ErrTaskSetSector
+	}
+	return nil
+}
+
+func (t *Task) SetCookie(cookie *uint, flags uint16) error {
+	if cookie == nil {
+		return ErrNilCookie
+	}
+	if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 {
+		return ErrTaskSetCookie
+	}
+	return nil
+}
+
+func (t *Task) SetAddNode(addNode AddNodeType) error {
+	if addNode != AddNodeOnResume && addNode != AddNodeOnCreate {
+		return ErrInvalidAddNode
+	}
+	if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 {
+		return ErrTaskSetAddNode
+	}
+	return nil
+}
+
+func (t *Task) SetRo() error {
+	if res := DmTaskSetRo(t.unmanaged); res != 1 {
+		return ErrTaskSetRo
+	}
+	return nil
+}
+
+func (t *Task) AddTarget(start, size uint64, ttype, params string) error {
+	if res := DmTaskAddTarget(t.unmanaged, start, size,
+		ttype, params); res != 1 {
+		return ErrTaskAddTarget
+	}
+	return nil
+}
+
+func (t *Task) GetInfo() (*Info, error) {
+	info := &Info{}
+	if res := DmTaskGetInfo(t.unmanaged, info); res != 1 {
+		return nil, ErrTaskGetInfo
+	}
+	return info, nil
+}
+
+func (t *Task) GetDriverVersion() (string, error) {
+	res := DmTaskGetDriverVersion(t.unmanaged)
+	if res == "" {
+		return "", ErrTaskGetDriverVersion
+	}
+	return res, nil
+}
+
+func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64,
+	length uint64, targetType string, params string) {
+
+	return DmGetNextTarget(t.unmanaged, next, &start, &length,
+		&targetType, &params),
+		start, length, targetType, params
+}
+
+func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
+	loopInfo, err := ioctlLoopGetStatus64(file.Fd())
+	if err != nil {
+		log.Errorf("Error getting loopback backing file: %s", err)
+		return 0, 0, ErrGetLoopbackBackingFile
+	}
+	return loopInfo.loDevice, loopInfo.loInode, nil
+}
+
+func LoopbackSetCapacity(file *os.File) error {
+	if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
+		log.Errorf("Error loopbackSetCapacity: %s", err)
+		return ErrLoopbackSetCapacity
+	}
+	return nil
+}
+
+func FindLoopDeviceFor(file *os.File) *os.File {
+	stat, err := file.Stat()
+	if err != nil {
+		return nil
+	}
+	targetInode :=
stat.Sys().(*syscall.Stat_t).Ino + targetDevice := stat.Sys().(*syscall.Stat_t).Dev + + for i := 0; true; i++ { + path := fmt.Sprintf("/dev/loop%d", i) + + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + // Ignore all errors until the first not-exist + // we want to continue looking for the file + continue + } + + dev, inode, err := getLoopbackBackingFile(file) + if err == nil && dev == targetDevice && inode == targetInode { + return file + } + file.Close() + } + + return nil +} + +func UdevWait(cookie uint) error { + if res := DmUdevWait(cookie); res != 1 { + log.Debugf("Failed to wait on udev cookie %d", cookie) + return ErrUdevWait + } + return nil +} + +func LogInitVerbose(level int) { + DmLogInitVerbose(level) +} + +var dmLogger DevmapperLogger = nil + +func logInit(logger DevmapperLogger) { + dmLogger = logger + LogWithErrnoInit() +} + +func SetDevDir(dir string) error { + if res := DmSetDevDir(dir); res != 1 { + log.Debugf("Error dm_set_dev_dir") + return ErrSetDevDir + } + return nil +} + +func GetLibraryVersion() (string, error) { + var version string + if res := DmGetLibraryVersion(&version); res != 1 { + return "", ErrGetLibraryVersion + } + return version, nil +} + +// Useful helper for cleanup +func RemoveDevice(name string) error { + task := TaskCreate(DeviceRemove) + if task == nil { + return ErrCreateRemoveTask + } + if err := task.SetName(name); err != nil { + log.Debugf("Can't set task name %s", name) + return err + } + if err := task.Run(); err != nil { + return ErrRunRemoveDevice + } + return nil +} + +func GetBlockDeviceSize(file *os.File) (uint64, error) { + size, err := ioctlBlkGetSize64(file.Fd()) + if err != nil { + log.Errorf("Error getblockdevicesize: %s", err) + return 0, ErrGetBlockSize + } + return uint64(size), nil +} + +func BlockDeviceDiscard(path string) error { + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + return err + } + defer file.Close() + + size, err := GetBlockDeviceSize(file) + if err != nil { + return err + } + + if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { + return err + } + + // Without this sometimes the remove of the device that happens after + // discard fails with EBUSY. 
+ syscall.Sync() + + return nil +} + +// This is the programmatic example of "dmsetup create" +func createPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := createTask(DeviceCreate, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("Can't add target %s", err) + } + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can't set cookie %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceCreate (createPool) %s", err) + } + + UdevWait(cookie) + + return nil +} + +func reloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := createTask(DeviceReload, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("Can't add target %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceCreate %s", err) + } + + return nil +} + +func createTask(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("Can't create task of type %d", int(t)) + } + if err := task.SetName(name); err != nil { + return nil, fmt.Errorf("Can't set task name %s", name) + } + return task, nil +} + +func getInfo(name string) (*Info, error) { + task, err := createTask(DeviceInfo, name) + if task == nil { + return nil, err + } + if err := task.Run(); err != nil { + return nil, err + } + return task.GetInfo() +} + +func getDriverVersion() (string, error) { + task := TaskCreate(DeviceVersion) + if task == nil { + return "", fmt.Errorf("Can't create DeviceVersion task") + } + if err := task.Run(); err != nil { + return "", err + } + return task.GetDriverVersion() +} + +func getStatus(name string) (uint64, uint64, string, string, error) { + task, err := createTask(DeviceStatus, name) + if task == nil { + log.Debugf("getStatus: Error createTask: %s", err) + return 0, 0, "", "", err + } + if err := task.Run(); err != nil { + log.Debugf("getStatus: Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.GetInfo() + if err != nil { + log.Debugf("getStatus: Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + log.Debugf("getStatus: Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("Non existing device %s", name) + } + + _, start, length, targetType, params := task.GetNextTarget(0) + return start, length, targetType, params, nil +} + +func setTransactionId(poolName string, oldId uint64, newId uint64) error { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("set_transaction_id %d %d", oldId, newId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } + + if err := task.Run(); err 
!= nil { + return fmt.Errorf("Error running setTransactionId %s", err) + } + return nil +} + +func suspendDevice(name string) error { + task, err := createTask(DeviceSuspend, name) + if task == nil { + return err + } + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceSuspend %s", err) + } + return nil +} + +func resumeDevice(name string) error { + task, err := createTask(DeviceResume, name) + if task == nil { + return err + } + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can't set cookie %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceResume %s", err) + } + + UdevWait(cookie) + + return nil +} + +func createDevice(poolName string, deviceId *int) error { + log.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId) + + for { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("create_thin %d", *deviceId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } + + dmSawExist = false + if err := task.Run(); err != nil { + if dmSawExist { + // Already exists, try next id + *deviceId++ + continue + } + return fmt.Errorf("Error running createDevice %s", err) + } + break + } + return nil +} + +func deleteDevice(poolName string, deviceId int) error { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("delete %d", deviceId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running deleteDevice %s", err) + } + return nil +} + +func removeDevice(name string) error { + log.Debugf("[devmapper] removeDevice START") + defer log.Debugf("[devmapper] removeDevice END") + task, err := createTask(DeviceRemove, name) + if task == nil { + return err + } + dmSawBusy = false + if err = task.Run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("Error running removeDevice %s", err) + } + return nil +} + +func activateDevice(poolName string, name string, deviceId int, size uint64) error { + task, err := createTask(DeviceCreate, name) + if task == nil { + return err + } + + params := fmt.Sprintf("%s %d", poolName, deviceId) + if err := task.AddTarget(0, size/512, "thin", params); err != nil { + return fmt.Errorf("Can't add target %s", err) + } + if err := task.SetAddNode(AddNodeOnCreate); err != nil { + return fmt.Errorf("Can't add node %s", err) + } + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can't set cookie %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceCreate (activateDevice) %s", err) + } + + UdevWait(cookie) + + return nil +} + +func createSnapDevice(poolName string, deviceId *int, baseName string, baseDeviceId int) error { + devinfo, _ := getInfo(baseName) + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err := suspendDevice(baseName); err != nil { + return err + } + } + + for { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + if doSuspend { + resumeDevice(baseName) + } + return err + } + + if err := task.SetSector(0); err != nil { + if 
doSuspend { + resumeDevice(baseName) + } + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", *deviceId, baseDeviceId)); err != nil { + if doSuspend { + resumeDevice(baseName) + } + return fmt.Errorf("Can't set message %s", err) + } + + dmSawExist = false + if err := task.Run(); err != nil { + if dmSawExist { + // Already exists, try next id + *deviceId++ + continue + } + + if doSuspend { + resumeDevice(baseName) + } + return fmt.Errorf("Error running DeviceCreate (createSnapDevice) %s", err) + } + + break + } + + if doSuspend { + if err := resumeDevice(baseName); err != nil { + return err + } + } + + return nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/devmapper_log.go docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/devmapper_log.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/devmapper_log.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/devmapper_log.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,30 @@ +// +build linux + +package devmapper + +import "C" + +import ( + "strings" +) + +// Due to the way cgo works this has to be in a separate file, as devmapper.go has +// definitions in the cgo block, which is incompatible with using "//export" + +//export DevmapperLogCallback +func DevmapperLogCallback(level C.int, file *C.char, line C.int, dm_errno_or_class C.int, message *C.char) { + msg := C.GoString(message) + if level < 7 { + if strings.Contains(msg, "busy") { + dmSawBusy = true + } + + if strings.Contains(msg, "File exists") { + dmSawExist = true + } + } + + if dmLogger != nil { + dmLogger.log(int(level), C.GoString(file), int(line), int(dm_errno_or_class), msg) + } +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/devmapper_test.go docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/devmapper_test.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/devmapper_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/devmapper_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,37 @@ +// +build linux + +package devmapper + +import ( + "github.com/docker/docker/daemon/graphdriver/graphtest" + "testing" +) + +func init() { + // Reduce the size the the base fs and loopback for the tests + DefaultDataLoopbackSize = 300 * 1024 * 1024 + DefaultMetaDataLoopbackSize = 200 * 1024 * 1024 + DefaultBaseFsSize = 300 * 1024 * 1024 +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown +func TestDevmapperSetup(t *testing.T) { + graphtest.GetDriver(t, "devicemapper") +} + +func TestDevmapperCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "devicemapper") +} + +func TestDevmapperCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "devicemapper") +} + +func TestDevmapperCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "devicemapper") +} + +func TestDevmapperTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/devmapper_wrapper.go docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/devmapper_wrapper.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/devmapper_wrapper.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/devmapper_wrapper.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,240 @@ +// +build linux + +package devmapper + +/* +#cgo 
LDFLAGS: -L. -ldevmapper
+#include <libdevmapper.h>
+#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
+#include <linux/fs.h>   // FIXME: present only for BLKGETSIZE64, maybe we can remove it?
+
+#ifndef LOOP_CTL_GET_FREE
+  #define LOOP_CTL_GET_FREE 0x4C82
+#endif
+
+#ifndef LO_FLAGS_PARTSCAN
+  #define LO_FLAGS_PARTSCAN 8
+#endif
+
+// FIXME: Can't we find a way to do the logging in pure Go?
+extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str);
+
+static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...)
+{
+  char buffer[256];
+  va_list ap;
+
+  va_start(ap, f);
+  vsnprintf(buffer, 256, f, ap);
+  va_end(ap);
+
+  DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer);
+}
+
+static void log_with_errno_init()
+{
+  dm_log_with_errno_init(log_cb);
+}
+*/
+import "C"
+
+import (
+	"unsafe"
+)
+
+type (
+	CDmTask C.struct_dm_task
+
+	CLoopInfo64 C.struct_loop_info64
+	LoopInfo64 struct {
+		loDevice uint64 /* ioctl r/o */
+		loInode uint64 /* ioctl r/o */
+		loRdevice uint64 /* ioctl r/o */
+		loOffset uint64
+		loSizelimit uint64 /* bytes, 0 == max available */
+		loNumber uint32 /* ioctl r/o */
+		loEncrypt_type uint32
+		loEncrypt_key_size uint32 /* ioctl w/o */
+		loFlags uint32 /* ioctl r/o */
+		loFileName [LoNameSize]uint8
+		loCryptName [LoNameSize]uint8
+		loEncryptKey [LoKeySize]uint8 /* ioctl w/o */
+		loInit [2]uint64
+	}
+)
+
+// IOCTL consts
+const (
+	BlkGetSize64 = C.BLKGETSIZE64
+	BlkDiscard = C.BLKDISCARD
+
+	LoopSetFd = C.LOOP_SET_FD
+	LoopCtlGetFree = C.LOOP_CTL_GET_FREE
+	LoopGetStatus64 = C.LOOP_GET_STATUS64
+	LoopSetStatus64 = C.LOOP_SET_STATUS64
+	LoopClrFd = C.LOOP_CLR_FD
+	LoopSetCapacity = C.LOOP_SET_CAPACITY
+)
+
+const (
+	LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR
+	LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY
+	LoFlagsPartScan = C.LO_FLAGS_PARTSCAN
+	LoKeySize = C.LO_KEY_SIZE
+	LoNameSize = C.LO_NAME_SIZE
+)
+
+var (
+	DmGetLibraryVersion = dmGetLibraryVersionFct
+	DmGetNextTarget = dmGetNextTargetFct
+	DmLogInitVerbose = dmLogInitVerboseFct
+	DmSetDevDir = dmSetDevDirFct
+	DmTaskAddTarget = dmTaskAddTargetFct
+	DmTaskCreate = dmTaskCreateFct
+	DmTaskDestroy = dmTaskDestroyFct
+	DmTaskGetInfo = dmTaskGetInfoFct
+	DmTaskGetDriverVersion = dmTaskGetDriverVersionFct
+	DmTaskRun = dmTaskRunFct
+	DmTaskSetAddNode = dmTaskSetAddNodeFct
+	DmTaskSetCookie = dmTaskSetCookieFct
+	DmTaskSetMessage = dmTaskSetMessageFct
+	DmTaskSetName = dmTaskSetNameFct
+	DmTaskSetRo = dmTaskSetRoFct
+	DmTaskSetSector = dmTaskSetSectorFct
+	DmUdevWait = dmUdevWaitFct
+	LogWithErrnoInit = logWithErrnoInitFct
+)
+
+func free(p *C.char) {
+	C.free(unsafe.Pointer(p))
+}
+
+func dmTaskDestroyFct(task *CDmTask) {
+	C.dm_task_destroy((*C.struct_dm_task)(task))
+}
+
+func dmTaskCreateFct(taskType int) *CDmTask {
+	return (*CDmTask)(C.dm_task_create(C.int(taskType)))
+}
+
+func dmTaskRunFct(task *CDmTask) int {
+	ret, _ := C.dm_task_run((*C.struct_dm_task)(task))
+	return int(ret)
+}
+
+func dmTaskSetNameFct(task *CDmTask, name string) int {
+	Cname := C.CString(name)
+	defer free(Cname)
+
+	return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname))
+}
+
+func dmTaskSetMessageFct(task *CDmTask, message string) int {
+	Cmessage := C.CString(message)
+	defer free(Cmessage)
+
+	return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage))
+}
+
+func dmTaskSetSectorFct(task *CDmTask, sector uint64) int {
+	return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector)))
+}
+
+func
dmTaskSetCookieFct(task *CDmTask, cookie *uint, flags uint16) int { + cCookie := C.uint32_t(*cookie) + defer func() { + *cookie = uint(cCookie) + }() + return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) +} + +func dmTaskSetAddNodeFct(task *CDmTask, addNode AddNodeType) int { + return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) +} + +func dmTaskSetRoFct(task *CDmTask) int { + return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) +} + +func dmTaskAddTargetFct(task *CDmTask, + start, size uint64, ttype, params string) int { + + Cttype := C.CString(ttype) + defer free(Cttype) + + Cparams := C.CString(params) + defer free(Cparams) + + return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) +} + +func dmTaskGetInfoFct(task *CDmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} + +func dmTaskGetDriverVersionFct(task *CDmTask) string { + buffer := C.malloc(128) + defer C.free(buffer) + res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) + if res == 0 { + return "" + } + return C.GoString((*C.char)(buffer)) +} + +func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr { + var ( + Cstart, Clength C.uint64_t + CtargetType, Cparams *C.char + ) + defer func() { + *start = uint64(Cstart) + *length = uint64(Clength) + *target = C.GoString(CtargetType) + *params = C.GoString(Cparams) + }() + + nextp := C.dm_get_next_target((*C.struct_dm_task)(task), unsafe.Pointer(next), &Cstart, &Clength, &CtargetType, &Cparams) + return uintptr(nextp) +} + +func dmUdevWaitFct(cookie uint) int { + return int(C.dm_udev_wait(C.uint32_t(cookie))) +} + +func dmLogInitVerboseFct(level int) { + C.dm_log_init_verbose(C.int(level)) +} + +func logWithErrnoInitFct() { + C.log_with_errno_init() +} + +func dmSetDevDirFct(dir string) int { + Cdir := C.CString(dir) + defer free(Cdir) + + return int(C.dm_set_dev_dir(Cdir)) +} + +func dmGetLibraryVersionFct(version *string) int { + buffer := C.CString(string(make([]byte, 128))) + defer free(buffer) + defer func() { + *version = C.GoString(buffer) + }() + return int(C.dm_get_library_version(buffer, 128)) +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/driver.go docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/driver.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/driver.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/driver.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,151 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "io/ioutil" + "os" + "path" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/units" +) + +func init() { + graphdriver.Register("devicemapper", Init) +} + +// Placeholder interfaces, to be replaced +// at integration. + +// End of placeholder interfaces. 
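The `init()`/`Register` pattern above is how each storage backend hooks itself into the graphdriver registry that this patch adds in `daemon/graphdriver/driver.go`. As a minimal sketch (not part of the patch; the `dummy` package and its no-op behavior are hypothetical), a backend only needs to implement `ProtoDriver` and wrap itself in `NaiveDiffDriver`:

```go
// Hypothetical illustration only: a no-op backend registering itself
// against the graphdriver API introduced by this patch.
package dummy

import "github.com/docker/docker/daemon/graphdriver"

type dummyDriver struct{ home string }

func init() {
	// Same pattern as the devicemapper init() above.
	graphdriver.Register("dummy", Init)
}

func Init(home string, options []string) (graphdriver.Driver, error) {
	// NaiveDiffDriver supplies Diff/Changes/ApplyDiff/DiffSize on top
	// of the minimal ProtoDriver methods implemented below.
	return graphdriver.NaiveDiffDriver(&dummyDriver{home: home}), nil
}

func (d *dummyDriver) String() string                            { return "dummy" }
func (d *dummyDriver) Create(id, parent string) error            { return nil }
func (d *dummyDriver) Remove(id string) error                    { return nil }
func (d *dummyDriver) Get(id, mountLabel string) (string, error) { return d.home, nil }
func (d *dummyDriver) Put(id string)                             {}
func (d *dummyDriver) Exists(id string) bool                     { return false }
func (d *dummyDriver) Status() [][2]string                       { return nil }
func (d *dummyDriver) Cleanup() error                            { return nil }
```

A daemon would then reach such a backend through `graphdriver.GetDriver("dummy", root, nil)` or via the `DOCKER_DRIVER` environment variable, exactly as the `New` function later in this patch does for its priority list.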
+ +type Driver struct { + *DeviceSet + home string +} + +func Init(home string, options []string) (graphdriver.Driver, error) { + deviceSet, err := NewDeviceSet(home, true, options) + if err != nil { + return nil, err + } + + if err := graphdriver.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + DeviceSet: deviceSet, + home: home, + } + + return graphdriver.NaiveDiffDriver(d), nil +} + +func (d *Driver) String() string { + return "devicemapper" +} + +func (d *Driver) Status() [][2]string { + s := d.DeviceSet.Status() + + status := [][2]string{ + {"Pool Name", s.PoolName}, + {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(int64(s.SectorSize)))}, + {"Data file", s.DataLoopback}, + {"Metadata file", s.MetadataLoopback}, + {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(int64(s.Data.Used)))}, + {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(int64(s.Data.Total)))}, + {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(int64(s.Metadata.Used)))}, + {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(int64(s.Metadata.Total)))}, + } + if vStr, err := GetLibraryVersion(); err == nil { + status = append(status, [2]string{"Library Version", vStr}) + } + return status +} + +func (d *Driver) Cleanup() error { + err := d.DeviceSet.Shutdown() + + if err2 := mount.Unmount(d.home); err == nil { + err = err2 + } + + return err +} + +func (d *Driver) Create(id, parent string) error { + if err := d.DeviceSet.AddDevice(id, parent); err != nil { + return err + } + + return nil +} + +func (d *Driver) Remove(id string) error { + if !d.DeviceSet.HasDevice(id) { + // Consider removing a non-existing device a no-op + // This is useful to be able to progress on container removal + // if the underlying device has gone away due to earlier errors + return nil + } + + // This assumes the device has been properly Get/Put:ed and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id); err != nil { + return err + } + + mp := path.Join(d.home, "mnt", id) + if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { + return err + } + + return nil +} + +func (d *Driver) Get(id, mountLabel string) (string, error) { + mp := path.Join(d.home, "mnt", id) + + // Create the target directories if they don't exist + if err := os.MkdirAll(mp, 0755); err != nil && !os.IsExist(err) { + return "", err + } + + // Mount the device + if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { + return "", err + } + + rootFs := path.Join(mp, "rootfs") + if err := os.MkdirAll(rootFs, 0755); err != nil && !os.IsExist(err) { + d.DeviceSet.UnmountDevice(id) + return "", err + } + + idFile := path.Join(mp, "id") + if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { + // Create an "id" file with the container/image id in it to help reconscruct this in case + // of later problems + if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { + d.DeviceSet.UnmountDevice(id) + return "", err + } + } + + return rootFs, nil +} + +func (d *Driver) Put(id string) { + if err := d.DeviceSet.UnmountDevice(id); err != nil { + log.Errorf("Warning: error unmounting device %s: %s", id, err) + } +} + +func (d *Driver) Exists(id string) bool { + return d.DeviceSet.HasDevice(id) +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/ioctl.go docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/ioctl.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/ioctl.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/ioctl.go 
2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,72 @@ +// +build linux + +package devmapper + +import ( + "syscall" + "unsafe" +) + +func ioctlLoopCtlGetFree(fd uintptr) (int, error) { + index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0) + if err != 0 { + return 0, err + } + return int(index), nil +} + +func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 { + return err + } + return nil +} + +func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *LoopInfo64) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return err + } + return nil +} + +func ioctlLoopClrFd(loopFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { + return err + } + return nil +} + +func ioctlLoopGetStatus64(loopFd uintptr) (*LoopInfo64, error) { + loopInfo := &LoopInfo64{} + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return nil, err + } + return loopInfo, nil +} + +func ioctlLoopSetCapacity(loopFd uintptr, value int) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { + return err + } + return nil +} + +func ioctlBlkGetSize64(fd uintptr) (int64, error) { + var size int64 + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + return 0, err + } + return size, nil +} + +func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { + var r [2]uint64 + r[0] = offset + r[1] = length + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + return err + } + return nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/MAINTAINERS docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/MAINTAINERS --- docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +Alexander Larsson (@alexlarsson) diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/mount.go docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/mount.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/mount.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/mount.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,86 @@ +// +build linux + +package devmapper + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "syscall" +) + +// FIXME: this is copy-pasted from the aufs driver. +// It should be moved into the core. 
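The `ioctl.go` hunk above funnels every loop and block-device ioctl through `syscall.Syscall` with the request constants bound via cgo. A standalone sketch of that same pattern follows; the device path and the hard-coded BLKGETSIZE64 value (its expansion on 64-bit Linux) are assumptions for illustration, not taken from the patch:

```go
// Sketch of the raw-ioctl pattern used in ioctl.go above; run as root
// against a real block device. /dev/loop0 is only an example path.
package main

import (
	"fmt"
	"os"
	"syscall"
	"unsafe"
)

// BLKGETSIZE64 as it typically expands on 64-bit Linux; the patch gets
// this value from cgo (C.BLKGETSIZE64) instead of hard-coding it.
const blkGetSize64 = 0x80081272

func main() {
	f, err := os.Open("/dev/loop0")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer f.Close()

	var size int64
	// Same shape as ioctlBlkGetSize64: hand the kernel a pointer to
	// fill in, and treat a non-zero errno as failure.
	if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, f.Fd(),
		blkGetSize64, uintptr(unsafe.Pointer(&size))); errno != 0 {
		fmt.Fprintln(os.Stderr, errno)
		return
	}
	fmt.Printf("device size: %d bytes\n", size)
}
```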
+ +func Mounted(mountpoint string) (bool, error) { + mntpoint, err := os.Stat(mountpoint) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + parent, err := os.Stat(filepath.Join(mountpoint, "..")) + if err != nil { + return false, err + } + mntpointSt := mntpoint.Sys().(*syscall.Stat_t) + parentSt := parent.Sys().(*syscall.Stat_t) + return mntpointSt.Dev != parentSt.Dev, nil +} + +type probeData struct { + fsName string + magic string + offset uint64 +} + +func ProbeFsType(device string) (string, error) { + probes := []probeData{ + {"btrfs", "_BHRfS_M", 0x10040}, + {"ext4", "\123\357", 0x438}, + {"xfs", "XFSB", 0}, + } + + maxLen := uint64(0) + for _, p := range probes { + l := p.offset + uint64(len(p.magic)) + if l > maxLen { + maxLen = l + } + } + + file, err := os.Open(device) + if err != nil { + return "", err + } + + buffer := make([]byte, maxLen) + l, err := file.Read(buffer) + if err != nil { + return "", err + } + file.Close() + if uint64(l) != maxLen { + return "", fmt.Errorf("unable to detect filesystem type of %s, short read", device) + } + + for _, p := range probes { + if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { + return p.fsName, nil + } + } + + return "", fmt.Errorf("Unknown filesystem type on %s", device) +} + +func joinMountOptions(a, b string) string { + if a == "" { + return b + } + if b == "" { + return a + } + return a + "," + b +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/README.md docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/README.md --- docker.io-0.9.1~dfsg1/daemon/graphdriver/devmapper/README.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/devmapper/README.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,156 @@ +## devicemapper - a storage backend based on Device Mapper + +### Theory of operation + +The device mapper graphdriver uses the device mapper thin provisioning +module (dm-thinp) to implement CoW snapshots. For each devicemapper +graph location (typically `/var/lib/docker/devicemapper`, $graph below) +a thin pool is created based on two block devices, one for data and +one for metadata. By default these block devices are created +automatically by using loopback mounts of automatically created sparse +files. + +The default loopback files used are `$graph/devicemapper/data` and +`$graph/devicemapper/metadata`. Additional metadata required to map +from docker entities to the corresponding devicemapper volumes is +stored in the `$graph/devicemapper/json` file (encoded as Json). + +In order to support multiple devicemapper graphs on a system, the thin +pool will be named something like: `docker-0:33-19478248-pool`, where +the `0:33` part is the minor/major device nr and `19478248` is the +inode number of the $graph directory. + +On the thin pool, docker automatically creates a base thin device, +called something like `docker-0:33-19478248-base` of a fixed +size. This is automatically formatted with an empty filesystem on +creation. This device is the base of all docker images and +containers. All base images are snapshots of this device and those +images are then in turn used as snapshots for other images and +eventually containers. + +### options + +The devicemapper backend supports some options that you can specify +when starting the docker daemon using the `--storage-opt` flags. +This uses the `dm` prefix and would be used something like `docker -d --storage-opt dm.foo=bar`. 
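On the daemon side these flags arrive as plain `key=value` strings. A condensed sketch of the parsing loop that `NewDeviceSet` (earlier in this patch) performs is shown below; the key set is trimmed down for illustration and the helper mimics, rather than reuses, `parsers.ParseKeyValueOpt`:

```go
// Condensed illustration of how NewDeviceSet consumes --storage-opt
// values; only a subset of the dm.* keys is handled here.
package main

import (
	"fmt"
	"strings"
)

func parseStorageOpt(opt string) (key, val string, err error) {
	// Same shape as parsers.ParseKeyValueOpt used by the patch.
	parts := strings.SplitN(opt, "=", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt)
	}
	return strings.ToLower(strings.TrimSpace(parts[0])), strings.TrimSpace(parts[1]), nil
}

func main() {
	for _, opt := range []string{"dm.basesize=20G", "dm.fs=xfs", "dm.blkdiscard=false"} {
		key, val, err := parseStorageOpt(opt)
		if err != nil {
			fmt.Println(err)
			continue
		}
		switch key {
		case "dm.basesize", "dm.fs", "dm.blkdiscard":
			fmt.Printf("would configure %s = %s\n", key, val)
		default:
			fmt.Printf("Unknown option %s\n", key)
		}
	}
}
```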
+
+ Here is the list of supported options:
+
+ * `dm.basesize`
+
+    Specifies the size to use when creating the base device, which
+    limits the size of images and containers. The default value is
+    10G. Note, thin devices are inherently "sparse", so a 10G device
+    which is mostly empty doesn't use 10 GB of space on the
+    pool. However, the filesystem will use more space for the empty
+    case the larger the device is. **Warning**: This value affects the
+    system-wide "base" empty filesystem that may already be
+    initialized and inherited by pulled images. Typically, a change
+    to this value will require additional steps to take effect: 1)
+    stop `docker -d`, 2) `rm -rf /var/lib/docker`, 3) start `docker -d`.
+
+    Example use:
+
+    ``docker -d --storage-opt dm.basesize=20G``
+
+ * `dm.loopdatasize`
+
+    Specifies the size to use when creating the loopback file for the
+    "data" device which is used for the thin pool. The default size is
+    100G. Note that the file is sparse, so it will not initially take
+    up this much space.
+
+    Example use:
+
+    ``docker -d --storage-opt dm.loopdatasize=200G``
+
+ * `dm.loopmetadatasize`
+
+    Specifies the size to use when creating the loopback file for the
+    "metadata" device which is used for the thin pool. The default size is
+    2G. Note that the file is sparse, so it will not initially take
+    up this much space.
+
+    Example use:
+
+    ``docker -d --storage-opt dm.loopmetadatasize=4G``
+
+ * `dm.fs`
+
+    Specifies the filesystem type to use for the base device. The supported
+    options are "ext4" and "xfs". The default is "ext4".
+
+    Example use:
+
+    ``docker -d --storage-opt dm.fs=xfs``
+
+ * `dm.mkfsarg`
+
+    Specifies extra mkfs arguments to be used when creating the base device.
+
+    Example use:
+
+    ``docker -d --storage-opt "dm.mkfsarg=-O ^has_journal"``
+
+ * `dm.mountopt`
+
+    Specifies extra mount options used when mounting the thin devices.
+
+    Example use:
+
+    ``docker -d --storage-opt dm.mountopt=nodiscard``
+
+ * `dm.datadev`
+
+    Specifies a custom blockdevice to use for data for the thin pool.
+
+    If using a block device for device mapper storage, ideally both
+    datadev and metadatadev should be specified to completely avoid
+    using the loopback device.
+
+    Example use:
+
+    ``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1``
+
+ * `dm.metadatadev`
+
+    Specifies a custom blockdevice to use for metadata for the thin
+    pool.
+
+    For best performance the metadata should be on a different spindle
+    than the data, or even better on an SSD.
+
+    If setting up a new metadata pool it is required to be valid. This
+    can be achieved by zeroing the first 4k to indicate empty
+    metadata, like this:
+
+    ``dd if=/dev/zero of=$metadata_dev bs=4096 count=1``
+
+    Example use:
+
+    ``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1``
+
+ * `dm.blocksize`
+
+    Specifies a custom blocksize to use for the thin pool. The default
+    blocksize is 64K.
+
+    Example use:
+
+    ``docker -d --storage-opt dm.blocksize=512K``
+
+ * `dm.blkdiscard`
+
+    Enables or disables the use of blkdiscard when removing
+    devicemapper devices. This is enabled by default (only) if using
+    loopback devices and is required to re-sparsify the loopback file
+    on image/container removal.
+
+    Disabling this on loopback can lead to *much* faster container
+    removal times, but will prevent the space used in `/var/lib/docker`
+    from being returned to the system for other use when
+    containers are removed.
+ + Example use: + + ``docker -d --storage-opt dm.blkdiscard=false`` diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/driver.go docker.io-1.3.2~dfsg1/daemon/graphdriver/driver.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/driver.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/driver.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,156 @@ +package graphdriver + +import ( + "errors" + "fmt" + "os" + "path" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/mount" +) + +type FsMagic uint64 + +const ( + FsMagicBtrfs = FsMagic(0x9123683E) + FsMagicAufs = FsMagic(0x61756673) +) + +type InitFunc func(root string, options []string) (Driver, error) + +// ProtoDriver defines the basic capabilities of a driver. +// This interface exists solely to be a minimum set of methods +// for client code which choose not to implement the entire Driver +// interface and use the NaiveDiffDriver wrapper constructor. +// +// Use of ProtoDriver directly by client code is not recommended. +type ProtoDriver interface { + // String returns a string representation of this driver. + String() string + // Create creates a new, empty, filesystem layer with the + // specified id and parent. Parent may be "". + Create(id, parent string) error + // Remove attempts to remove the filesystem layer with this id. + Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred + // to by this id. You can optionally specify a mountLabel or "". + // Returns the absolute path to the mounted layered filesystem. + Get(id, mountLabel string) (dir string, err error) + // Put releases the system resources for the specified id, + // e.g, unmounting layered filesystem. + Put(id string) + // Exists returns whether a filesystem layer with the specified + // ID exists on this driver. + Exists(id string) bool + // Status returns a set of key-value pairs which give low + // level diagnostic status about this driver. + Status() [][2]string + // Cleanup performs necessary tasks to release resources + // held by the driver, e.g., unmounting all layered filesystems + // known to this driver. + Cleanup() error +} + +// Driver is the interface for layered/snapshot file system drivers. +type Driver interface { + ProtoDriver + // Diff produces an archive of the changes between the specified + // layer and its parent layer which may be "". + Diff(id, parent string) (archive.Archive, error) + // Changes produces a list of changes between the specified layer + // and its parent layer. If parent is "", then all changes will be ADD changes. + Changes(id, parent string) ([]archive.Change, error) + // ApplyDiff extracts the changeset from the given diff into the + // layer with the specified id and parent, returning the size of the + // new layer in bytes. + ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error) + // DiffSize calculates the changes between the specified id + // and its parent and returns the size in bytes of the changes + // relative to its base filesystem directory. 
+ DiffSize(id, parent string) (bytes int64, err error) +} + +var ( + DefaultDriver string + // All registred drivers + drivers map[string]InitFunc + // Slice of drivers that should be used in an order + priority = []string{ + "aufs", + "btrfs", + "devicemapper", + "vfs", + } + + ErrNotSupported = errors.New("driver not supported") + ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") + ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") +) + +func init() { + drivers = make(map[string]InitFunc) +} + +func Register(name string, initFunc InitFunc) error { + if _, exists := drivers[name]; exists { + return fmt.Errorf("Name already registered %s", name) + } + drivers[name] = initFunc + + return nil +} + +func GetDriver(name, home string, options []string) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(path.Join(home, name), options) + } + return nil, ErrNotSupported +} + +func New(root string, options []string) (driver Driver, err error) { + for _, name := range []string{os.Getenv("DOCKER_DRIVER"), DefaultDriver} { + if name != "" { + return GetDriver(name, root, options) + } + } + + // Check for priority drivers first + for _, name := range priority { + driver, err = GetDriver(name, root, options) + if err != nil { + if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS { + continue + } + return nil, err + } + return driver, nil + } + + // Check all registered drivers if no priority driver is found + for _, initFunc := range drivers { + if driver, err = initFunc(root, options); err != nil { + if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS { + continue + } + return nil, err + } + return driver, nil + } + return nil, fmt.Errorf("No supported storage backend found") +} + +func MakePrivate(mountPoint string) error { + mounted, err := mount.Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := mount.Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + + return mount.ForceMount("", mountPoint, "none", "private") +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/fsdiff.go docker.io-1.3.2~dfsg1/daemon/graphdriver/fsdiff.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/fsdiff.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/fsdiff.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,166 @@ +package graphdriver + +import ( + "fmt" + "time" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/utils" +) + +// naiveDiffDriver takes a ProtoDriver and adds the +// capability of the Diffing methods which it may or may not +// support on its own. See the comment on the exported +// NaiveDiffDriver function below. +// Notably, the AUFS driver doesn't need to be wrapped like this. 
+type naiveDiffDriver struct { + ProtoDriver +} + +// NaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id, parent string) (archive.Archive, error) +// Changes(id, parent string) ([]archive.Change, error) +// ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error) +// DiffSize(id, parent string) (bytes int64, err error) +func NaiveDiffDriver(driver ProtoDriver) Driver { + return &naiveDiffDriver{ProtoDriver: driver} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (gdw *naiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) { + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.Tar(layerFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + parentFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (gdw *naiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + defer driver.Put(id) + + parentFs := "" + + if parent != "" { + parentFs, err = driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + } + + return archive.ChangesDirs(layerFs, parentFs) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *naiveDiffDriver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error) { + driver := gdw.ProtoDriver + + // Mount the root filesystem so we can apply the diff/layer. + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + start := time.Now().UTC() + log.Debugf("Start untar layer") + if err = chrootarchive.ApplyLayer(layerFs, diff); err != nil { + return + } + log.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + if parent == "" { + return utils.TreeSize(layerFs) + } + + parentFs, err := driver.Get(parent, "") + if err != nil { + err = fmt.Errorf("Driver %s failed to get image parent %s: %s", driver, parent, err) + return + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return + } + + return archive.ChangesSize(layerFs, changes), nil +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
+func (gdw *naiveDiffDriver) DiffSize(id, parent string) (bytes int64, err error) { + driver := gdw.ProtoDriver + + changes, err := gdw.Changes(id, parent) + if err != nil { + return + } + + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/graphtest/graphtest.go docker.io-1.3.2~dfsg1/daemon/graphdriver/graphtest/graphtest.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/graphtest/graphtest.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/graphtest/graphtest.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,229 @@ +package graphtest + +import ( + "io/ioutil" + "os" + "path" + "syscall" + "testing" + + "github.com/docker/docker/daemon/graphdriver" +) + +var ( + drv *Driver +) + +type Driver struct { + graphdriver.Driver + root string + refCount int +} + +func newDriver(t *testing.T, name string) *Driver { + root, err := ioutil.TempDir("/var/tmp", "docker-graphtest-") + if err != nil { + t.Fatal(err) + } + + if err := os.MkdirAll(root, 0755); err != nil { + t.Fatal(err) + } + + d, err := graphdriver.GetDriver(name, root, nil) + if err != nil { + if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites { + t.Skip("Driver %s not supported", name) + } + t.Fatal(err) + } + return &Driver{d, root, 1} +} + +func cleanup(t *testing.T, d *Driver) { + if err := drv.Cleanup(); err != nil { + t.Fatal(err) + } + os.RemoveAll(d.root) +} + +func GetDriver(t *testing.T, name string) graphdriver.Driver { + if drv == nil { + drv = newDriver(t, name) + } else { + drv.refCount++ + } + return drv +} + +func PutDriver(t *testing.T) { + if drv == nil { + t.Skip("No driver to put!") + } + drv.refCount-- + if drv.refCount == 0 { + cleanup(t, drv) + drv = nil + } +} + +func verifyFile(t *testing.T, path string, mode os.FileMode, uid, gid uint32) { + fi, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + + if fi.Mode()&os.ModeType != mode&os.ModeType { + t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType) + } + + if fi.Mode()&os.ModePerm != mode&os.ModePerm { + t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm) + } + + if fi.Mode()&os.ModeSticky != mode&os.ModeSticky { + t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky) + } + + if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid { + t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid) + } + + if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid { + t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid) + } + + if stat, ok := fi.Sys().(*syscall.Stat_t); ok { + if stat.Uid != uid { + t.Fatalf("%s no owned by uid %d", path, uid) + } + if stat.Gid != gid { + t.Fatalf("%s not owned by gid %d", path, gid) + } + } + +} + +// Creates an new image and verifies it is empty and the right metadata +func DriverTestCreateEmpty(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + if err := driver.Create("empty", ""); err != nil { + t.Fatal(err) + } + + if !driver.Exists("empty") { + t.Fatal("Newly created image doesn't exist") + } + + dir, err := driver.Get("empty", "") + if err != nil { + t.Fatal(err) + } + + verifyFile(t, dir, 0755|os.ModeDir, 0, 0) + + // Verify that the directory is empty + fis, err := ioutil.ReadDir(dir) + if err 
!= nil { + t.Fatal(err) + } + + if len(fis) != 0 { + t.Fatal("New directory not empty") + } + + driver.Put("empty") + + if err := driver.Remove("empty"); err != nil { + t.Fatal(err) + } + +} + +func createBase(t *testing.T, driver graphdriver.Driver, name string) { + // We need to be able to set any perms + oldmask := syscall.Umask(0) + defer syscall.Umask(oldmask) + + if err := driver.Create(name, ""); err != nil { + t.Fatal(err) + } + + dir, err := driver.Get(name, "") + if err != nil { + t.Fatal(err) + } + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil { + t.Fatal(err) + } + if err := os.Chown(subdir, 1, 2); err != nil { + t.Fatal(err) + } + + file := path.Join(dir, "a file") + if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil { + t.Fatal(err) + } +} + +func verifyBase(t *testing.T, driver graphdriver.Driver, name string) { + dir, err := driver.Get(name, "") + if err != nil { + t.Fatal(err) + } + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2) + + file := path.Join(dir, "a file") + verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) + + fis, err := ioutil.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 2 { + t.Fatal("Unexpected files in base image") + } + +} + +func DriverTestCreateBase(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + createBase(t, driver, "Base") + verifyBase(t, driver, "Base") + + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } +} + +func DriverTestCreateSnap(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + createBase(t, driver, "Base") + + if err := driver.Create("Snap", "Base"); err != nil { + t.Fatal(err) + } + + verifyBase(t, driver, "Snap") + + if err := driver.Remove("Snap"); err != nil { + t.Fatal(err) + } + + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/vfs/driver.go docker.io-1.3.2~dfsg1/daemon/graphdriver/vfs/driver.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/vfs/driver.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/vfs/driver.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,104 @@ +package vfs + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/libcontainer/label" +) + +func init() { + graphdriver.Register("vfs", Init) +} + +func Init(home string, options []string) (graphdriver.Driver, error) { + d := &Driver{ + home: home, + } + return graphdriver.NaiveDiffDriver(d), nil +} + +type Driver struct { + home string +} + +func (d *Driver) String() string { + return "vfs" +} + +func (d *Driver) Status() [][2]string { + return nil +} + +func (d *Driver) Cleanup() error { + return nil +} + +func isGNUcoreutils() bool { + if stdout, err := exec.Command("cp", "--version").Output(); err == nil { + return bytes.Contains(stdout, []byte("GNU coreutils")) + } + + return false +} + +func (d *Driver) Create(id, parent string) error { + dir := d.dir(id) + if err := os.MkdirAll(path.Dir(dir), 0700); err != nil { + return err + } + if err := os.Mkdir(dir, 0755); err != nil { + return err + } + opts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(opts); err == nil { + label.Relabel(dir, mountLabel, "") + } + 
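+	// Editor's note (illustrative sketch, not part of the original patch): vfs
+	// has no copy-on-write support, so the parent handling just below seeds the
+	// child layer with a full recursive copy via chrootarchive.CopyWithTar. A
+	// plain-stdlib equivalent (hypothetical copyTree helper; ignores symlinks
+	// and ownership, which CopyWithTar preserves; needs os, io/ioutil and
+	// path/filepath) would look like:
+	//
+	//	func copyTree(src, dst string) error {
+	//		return filepath.Walk(src, func(p string, fi os.FileInfo, err error) error {
+	//			if err != nil {
+	//				return err
+	//			}
+	//			rel, err := filepath.Rel(src, p)
+	//			if err != nil {
+	//				return err
+	//			}
+	//			target := filepath.Join(dst, rel)
+	//			if fi.IsDir() {
+	//				return os.MkdirAll(target, fi.Mode())
+	//			}
+	//			data, err := ioutil.ReadFile(p)
+	//			if err != nil {
+	//				return err
+	//			}
+	//			return ioutil.WriteFile(target, data, fi.Mode())
+	//		})
+	//	}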
if parent == "" { + return nil + } + parentDir, err := d.Get(parent, "") + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := chrootarchive.CopyWithTar(parentDir, dir); err != nil { + return err + } + return nil +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, "dir", path.Base(id)) +} + +func (d *Driver) Remove(id string) error { + if _, err := os.Stat(d.dir(id)); err != nil { + return err + } + return os.RemoveAll(d.dir(id)) +} + +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.dir(id) + if st, err := os.Stat(dir); err != nil { + return "", err + } else if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + return dir, nil +} + +func (d *Driver) Put(id string) { + // The vfs driver has no runtime resources (e.g. mounts) + // to clean up, so we don't need anything here +} + +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/graphdriver/vfs/vfs_test.go docker.io-1.3.2~dfsg1/daemon/graphdriver/vfs/vfs_test.go --- docker.io-0.9.1~dfsg1/daemon/graphdriver/vfs/vfs_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/graphdriver/vfs/vfs_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,35 @@ +package vfs + +import ( + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Init() +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestVfsSetup and TestVfsTeardown +func TestVfsSetup(t *testing.T) { + graphtest.GetDriver(t, "vfs") +} + +func TestVfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "vfs") +} + +func TestVfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "vfs") +} + +func TestVfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "vfs") +} + +func TestVfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff -Nru docker.io-0.9.1~dfsg1/daemon/history.go docker.io-1.3.2~dfsg1/daemon/history.go --- docker.io-0.9.1~dfsg1/daemon/history.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/history.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,33 @@ +package daemon + +import ( + "sort" +) + +// History is a convenience type for storing a list of containers, +// ordered by creation date. 
+type History []*Container + +func (history *History) Len() int { + return len(*history) +} + +func (history *History) Less(i, j int) bool { + containers := *history + return containers[j].Created.Before(containers[i].Created) +} + +func (history *History) Swap(i, j int) { + containers := *history + tmp := containers[i] + containers[i] = containers[j] + containers[j] = tmp +} + +func (history *History) Add(container *Container) { + *history = append(*history, container) +} + +func (history *History) Sort() { + sort.Sort(history) +} diff -Nru docker.io-0.9.1~dfsg1/daemon/image_delete.go docker.io-1.3.2~dfsg1/daemon/image_delete.go --- docker.io-0.9.1~dfsg1/daemon/image_delete.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/image_delete.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,156 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/docker/docker/engine" + "github.com/docker/docker/graph" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/utils" +) + +func (daemon *Daemon) ImageDelete(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s IMAGE", job.Name) + } + imgs := engine.NewTable("", 0) + if err := daemon.DeleteImage(job.Eng, job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil { + return job.Error(err) + } + if len(imgs.Data) == 0 { + return job.Errorf("Conflict, %s wasn't deleted", job.Args[0]) + } + if _, err := imgs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +// FIXME: make this private and use the job instead +func (daemon *Daemon) DeleteImage(eng *engine.Engine, name string, imgs *engine.Table, first, force, noprune bool) error { + var ( + repoName, tag string + tags = []string{} + ) + + // FIXME: please respect DRY and centralize repo+tag parsing in a single central place! 
-- shykes + repoName, tag = parsers.ParseRepositoryTag(name) + if tag == "" { + tag = graph.DEFAULTTAG + } + + img, err := daemon.Repositories().LookupImage(name) + if err != nil { + if r, _ := daemon.Repositories().Get(repoName); r != nil { + return fmt.Errorf("No such image: %s:%s", repoName, tag) + } + return fmt.Errorf("No such image: %s", name) + } + + if strings.Contains(img.ID, name) { + repoName = "" + tag = "" + } + + byParents, err := daemon.Graph().ByParent() + if err != nil { + return err + } + + repos := daemon.Repositories().ByID()[img.ID] + + // If deleting by id, see if the id belongs to only one repository + if repoName == "" { + for _, repoAndTag := range repos { + parsedRepo, parsedTag := parsers.ParseRepositoryTag(repoAndTag) + if repoName == "" || repoName == parsedRepo { + repoName = parsedRepo + if parsedTag != "" { + tags = append(tags, parsedTag) + } + } else if repoName != parsedRepo && !force { + // the id belongs to multiple repos, like base:latest and user:test, + // in that case return conflict + return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name) + } + } + } else { + tags = append(tags, tag) + } + + if !first && len(tags) > 0 { + return nil + } + + if len(repos) <= 1 { + if err := daemon.canDeleteImage(img.ID, force); err != nil { + return err + } + } + + // Untag the current image + for _, tag := range tags { + tagDeleted, err := daemon.Repositories().Delete(repoName, tag) + if err != nil { + return err + } + if tagDeleted { + out := &engine.Env{} + out.Set("Untagged", repoName+":"+tag) + imgs.Add(out) + eng.Job("log", "untag", img.ID, "").Run() + } + } + tags = daemon.Repositories().ByID()[img.ID] + if (len(tags) <= 1 && repoName == "") || len(tags) == 0 { + if len(byParents[img.ID]) == 0 { + if err := daemon.Repositories().DeleteAll(img.ID); err != nil { + return err + } + if err := daemon.Graph().Delete(img.ID); err != nil { + return err + } + out := &engine.Env{} + out.Set("Deleted", img.ID) + imgs.Add(out) + eng.Job("log", "delete", img.ID, "").Run() + if img.Parent != "" && !noprune { + err := daemon.DeleteImage(eng, img.Parent, imgs, false, force, noprune) + if first { + return err + } + + } + + } + } + return nil +} + +func (daemon *Daemon) canDeleteImage(imgID string, force bool) error { + for _, container := range daemon.List() { + parent, err := daemon.Repositories().LookupImage(container.Image) + if err != nil { + return err + } + + if err := parent.WalkHistory(func(p *image.Image) error { + if imgID == p.ID { + if container.IsRunning() { + if force { + return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it, stop it and retry", utils.TruncateID(imgID), utils.TruncateID(container.ID)) + } + return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it, stop it and use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID)) + } else if !force { + return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it, use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID)) + } + } + return nil + }); err != nil { + return err + } + } + return nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/info.go docker.io-1.3.2~dfsg1/daemon/info.go --- docker.io-0.9.1~dfsg1/daemon/info.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/info.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,74 @@ +package daemon + +import ( + "os" + "runtime" + + 
"github.com/docker/docker/dockerversion" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/parsers/operatingsystem" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status { + images, _ := daemon.Graph().Map() + var imgcount int + if images == nil { + imgcount = 0 + } else { + imgcount = len(images) + } + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err == nil { + kernelVersion = kv.String() + } + + operatingSystem := "" + if s, err := operatingsystem.GetOperatingSystem(); err == nil { + operatingSystem = s + } + if inContainer, err := operatingsystem.IsContainerized(); err != nil { + log.Errorf("Could not determine if daemon is containerized: %v", err) + operatingSystem += " (error determining if containerized)" + } else if inContainer { + operatingSystem += " (containerized)" + } + + // if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION) + initPath := utils.DockerInitPath("") + if initPath == "" { + // if that fails, we'll just return the path from the daemon + initPath = daemon.SystemInitPath() + } + + cjob := job.Eng.Job("subscribers_count") + env, _ := cjob.Stdout.AddEnv() + if err := cjob.Run(); err != nil { + return job.Error(err) + } + v := &engine.Env{} + v.SetInt("Containers", len(daemon.List())) + v.SetInt("Images", imgcount) + v.Set("Driver", daemon.GraphDriver().String()) + v.SetJson("DriverStatus", daemon.GraphDriver().Status()) + v.SetBool("MemoryLimit", daemon.SystemConfig().MemoryLimit) + v.SetBool("SwapLimit", daemon.SystemConfig().SwapLimit) + v.SetBool("IPv4Forwarding", !daemon.SystemConfig().IPv4ForwardingDisabled) + v.SetBool("Debug", os.Getenv("DEBUG") != "") + v.SetInt("NFd", utils.GetTotalUsedFds()) + v.SetInt("NGoroutines", runtime.NumGoroutine()) + v.Set("ExecutionDriver", daemon.ExecutionDriver().Name()) + v.SetInt("NEventsListener", env.GetInt("count")) + v.Set("KernelVersion", kernelVersion) + v.Set("OperatingSystem", operatingSystem) + v.Set("IndexServerAddress", registry.IndexServerAddress()) + v.Set("InitSha1", dockerversion.INITSHA1) + v.Set("InitPath", initPath) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff -Nru docker.io-0.9.1~dfsg1/daemon/inspect.go docker.io-1.3.2~dfsg1/daemon/inspect.go --- docker.io-0.9.1~dfsg1/daemon/inspect.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/inspect.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,67 @@ +package daemon + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/engine" + "github.com/docker/docker/runconfig" +) + +func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + if container := daemon.Get(name); container != nil { + container.Lock() + defer container.Unlock() + if job.GetenvBool("raw") { + b, err := json.Marshal(&struct { + *Container + HostConfig *runconfig.HostConfig + }{container, container.hostConfig}) + if err != nil { + return job.Error(err) + } + job.Stdout.Write(b) + return engine.StatusOK + } + + out := &engine.Env{} + out.Set("Id", container.ID) + out.SetAuto("Created", container.Created) + out.SetJson("Path", container.Path) 
+ out.SetList("Args", container.Args) + out.SetJson("Config", container.Config) + out.SetJson("State", container.State) + out.Set("Image", container.Image) + out.SetJson("NetworkSettings", container.NetworkSettings) + out.Set("ResolvConfPath", container.ResolvConfPath) + out.Set("HostnamePath", container.HostnamePath) + out.Set("HostsPath", container.HostsPath) + out.Set("Name", container.Name) + out.Set("Driver", container.Driver) + out.Set("ExecDriver", container.ExecDriver) + out.Set("MountLabel", container.MountLabel) + out.Set("ProcessLabel", container.ProcessLabel) + out.SetJson("Volumes", container.Volumes) + out.SetJson("VolumesRW", container.VolumesRW) + out.SetJson("AppArmorProfile", container.AppArmorProfile) + + if children, err := daemon.Children(container.Name); err == nil { + for linkAlias, child := range children { + container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) + } + } + + out.SetJson("HostConfig", container.hostConfig) + + container.hostConfig.Links = nil + if _, err := out.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + return job.Errorf("No such container: %s", name) +} diff -Nru docker.io-0.9.1~dfsg1/daemon/kill.go docker.io-1.3.2~dfsg1/daemon/kill.go --- docker.io-0.9.1~dfsg1/daemon/kill.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/kill.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,59 @@ +package daemon + +import ( + "strconv" + "strings" + "syscall" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/signal" +) + +// ContainerKill send signal to the container +// If no signal is given (sig 0), then Kill with SIGKILL and wait +// for the container to exit. +// If a signal is given, then just send it to the container and return. +func (daemon *Daemon) ContainerKill(job *engine.Job) engine.Status { + if n := len(job.Args); n < 1 || n > 2 { + return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name) + } + var ( + name = job.Args[0] + sig uint64 + err error + ) + + // If we have a signal, look at it. 
Otherwise, do nothing + if len(job.Args) == 2 && job.Args[1] != "" { + // Check if we passed the signal as a number: + // The largest legal signal is 31, so let's parse on 5 bits + sig, err = strconv.ParseUint(job.Args[1], 10, 5) + if err != nil { + // The signal is not a number, treat it as a string (either like "KILL" or like "SIGKILL") + sig = uint64(signal.SignalMap[strings.TrimPrefix(job.Args[1], "SIG")]) + } + + if sig == 0 { + return job.Errorf("Invalid signal: %s", job.Args[1]) + } + } + + if container := daemon.Get(name); container != nil { + // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) + if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { + if err := container.Kill(); err != nil { + return job.Errorf("Cannot kill container %s: %s", name, err) + } + container.LogEvent("kill") + } else { + // Otherwise, just send the requested signal + if err := container.KillSig(int(sig)); err != nil { + return job.Errorf("Cannot kill container %s: %s", name, err) + } + // FIXME: Add event for signals + } + } else { + return job.Errorf("No such container: %s", name) + } + return engine.StatusOK +} diff -Nru docker.io-0.9.1~dfsg1/daemon/list.go docker.io-1.3.2~dfsg1/daemon/list.go --- docker.io-0.9.1~dfsg1/daemon/list.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/list.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,159 @@ +package daemon + +import ( + "errors" + "fmt" + "strconv" + "strings" + + "github.com/docker/docker/pkg/graphdb" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/parsers/filters" +) + +// List returns an array of all containers registered in the daemon. +func (daemon *Daemon) List() []*Container { + return daemon.containers.List() +} + +func (daemon *Daemon) Containers(job *engine.Job) engine.Status { + var ( + foundBefore bool + displayed int + all = job.GetenvBool("all") + since = job.Getenv("since") + before = job.Getenv("before") + n = job.GetenvInt("limit") + size = job.GetenvBool("size") + psFilters filters.Args + filt_exited []int + filt_status []string + ) + outs := engine.NewTable("Created", 0) + + psFilters, err := filters.FromParam(job.Getenv("filters")) + if err != nil { + return job.Error(err) + } + if i, ok := psFilters["exited"]; ok { + for _, value := range i { + code, err := strconv.Atoi(value) + if err != nil { + return job.Error(err) + } + filt_exited = append(filt_exited, code) + } + } + + filt_status, _ = psFilters["status"] + + names := map[string][]string{} + daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error { + names[e.ID()] = append(names[e.ID()], p) + return nil + }, -1) + + var beforeCont, sinceCont *Container + if before != "" { + beforeCont = daemon.Get(before) + if beforeCont == nil { + return job.Error(fmt.Errorf("Could not find container with name or id %s", before)) + } + } + + if since != "" { + sinceCont = daemon.Get(since) + if sinceCont == nil { + return job.Error(fmt.Errorf("Could not find container with name or id %s", since)) + } + } + + errLast := errors.New("last container") + writeCont := func(container *Container) error { + container.Lock() + defer container.Unlock() + if !container.Running && !all && n <= 0 && since == "" && before == "" { + return nil + } + if before != "" && !foundBefore { + if container.ID == beforeCont.ID { + foundBefore = true + } + return nil + } + if n > 0 && displayed == n { + return errLast + } + if since != "" { + if container.ID == sinceCont.ID { + return errLast + } + } + if len(filt_exited) > 0 && 
!container.Running { + should_skip := true + for _, code := range filt_exited { + if code == container.ExitCode { + should_skip = false + break + } + } + if should_skip { + return nil + } + } + for _, status := range filt_status { + if container.State.StateString() != strings.ToLower(status) { + return nil + } + } + displayed++ + out := &engine.Env{} + out.Set("Id", container.ID) + out.SetList("Names", names[container.ID]) + out.Set("Image", daemon.Repositories().ImageName(container.Image)) + if len(container.Args) > 0 { + args := []string{} + for _, arg := range container.Args { + if strings.Contains(arg, " ") { + args = append(args, fmt.Sprintf("'%s'", arg)) + } else { + args = append(args, arg) + } + } + argsAsString := strings.Join(args, " ") + + out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, argsAsString)) + } else { + out.Set("Command", fmt.Sprintf("\"%s\"", container.Path)) + } + out.SetInt64("Created", container.Created.Unix()) + out.Set("Status", container.State.String()) + str, err := container.NetworkSettings.PortMappingAPI().ToListString() + if err != nil { + return err + } + out.Set("Ports", str) + if size { + sizeRw, sizeRootFs := container.GetSize() + out.SetInt64("SizeRw", sizeRw) + out.SetInt64("SizeRootFs", sizeRootFs) + } + outs.Add(out) + return nil + } + + for _, container := range daemon.List() { + if err := writeCont(container); err != nil { + if err != errLast { + return job.Error(err) + } + break + } + } + outs.ReverseSort() + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff -Nru docker.io-0.9.1~dfsg1/daemon/logs.go docker.io-1.3.2~dfsg1/daemon/logs.go --- docker.io-0.9.1~dfsg1/daemon/logs.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/logs.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,135 @@ +package daemon + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "strconv" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/tailfile" + "github.com/docker/docker/pkg/timeutils" +) + +func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + + var ( + name = job.Args[0] + stdout = job.GetenvBool("stdout") + stderr = job.GetenvBool("stderr") + tail = job.Getenv("tail") + follow = job.GetenvBool("follow") + times = job.GetenvBool("timestamps") + lines = -1 + format string + ) + if !(stdout || stderr) { + return job.Errorf("You must choose at least one stream") + } + if times { + format = timeutils.RFC3339NanoFixed + } + if tail == "" { + tail = "all" + } + container := daemon.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + cLog, err := container.ReadLog("json") + if err != nil && os.IsNotExist(err) { + // Legacy logs + log.Debugf("Old logs format") + if stdout { + cLog, err := container.ReadLog("stdout") + if err != nil { + log.Errorf("Error reading logs (stdout): %s", err) + } else if _, err := io.Copy(job.Stdout, cLog); err != nil { + log.Errorf("Error streaming logs (stdout): %s", err) + } + } + if stderr { + cLog, err := container.ReadLog("stderr") + if err != nil { + log.Errorf("Error reading logs (stderr): %s", err) + } else if _, err := io.Copy(job.Stderr, cLog); err != nil { + log.Errorf("Error streaming logs (stderr): %s", err) + } + } + } else if err != nil { + log.Errorf("Error reading logs (json): %s", err) + 
} else { + if tail != "all" { + var err error + lines, err = strconv.Atoi(tail) + if err != nil { + log.Errorf("Failed to parse tail %s, error: %v, showing all logs", tail, err) + lines = -1 + } + } + if lines != 0 { + if lines > 0 { + f := cLog.(*os.File) + ls, err := tailfile.TailFile(f, lines) + if err != nil { + return job.Error(err) + } + tmp := bytes.NewBuffer([]byte{}) + for _, l := range ls { + fmt.Fprintf(tmp, "%s\n", l) + } + cLog = tmp + } + dec := json.NewDecoder(cLog) + l := &jsonlog.JSONLog{} + for { + if err := dec.Decode(l); err == io.EOF { + break + } else if err != nil { + log.Errorf("Error streaming logs: %s", err) + break + } + logLine := l.Log + if times { + logLine = fmt.Sprintf("%s %s", l.Created.Format(format), logLine) + } + if l.Stream == "stdout" && stdout { + io.WriteString(job.Stdout, logLine) + } + if l.Stream == "stderr" && stderr { + io.WriteString(job.Stderr, logLine) + } + l.Reset() + } + } + } + if follow && container.IsRunning() { + errors := make(chan error, 2) + if stdout { + stdoutPipe := container.StdoutLogPipe() + defer stdoutPipe.Close() + go func() { + errors <- jsonlog.WriteLog(stdoutPipe, job.Stdout, format) + }() + } + if stderr { + stderrPipe := container.StderrLogPipe() + defer stderrPipe.Close() + go func() { + errors <- jsonlog.WriteLog(stderrPipe, job.Stderr, format) + }() + } + err := <-errors + if err != nil { + log.Errorf("%s", err) + } + } + return engine.StatusOK +} diff -Nru docker.io-0.9.1~dfsg1/daemon/MAINTAINERS docker.io-1.3.2~dfsg1/daemon/MAINTAINERS --- docker.io-0.9.1~dfsg1/daemon/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,6 @@ +Solomon Hykes (@shykes) +Victor Vieux (@vieux) +Michael Crosby (@crosbymichael) +Cristian Staretu (@unclejack) +Tibor Vass (@tiborvass) +volumes.go: Brian Goff (@cpuguy83) diff -Nru docker.io-0.9.1~dfsg1/daemon/monitor.go docker.io-1.3.2~dfsg1/daemon/monitor.go --- docker.io-0.9.1~dfsg1/daemon/monitor.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/monitor.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,309 @@ +package daemon + +import ( + "io" + "os/exec" + "sync" + "time" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/runconfig" +) + +const defaultTimeIncrement = 100 + +// containerMonitor monitors the execution of a container's main process. +// If a restart policy is specified for the container the monitor will ensure that the + process is restarted based on the rules of the policy. 
When the container is finally stopped + the monitor will reset and clean up any of the container's resources such as networking allocations + and the rootfs +type containerMonitor struct { + mux sync.Mutex + + // container is the container being monitored + container *Container + + // restartPolicy is the current policy being applied to the container monitor + restartPolicy runconfig.RestartPolicy + + // failureCount is the number of times the container has failed to + // start in a row + failureCount int + + // shouldStop signals the monitor that the next time the container exits it is + // either because docker or the user asked for the container to be stopped + shouldStop bool + + // startSignal is a channel that is closed after the container initially starts + startSignal chan struct{} + + // stopChan is used to signal to the monitor whenever there is a wait for the + // next restart so that the timeIncrement is not honored and the user is not + // left waiting for nothing to happen during this time + stopChan chan struct{} + + // timeIncrement is the amount of time to wait between restarts + // this is in milliseconds + timeIncrement int + + // lastStartTime is the time at which the monitor last exec'd the container's process + lastStartTime time.Time +} + +// newContainerMonitor returns an initialized containerMonitor for the provided container +// honoring the provided restart policy +func newContainerMonitor(container *Container, policy runconfig.RestartPolicy) *containerMonitor { + return &containerMonitor{ + container: container, + restartPolicy: policy, + timeIncrement: defaultTimeIncrement, + stopChan: make(chan struct{}), + startSignal: make(chan struct{}), + } +} + +// ExitOnNext signals to the container monitor that it should stop monitoring the container +// for exits the next time the process dies +func (m *containerMonitor) ExitOnNext() { + m.mux.Lock() + + // we need to protect having a double close of the channel when stop is called + // twice or else we will get a panic + if !m.shouldStop { + m.shouldStop = true + close(m.stopChan) + } + + m.mux.Unlock() +} + +// Close closes the container's resources such as networking allocations and +// unmounts the container's root filesystem +func (m *containerMonitor) Close() error { + // Cleanup networking and mounts + m.container.cleanup() + + // FIXME: here is a race condition between two RUN instructions in Dockerfile + // because they share the same runconfig and change image. 
Must be fixed + in builder/builder.go + if err := m.container.toDisk(); err != nil { + log.Errorf("Error dumping container %s state to disk: %s", m.container.ID, err) + + return err + } + + return nil +} + +// Start starts the container's process and monitors it according to the restart policy +func (m *containerMonitor) Start() error { + var ( + err error + exitStatus int + // this variable indicates where we are in the execution flow: + // before Run or after + afterRun bool + ) + + // ensure that when the monitor finally exits we release the networking and unmount the rootfs + defer func() { + if afterRun { + m.container.Lock() + m.container.setStopped(exitStatus) + defer m.container.Unlock() + } + m.Close() + }() + + // reset the restart count + m.container.RestartCount = -1 + + for { + m.container.RestartCount++ + + if err := m.container.startLoggingToDisk(); err != nil { + m.resetContainer(false) + + return err + } + + pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin) + + m.container.LogEvent("start") + + m.lastStartTime = time.Now() + + if exitStatus, err = m.container.daemon.Run(m.container, pipes, m.callback); err != nil { + // if we receive an internal error from the initial start of a container then let's + // return it instead of entering the restart loop + if m.container.RestartCount == 0 { + m.resetContainer(false) + + return err + } + + log.Errorf("Error running container: %s", err) + } + + // here container.Lock is already lost + afterRun = true + + m.resetMonitor(err == nil && exitStatus == 0) + + if m.shouldRestart(exitStatus) { + m.container.SetRestarting(exitStatus) + m.container.LogEvent("die") + m.resetContainer(true) + + // sleep with a small time increment between each restart to help avoid issues caused by quickly + // restarting the container because of some types of errors ( networking cut out, etc... ) + m.waitForNextRestart() + + // we need to check this before reentering the loop because the waitForNextRestart could have + // been terminated by a request from a user + if m.shouldStop { + return err + } + continue + } + m.container.LogEvent("die") + m.resetContainer(true) + return err + } +} + +// resetMonitor resets the stateful fields on the containerMonitor based on the +// previous run's success or failure. Regardless of success, if the container had +// an execution time of more than 10s then reset the timer back to the default +func (m *containerMonitor) resetMonitor(successful bool) { + executionTime := time.Now().Sub(m.lastStartTime).Seconds() + + if executionTime > 10 { + m.timeIncrement = defaultTimeIncrement + } else { + // otherwise we need to increment the amount of time we wait before restarting + // the process. 
We will build up by multiplying the increment by 2 + m.timeIncrement *= 2 + } + + // the container exited successfully so we need to reset the failure counter + if successful { + m.failureCount = 0 + } else { + m.failureCount++ + } +} + +// waitForNextRestart waits with the default time increment to restart the container unless +// a user or docker asks for the container to be stopped +func (m *containerMonitor) waitForNextRestart() { + select { + case <-time.After(time.Duration(m.timeIncrement) * time.Millisecond): + case <-m.stopChan: + } +} + +// shouldRestart checks the restart policy and applies the rules to determine if +// the container's process should be restarted +func (m *containerMonitor) shouldRestart(exitStatus int) bool { + m.mux.Lock() + defer m.mux.Unlock() + + // do not restart if the user or docker has requested that this container be stopped + if m.shouldStop { + return false + } + + switch m.restartPolicy.Name { + case "always": + return true + case "on-failure": + // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count + if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount >= max { + log.Debugf("stopping restart of container %s because the maximum failure count of %d has been reached", m.container.ID, max) + return false + } + + return exitStatus != 0 + } + + return false +} + +// callback ensures that the container's state is properly updated after we +// received ack from the execution drivers +func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid int) { + if processConfig.Tty { + // The callback is called after the process Start() + // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave + // which we close here. + if c, ok := processConfig.Stdout.(io.Closer); ok { + c.Close() + } + } + + m.container.setRunning(pid) + + // signal that the process has started + // close channel only if not closed + select { + case <-m.startSignal: + default: + close(m.startSignal) + } + + if err := m.container.ToDisk(); err != nil { + log.Debugf("%s", err) + } +} + +// resetContainer resets the container's IO and ensures that the command is able to be executed again +// by copying the data into a new struct +// if lock is true, the container is locked during reset +func (m *containerMonitor) resetContainer(lock bool) { + container := m.container + if lock { + container.Lock() + defer container.Unlock() + } + + if container.Config.OpenStdin { + if err := container.stdin.Close(); err != nil { + log.Errorf("%s: Error closing stdin: %s", container.ID, err) + } + } + + if err := container.stdout.Clean(); err != nil { + log.Errorf("%s: Error closing stdout: %s", container.ID, err) + } + + if err := container.stderr.Clean(); err != nil { + log.Errorf("%s: Error closing stderr: %s", container.ID, err) + } + + if container.command != nil && container.command.ProcessConfig.Terminal != nil { + if err := container.command.ProcessConfig.Terminal.Close(); err != nil { + log.Errorf("%s: Error closing terminal: %s", container.ID, err) + } + } + + // Re-create a brand new stdin pipe once the container exited + if container.Config.OpenStdin { + container.stdin, container.stdinPipe = io.Pipe() + } + + c := container.command.ProcessConfig.Cmd + + container.command.ProcessConfig.Cmd = exec.Cmd{ + Stdin: c.Stdin, + Stdout: c.Stdout, + Stderr: c.Stderr, + Path: c.Path, + Env: c.Env, + ExtraFiles: c.ExtraFiles, + Args: c.Args, + Dir: c.Dir, + SysProcAttr: c.SysProcAttr, + } +} diff -Nru 
docker.io-0.9.1~dfsg1/daemon/networkdriver/bridge/driver.go docker.io-1.3.2~dfsg1/daemon/networkdriver/bridge/driver.go --- docker.io-0.9.1~dfsg1/daemon/networkdriver/bridge/driver.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/networkdriver/bridge/driver.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,544 @@ +package bridge + +import ( + "fmt" + "io/ioutil" + "net" + "strings" + "sync" + + "github.com/docker/docker/daemon/networkdriver" + "github.com/docker/docker/daemon/networkdriver/ipallocator" + "github.com/docker/docker/daemon/networkdriver/portallocator" + "github.com/docker/docker/daemon/networkdriver/portmapper" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/iptables" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/networkfs/resolvconf" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/libcontainer/netlink" +) + +const ( + DefaultNetworkBridge = "docker0" + MaxAllocatedPortAttempts = 10 +) + +// networkInterface represents the networking stack of a container +type networkInterface struct { + IP net.IP + PortMappings []net.Addr // these are mappings to the host interfaces +} + +type ifaces struct { + c map[string]*networkInterface + sync.Mutex +} + +func (i *ifaces) Set(key string, n *networkInterface) { + i.Lock() + i.c[key] = n + i.Unlock() +} + +func (i *ifaces) Get(key string) *networkInterface { + i.Lock() + res := i.c[key] + i.Unlock() + return res +} + +var ( + addrs = []string{ + // Here we don't follow the convention of using the 1st IP of the range for the gateway. + // This is to use the same gateway IPs as the /24 ranges, which predate the /16 ranges. + // In theory this shouldn't matter - in practice there's bound to be a few scripts relying + // on the internal addressing or other stupid things like that. + // They shouldn't, but hey, let's not break them unless we really have to. 
+ "172.17.42.1/16", // Don't use 172.16.0.0/16, it conflicts with EC2 DNS 172.16.0.23 + "10.0.42.1/16", // Don't even try using the entire /8, that's too intrusive + "10.1.42.1/16", + "10.42.42.1/16", + "172.16.42.1/24", + "172.16.43.1/24", + "172.16.44.1/24", + "10.0.42.1/24", + "10.0.43.1/24", + "192.168.42.1/24", + "192.168.43.1/24", + "192.168.44.1/24", + } + + bridgeIface string + bridgeNetwork *net.IPNet + + defaultBindingIP = net.ParseIP("0.0.0.0") + currentInterfaces = ifaces{c: make(map[string]*networkInterface)} +) + +func InitDriver(job *engine.Job) engine.Status { + var ( + network *net.IPNet + enableIPTables = job.GetenvBool("EnableIptables") + icc = job.GetenvBool("InterContainerCommunication") + ipMasq = job.GetenvBool("EnableIpMasq") + ipForward = job.GetenvBool("EnableIpForward") + bridgeIP = job.Getenv("BridgeIP") + fixedCIDR = job.Getenv("FixedCIDR") + ) + + if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" { + defaultBindingIP = net.ParseIP(defaultIP) + } + + bridgeIface = job.Getenv("BridgeIface") + usingDefaultBridge := false + if bridgeIface == "" { + usingDefaultBridge = true + bridgeIface = DefaultNetworkBridge + } + + addr, err := networkdriver.GetIfaceAddr(bridgeIface) + if err != nil { + // If we're not using the default bridge, fail without trying to create it + if !usingDefaultBridge { + return job.Error(err) + } + // If the iface is not found, try to create it + if err := createBridge(bridgeIP); err != nil { + return job.Error(err) + } + + addr, err = networkdriver.GetIfaceAddr(bridgeIface) + if err != nil { + return job.Error(err) + } + network = addr.(*net.IPNet) + } else { + network = addr.(*net.IPNet) + // validate that the bridge ip matches the ip specified by BridgeIP + if bridgeIP != "" { + bip, _, err := net.ParseCIDR(bridgeIP) + if err != nil { + return job.Error(err) + } + if !network.IP.Equal(bip) { + return job.Errorf("bridge ip (%s) does not match existing bridge configuration %s", network.IP, bip) + } + } + } + + // Configure iptables for link support + if enableIPTables { + if err := setupIPTables(addr, icc, ipMasq); err != nil { + return job.Error(err) + } + } + + if ipForward { + // Enable IPv4 forwarding + if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil { + job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err) + } + } + + // We can always try removing the iptables + if err := iptables.RemoveExistingChain("DOCKER"); err != nil { + return job.Error(err) + } + + if enableIPTables { + chain, err := iptables.NewChain("DOCKER", bridgeIface) + if err != nil { + return job.Error(err) + } + portmapper.SetIptablesChain(chain) + } + + bridgeNetwork = network + if fixedCIDR != "" { + _, subnet, err := net.ParseCIDR(fixedCIDR) + if err != nil { + return job.Error(err) + } + log.Debugf("Subnet: %v", subnet) + if err := ipallocator.RegisterSubnet(bridgeNetwork, subnet); err != nil { + return job.Error(err) + } + } + + // https://github.com/docker/docker/issues/2768 + job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeNetwork.IP) + + for name, f := range map[string]engine.Handler{ + "allocate_interface": Allocate, + "release_interface": Release, + "allocate_port": AllocatePort, + "link": LinkContainers, + } { + if err := job.Eng.Register(name, f); err != nil { + return job.Error(err) + } + } + return engine.StatusOK +} + +func setupIPTables(addr net.Addr, icc, ipmasq bool) error { + // Enable NAT + + if ipmasq { + natArgs := []string{"POSTROUTING", "-t", "nat", "-s", addr.String(), 
"!", "-o", bridgeIface, "-j", "MASQUERADE"} + + if !iptables.Exists(natArgs...) { + if output, err := iptables.Raw(append([]string{"-I"}, natArgs...)...); err != nil { + return fmt.Errorf("Unable to enable network bridge NAT: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error iptables postrouting: %s", output) + } + } + } + + var ( + args = []string{"FORWARD", "-i", bridgeIface, "-o", bridgeIface, "-j"} + acceptArgs = append(args, "ACCEPT") + dropArgs = append(args, "DROP") + ) + + if !icc { + iptables.Raw(append([]string{"-D"}, acceptArgs...)...) + + if !iptables.Exists(dropArgs...) { + log.Debugf("Disable inter-container communication") + if output, err := iptables.Raw(append([]string{"-I"}, dropArgs...)...); err != nil { + return fmt.Errorf("Unable to prevent intercontainer communication: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error disabling intercontainer communication: %s", output) + } + } + } else { + iptables.Raw(append([]string{"-D"}, dropArgs...)...) + + if !iptables.Exists(acceptArgs...) { + log.Debugf("Enable inter-container communication") + if output, err := iptables.Raw(append([]string{"-I"}, acceptArgs...)...); err != nil { + return fmt.Errorf("Unable to allow intercontainer communication: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error enabling intercontainer communication: %s", output) + } + } + } + + // Accept all non-intercontainer outgoing packets + outgoingArgs := []string{"FORWARD", "-i", bridgeIface, "!", "-o", bridgeIface, "-j", "ACCEPT"} + if !iptables.Exists(outgoingArgs...) { + if output, err := iptables.Raw(append([]string{"-I"}, outgoingArgs...)...); err != nil { + return fmt.Errorf("Unable to allow outgoing packets: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error iptables allow outgoing: %s", output) + } + } + + // Accept incoming packets for existing connections + existingArgs := []string{"FORWARD", "-o", bridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"} + + if !iptables.Exists(existingArgs...) { + if output, err := iptables.Raw(append([]string{"-I"}, existingArgs...)...); err != nil { + return fmt.Errorf("Unable to allow incoming packets: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error iptables allow incoming: %s", output) + } + } + return nil +} + +// CreateBridgeIface creates a network bridge interface on the host system with the name `ifaceName`, +// and attempts to configure it with an address which doesn't conflict with any other interface on the host. +// If it can't find an address which doesn't conflict, it will return an error. +func createBridge(bridgeIP string) error { + nameservers := []string{} + resolvConf, _ := resolvconf.Get() + // we don't check for an error here, because we don't really care + // if we can't read /etc/resolv.conf. So instead we skip the append + // if resolvConf is nil. It either doesn't exist, or we can't read it + // for some reason. + if resolvConf != nil { + nameservers = append(nameservers, resolvconf.GetNameserversAsCIDR(resolvConf)...) 
+ } + + var ifaceAddr string + if len(bridgeIP) != 0 { + _, _, err := net.ParseCIDR(bridgeIP) + if err != nil { + return err + } + ifaceAddr = bridgeIP + } else { + for _, addr := range addrs { + _, dockerNetwork, err := net.ParseCIDR(addr) + if err != nil { + return err + } + if err := networkdriver.CheckNameserverOverlaps(nameservers, dockerNetwork); err == nil { + if err := networkdriver.CheckRouteOverlaps(dockerNetwork); err == nil { + ifaceAddr = addr + break + } else { + log.Debugf("%s %s", addr, err) + } + } + } + } + + if ifaceAddr == "" { + return fmt.Errorf("Could not find a free IP address range for interface '%s'. Please configure its address manually and run 'docker -b %s'", bridgeIface, bridgeIface) + } + log.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr) + + if err := createBridgeIface(bridgeIface); err != nil { + return err + } + + iface, err := net.InterfaceByName(bridgeIface) + if err != nil { + return err + } + + ipAddr, ipNet, err := net.ParseCIDR(ifaceAddr) + if err != nil { + return err + } + + if err := netlink.NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil { + return fmt.Errorf("Unable to add private network: %s", err) + } + if err := netlink.NetworkLinkUp(iface); err != nil { + return fmt.Errorf("Unable to start network bridge: %s", err) + } + return nil +} + +func createBridgeIface(name string) error { + kv, err := kernel.GetKernelVersion() + // only set the bridge's mac address if the kernel version is 3.3 or newer + // before that it was not supported + setBridgeMacAddr := err == nil && (kv.Kernel > 3 || (kv.Kernel == 3 && kv.Major >= 3)) + log.Debugf("setting bridge mac address = %v", setBridgeMacAddr) + return netlink.CreateBridge(name, setBridgeMacAddr) +} + +// Generate an IEEE802-compliant MAC address from the given IP address. +// +// The generator is guaranteed to be consistent: the same IP will always yield the same +// MAC address. This is to avoid ARP cache issues. +func generateMacAddr(ip net.IP) net.HardwareAddr { + hw := make(net.HardwareAddr, 6) + + // The first byte of the MAC address has to comply with these rules: + // 1. Unicast: Set the least-significant bit to 0. + // 2. Address is locally administered: Set the second-least-significant bit (U/L) to 1. + // 3. As "small" as possible: The veth address has to be "smaller" than the bridge address. + hw[0] = 0x02 + + // The first 24 bits of the MAC represent the Organizationally Unique Identifier (OUI). + // Since this address is locally administered, we can do whatever we want as long as + // it doesn't conflict with other addresses. + hw[1] = 0x42 + + // Insert the IP address into the last 32 bits of the MAC address. + // This is a simple way to guarantee the address will be consistent and unique. + copy(hw[2:], ip.To4()) + + return hw +} + +// Allocate a network interface +func Allocate(job *engine.Job) engine.Status { + var ( + ip net.IP + mac net.HardwareAddr + err error + id = job.Args[0] + requestedIP = net.ParseIP(job.Getenv("RequestedIP")) + ) + + if requestedIP != nil { + ip, err = ipallocator.RequestIP(bridgeNetwork, requestedIP) + } else { + ip, err = ipallocator.RequestIP(bridgeNetwork, nil) + } + if err != nil { + return job.Error(err) + } + + // If no explicit mac address was given, derive one from the container's IP. 
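+	// Editor's note (illustrative, not part of the original patch):
+	// generateMacAddr above is deterministic: the IPv4 address is embedded in
+	// the last four bytes of the MAC, which keeps ARP caches stable across
+	// container restarts:
+	//
+	//	generateMacAddr(net.ParseIP("10.0.0.2")) // 02:42:0a:00:00:02, every time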
+ if mac, err = net.ParseMAC(job.Getenv("RequestedMac")); err != nil { + mac = generateMacAddr(ip) + } + + out := engine.Env{} + out.Set("IP", ip.String()) + out.Set("Mask", bridgeNetwork.Mask.String()) + out.Set("Gateway", bridgeNetwork.IP.String()) + out.Set("MacAddress", mac.String()) + out.Set("Bridge", bridgeIface) + + size, _ := bridgeNetwork.Mask.Size() + out.SetInt("IPPrefixLen", size) + + currentInterfaces.Set(id, &networkInterface{ + IP: ip, + }) + + out.WriteTo(job.Stdout) + + return engine.StatusOK +} + +// Release releases an interface for a selected ip +func Release(job *engine.Job) engine.Status { + var ( + id = job.Args[0] + containerInterface = currentInterfaces.Get(id) + ) + + if containerInterface == nil { + return job.Errorf("No network information to release for %s", id) + } + + for _, nat := range containerInterface.PortMappings { + if err := portmapper.Unmap(nat); err != nil { + log.Infof("Unable to unmap port %s: %s", nat, err) + } + } + + if err := ipallocator.ReleaseIP(bridgeNetwork, containerInterface.IP); err != nil { + log.Infof("Unable to release ip %s", err) + } + return engine.StatusOK +} + +// Allocate an external port and map it to the interface +func AllocatePort(job *engine.Job) engine.Status { + var ( + err error + + ip = defaultBindingIP + id = job.Args[0] + hostIP = job.Getenv("HostIP") + hostPort = job.GetenvInt("HostPort") + containerPort = job.GetenvInt("ContainerPort") + proto = job.Getenv("Proto") + network = currentInterfaces.Get(id) + ) + + if hostIP != "" { + ip = net.ParseIP(hostIP) + if ip == nil { + return job.Errorf("Bad parameter: invalid host ip %s", hostIP) + } + } + + // host ip, proto, and host port + var container net.Addr + switch proto { + case "tcp": + container = &net.TCPAddr{IP: network.IP, Port: containerPort} + case "udp": + container = &net.UDPAddr{IP: network.IP, Port: containerPort} + default: + return job.Errorf("unsupported address type %s", proto) + } + + // + // Try up to 10 times to get a port that's not already allocated. + // + // In the event of failure to bind, return the error that portmapper.Map + // yields. + // + + var host net.Addr + for i := 0; i < MaxAllocatedPortAttempts; i++ { + if host, err = portmapper.Map(container, ip, hostPort); err == nil { + break + } + + if allocerr, ok := err.(portallocator.ErrPortAlreadyAllocated); ok { + // There is no point in immediately retrying to map an explicitly + // chosen port. + if hostPort != 0 { + job.Logf("Failed to bind %s for container address %s: %s", allocerr.IPPort(), container.String(), allocerr.Error()) + break + } + + // Automatically chosen 'free' port failed to bind: move on to the next. + job.Logf("Failed to bind %s for container address %s. 
Trying another port.", allocerr.IPPort(), container.String()) + } else { + // some other error during mapping + job.Logf("Received an unexpected error during port allocation: %s", err.Error()) + break + } + } + + if err != nil { + return job.Error(err) + } + + network.PortMappings = append(network.PortMappings, host) + + out := engine.Env{} + switch netAddr := host.(type) { + case *net.TCPAddr: + out.Set("HostIP", netAddr.IP.String()) + out.SetInt("HostPort", netAddr.Port) + case *net.UDPAddr: + out.Set("HostIP", netAddr.IP.String()) + out.SetInt("HostPort", netAddr.Port) + } + if _, err := out.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + + return engine.StatusOK +} + +func LinkContainers(job *engine.Job) engine.Status { + var ( + action = job.Args[0] + childIP = job.Getenv("ChildIP") + parentIP = job.Getenv("ParentIP") + ignoreErrors = job.GetenvBool("IgnoreErrors") + ports = job.GetenvList("Ports") + ) + split := func(p string) (string, string) { + parts := strings.Split(p, "/") + return parts[0], parts[1] + } + + for _, p := range ports { + port, proto := split(p) + if output, err := iptables.Raw(action, "FORWARD", + "-i", bridgeIface, "-o", bridgeIface, + "-p", proto, + "-s", parentIP, + "--dport", port, + "-d", childIP, + "-j", "ACCEPT"); !ignoreErrors && err != nil { + return job.Error(err) + } else if len(output) != 0 { + return job.Errorf("Error toggling iptables forward: %s", output) + } + + if output, err := iptables.Raw(action, "FORWARD", + "-i", bridgeIface, "-o", bridgeIface, + "-p", proto, + "-s", childIP, + "--sport", port, + "-d", parentIP, + "-j", "ACCEPT"); !ignoreErrors && err != nil { + return job.Error(err) + } else if len(output) != 0 { + return job.Errorf("Error toggling iptables forward: %s", output) + } + } + return engine.StatusOK +} diff -Nru docker.io-0.9.1~dfsg1/daemon/networkdriver/bridge/driver_test.go docker.io-1.3.2~dfsg1/daemon/networkdriver/bridge/driver_test.go --- docker.io-0.9.1~dfsg1/daemon/networkdriver/bridge/driver_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/networkdriver/bridge/driver_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,120 @@ +package bridge + +import ( + "net" + "strconv" + "testing" + + "github.com/docker/docker/daemon/networkdriver/portmapper" + "github.com/docker/docker/engine" +) + +func init() { + // reset the new proxy command for mocking out the userland proxy in tests + portmapper.NewProxy = portmapper.NewMockProxyCommand +} + +func findFreePort(t *testing.T) int { + l, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatal("Failed to find a free port") + } + defer l.Close() + + result, err := net.ResolveTCPAddr("tcp", l.Addr().String()) + if err != nil { + t.Fatal("Failed to resolve address to identify free port") + } + return result.Port +} + +func newPortAllocationJob(eng *engine.Engine, port int) (job *engine.Job) { + strPort := strconv.Itoa(port) + + job = eng.Job("allocate_port", "container_id") + job.Setenv("HostIP", "127.0.0.1") + job.Setenv("HostPort", strPort) + job.Setenv("Proto", "tcp") + job.Setenv("ContainerPort", strPort) + return +} + +func newPortAllocationJobWithInvalidHostIP(eng *engine.Engine, port int) (job *engine.Job) { + strPort := strconv.Itoa(port) + + job = eng.Job("allocate_port", "container_id") + job.Setenv("HostIP", "localhost") + job.Setenv("HostPort", strPort) + job.Setenv("Proto", "tcp") + job.Setenv("ContainerPort", strPort) + return +} + +func TestAllocatePortDetection(t *testing.T) { + eng := engine.New() + eng.Logging = false + + 
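+	// Editor's note (not part of the original patch): per the AllocatePort
+	// loop above, only a kernel-chosen port (HostPort == 0) is retried after
+	// ErrPortAlreadyAllocated; an explicitly requested port fails on the first
+	// conflict, which is what the duplicate allocation in this test relies on.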
freePort := findFreePort(t) + + // Init driver + job := eng.Job("initdriver") + if res := InitDriver(job); res != engine.StatusOK { + t.Fatal("Failed to initialize network driver") + } + + // Allocate interface + job = eng.Job("allocate_interface", "container_id") + if res := Allocate(job); res != engine.StatusOK { + t.Fatal("Failed to allocate network interface") + } + + // Allocate same port twice, expect failure on second call + job = newPortAllocationJob(eng, freePort) + if res := AllocatePort(job); res != engine.StatusOK { + t.Fatal("Failed to find a free port to allocate") + } + if res := AllocatePort(job); res == engine.StatusOK { + t.Fatal("Duplicate port allocation granted by AllocatePort") + } +} + +func TestHostnameFormatChecking(t *testing.T) { + eng := engine.New() + eng.Logging = false + + freePort := findFreePort(t) + + // Init driver + job := eng.Job("initdriver") + if res := InitDriver(job); res != engine.StatusOK { + t.Fatal("Failed to initialize network driver") + } + + // Allocate interface + job = eng.Job("allocate_interface", "container_id") + if res := Allocate(job); res != engine.StatusOK { + t.Fatal("Failed to allocate network interface") + } + + // Allocate port with invalid HostIP, expect failure with Bad Request http status + job = newPortAllocationJobWithInvalidHostIP(eng, freePort) + if res := AllocatePort(job); res == engine.StatusOK { + t.Fatal("Failed to check invalid HostIP") + } +} + +func TestMacAddrGeneration(t *testing.T) { + ip := net.ParseIP("192.168.0.1") + mac := generateMacAddr(ip).String() + + // Should be consistent. + if generateMacAddr(ip).String() != mac { + t.Fatal("Inconsistent MAC address") + } + + // Should be unique. + ip2 := net.ParseIP("192.168.0.2") + if generateMacAddr(ip2).String() == mac { + t.Fatal("Non-unique MAC address") + } +} diff -Nru docker.io-0.9.1~dfsg1/daemon/networkdriver/ipallocator/allocator.go docker.io-1.3.2~dfsg1/daemon/networkdriver/ipallocator/allocator.go --- docker.io-0.9.1~dfsg1/daemon/networkdriver/ipallocator/allocator.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/networkdriver/ipallocator/allocator.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,150 @@ +package ipallocator + +import ( + "encoding/binary" + "errors" + "net" + "sync" + + "github.com/docker/docker/daemon/networkdriver" +) + +// allocatedMap is a thread-unsafe set of allocated IPs +type allocatedMap struct { + p map[uint32]struct{} + last uint32 + begin uint32 + end uint32 +} + +func newAllocatedMap(network *net.IPNet) *allocatedMap { + firstIP, lastIP := networkdriver.NetworkRange(network) + begin := ipToInt(firstIP) + 2 + end := ipToInt(lastIP) - 1 + return &allocatedMap{ + p: make(map[uint32]struct{}), + begin: begin, + end: end, + last: begin - 1, // so first allocated will be begin + } +} + +type networkSet map[string]*allocatedMap + +var ( + ErrNoAvailableIPs = errors.New("no available ip addresses on network") + ErrIPAlreadyAllocated = errors.New("ip already allocated") + ErrIPOutOfRange = errors.New("requested ip is out of range") + ErrNetworkAlreadyRegistered = errors.New("network already registered") + ErrBadSubnet = errors.New("network does not contain specified subnet") +) + +var ( + lock = sync.Mutex{} + allocatedIPs = networkSet{} +) + +// RegisterSubnet registers a network in the global allocator with bounds +
To use a custom allocation range you must call +// this method before the first RequestIP; otherwise the full network range will be used +func RegisterSubnet(network *net.IPNet, subnet *net.IPNet) error { + lock.Lock() + defer lock.Unlock() + key := network.String() + if _, ok := allocatedIPs[key]; ok { + return ErrNetworkAlreadyRegistered + } + n := newAllocatedMap(network) + beginIP, endIP := networkdriver.NetworkRange(subnet) + begin, end := ipToInt(beginIP)+1, ipToInt(endIP)-1 + if !(begin >= n.begin && end <= n.end && begin < end) { + return ErrBadSubnet + } + n.begin = begin + n.end = end + n.last = begin - 1 + allocatedIPs[key] = n + return nil +} + +// RequestIP requests an available ip from the given network. It +// will return the next available ip if the ip provided is nil. If the +// ip provided is not nil it will validate that the provided ip is available +// for use, or return an error +func RequestIP(network *net.IPNet, ip net.IP) (net.IP, error) { + lock.Lock() + defer lock.Unlock() + key := network.String() + allocated, ok := allocatedIPs[key] + if !ok { + allocated = newAllocatedMap(network) + allocatedIPs[key] = allocated + } + + if ip == nil { + return allocated.getNextIP() + } + return allocated.checkIP(ip) +} + +// ReleaseIP adds the provided ip back into the pool of +// available ips to be returned for use. +func ReleaseIP(network *net.IPNet, ip net.IP) error { + lock.Lock() + defer lock.Unlock() + if allocated, exists := allocatedIPs[network.String()]; exists { + pos := ipToInt(ip) + delete(allocated.p, pos) + } + return nil +} + +func (allocated *allocatedMap) checkIP(ip net.IP) (net.IP, error) { + pos := ipToInt(ip) + + // Verify that the IP address has not been already allocated. + if _, ok := allocated.p[pos]; ok { + return nil, ErrIPAlreadyAllocated + } + + // Verify that the IP address is within our network range. + if pos < allocated.begin || pos > allocated.end { + return nil, ErrIPOutOfRange + } + + // Register the IP. + allocated.p[pos] = struct{}{} + allocated.last = pos + + return ip, nil +} + +
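(Editor's aside: a minimal caller-side sketch of the RegisterSubnet contract just stated, restricting allocation on 192.168.0.0/24 to the 192.168.0.8/29 sub-range; it assumes the import path introduced by this diff and mirrors the numbers used later in TestAllocateFromRange.)

package main

import (
	"fmt"
	"log"
	"net"

	"github.com/docker/docker/daemon/networkdriver/ipallocator"
)

func main() {
	_, network, _ := net.ParseCIDR("192.168.0.0/24")
	_, subnet, _ := net.ParseCIDR("192.168.0.8/29")

	// Must run before the first RequestIP for this network.
	if err := ipallocator.RegisterSubnet(network, subnet); err != nil {
		log.Fatal(err) // ErrNetworkAlreadyRegistered or ErrBadSubnet
	}
	ip, err := ipallocator.RequestIP(network, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ip) // 192.168.0.9, the first usable address in the sub-range
}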
+// getNextIP returns the next available ip in the network, scanning +// forward from the last allocated address and wrapping around at the +// end of the range +func (allocated *allocatedMap) getNextIP() (net.IP, error) { + for pos := allocated.last + 1; pos != allocated.last; pos++ { + if pos > allocated.end { + pos = allocated.begin + } + if _, ok := allocated.p[pos]; ok { + continue + } + allocated.p[pos] = struct{}{} + allocated.last = pos + return intToIP(pos), nil + } + return nil, ErrNoAvailableIPs +} + +// Converts a 4-byte IP into a 32-bit integer +func ipToInt(ip net.IP) uint32 { + return binary.BigEndian.Uint32(ip.To4()) +} + +// Converts a 32-bit integer into a 4-byte IP address +func intToIP(n uint32) net.IP { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, n) + ip := net.IP(b) + return ip +} diff -Nru docker.io-0.9.1~dfsg1/daemon/networkdriver/ipallocator/allocator_test.go docker.io-1.3.2~dfsg1/daemon/networkdriver/ipallocator/allocator_test.go --- docker.io-0.9.1~dfsg1/daemon/networkdriver/ipallocator/allocator_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/networkdriver/ipallocator/allocator_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,434 @@ +package ipallocator + +import ( + "fmt" + "net" + "testing" +) + +func reset() { + allocatedIPs = networkSet{} +} + +func TestRequestNewIps(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + + var ip net.IP + var err error + for i := 2; i < 10; i++ { + ip, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + if expected := fmt.Sprintf("192.168.0.%d", i); ip.String() != expected { + t.Fatalf("Expected ip %s got %s", expected, ip.String()) + } + } + value := intToIP(ipToInt(ip) + 1).String() + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } + ip, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + if ip.String() != value { + t.Fatalf("Expected to receive the next ip %s got %s", value, ip.String()) + } +} + +func TestReleaseIp(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } +} + +func TestGetReleasedIp(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + value := ip.String() + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } + + for i := 0; i < 252; i++ { + _, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + err = ReleaseIP(network, ip) + if err != nil { + t.Fatal(err) + } + } + + ip, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + if ip.String() != value { + t.Fatalf("Expected to receive same ip %s got %s", value, ip.String()) + } +} + +func TestRequestSpecificIp(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 224}, + } + + ip := net.ParseIP("192.168.0.5") + + // Request a "good" IP. + if _, err := RequestIP(network, ip); err != nil { + t.Fatal(err) + } + + // Request the same IP again. + if _, err := RequestIP(network, ip); err != ErrIPAlreadyAllocated { + t.Fatalf("Got the same IP twice: %#v", err) + } + 
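(Editor's aside: a quick standalone check of the ipToInt/intToIP conversions defined above; the helpers are restated inline here rather than imported, and the round trip shows 192.168.0.1 mapping to 0xc0a80001 and the next integer mapping back to 192.168.0.2.)

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

func main() {
	ip := net.ParseIP("192.168.0.1")
	n := binary.BigEndian.Uint32(ip.To4()) // same computation as ipToInt
	fmt.Printf("%s -> %#08x\n", ip, n)     // 192.168.0.1 -> 0xc0a80001

	b := make([]byte, 4)
	binary.BigEndian.PutUint32(b, n+1) // same computation as intToIP
	fmt.Println(net.IP(b))             // 192.168.0.2, the next address
}

+ // Request an out of range IP.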
+ if _, err := RequestIP(network, net.ParseIP("192.168.0.42")); err != ErrIPOutOfRange { + t.Fatalf("Got an out of range IP: %#v", err) + } +} + +func TestConversion(t *testing.T) { + ip := net.ParseIP("127.0.0.1") + i := ipToInt(ip) + if i == 0 { + t.Fatal("converted to zero") + } + conv := intToIP(i) + if !ip.Equal(conv) { + t.Error(conv.String()) + } +} + +func TestIPAllocator(t *testing.T) { + expectedIPs := []net.IP{ + 0: net.IPv4(127, 0, 0, 2), + 1: net.IPv4(127, 0, 0, 3), + 2: net.IPv4(127, 0, 0, 4), + 3: net.IPv4(127, 0, 0, 5), + 4: net.IPv4(127, 0, 0, 6), + } + + gwIP, n, _ := net.ParseCIDR("127.0.0.1/29") + network := &net.IPNet{IP: gwIP, Mask: n.Mask} + // Pool after initialisation (f = free, u = used) + // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) + // ↑ + + // Check that we get 5 IPs, from 127.0.0.2–127.0.0.6, in that + // order. + for i := 0; i < 5; i++ { + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + assertIPEquals(t, expectedIPs[i], ip) + } + // Before loop begin + // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) + // ↑ + + // After i = 0 + // 2(u) - 3(f) - 4(f) - 5(f) - 6(f) + // ↑ + + // After i = 1 + // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) + // ↑ + + // After i = 2 + // 2(u) - 3(u) - 4(u) - 5(f) - 6(f) + // ↑ + + // After i = 3 + // 2(u) - 3(u) - 4(u) - 5(u) - 6(f) + // ↑ + + // After i = 4 + // 2(u) - 3(u) - 4(u) - 5(u) - 6(u) + // ↑ + + // Check that there are no more IPs + ip, err := RequestIP(network, nil) + if err == nil { + t.Fatalf("There shouldn't be any IP addresses at this point, got %s\n", ip) + } + + // Release some IPs in non-sequential order + if err := ReleaseIP(network, expectedIPs[3]); err != nil { + t.Fatal(err) + } + // 2(u) - 3(u) - 4(u) - 5(f) - 6(u) + // ↑ + + if err := ReleaseIP(network, expectedIPs[2]); err != nil { + t.Fatal(err) + } + // 2(u) - 3(u) - 4(f) - 5(f) - 6(u) + // ↑ + + if err := ReleaseIP(network, expectedIPs[4]); err != nil { + t.Fatal(err) + } + // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) + // ↑ + + // Make sure that IPs are reused in sequential order, starting + // with the first released IP + newIPs := make([]net.IP, 3) + for i := 0; i < 3; i++ { + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + newIPs[i] = ip + } + assertIPEquals(t, expectedIPs[2], newIPs[0]) + assertIPEquals(t, expectedIPs[3], newIPs[1]) + assertIPEquals(t, expectedIPs[4], newIPs[2]) + + _, err = RequestIP(network, nil) + if err == nil { + t.Fatal("There shouldn't be any IP addresses at this point") + } +} + +func TestAllocateFirstIP(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 0}, + Mask: []byte{255, 255, 255, 0}, + } + + firstIP := network.IP.To4().Mask(network.Mask) + first := ipToInt(firstIP) + 1 + + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + allocated := ipToInt(ip) + + if allocated == first { + t.Fatalf("allocated ip should not equal first ip: %d == %d", first, allocated) + } +} + +func TestAllocateAllIps(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + + var ( + current, first net.IP + err error + isFirst = true + ) + + for err == nil { + current, err = RequestIP(network, nil) + if isFirst { + first = current + isFirst = false + } + } + + if err != ErrNoAvailableIPs { + t.Fatal(err) + } + + if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs { + t.Fatal(err) + } + + if err := ReleaseIP(network, first); err != nil { + t.Fatal(err) + } + + again, err := RequestIP(network, nil) 
+ if err != nil { + t.Fatal(err) + } + + assertIPEquals(t, first, again) +} + +func TestAllocateDifferentSubnets(t *testing.T) { + defer reset() + network1 := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + network2 := &net.IPNet{ + IP: []byte{127, 0, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + expectedIPs := []net.IP{ + 0: net.IPv4(192, 168, 0, 2), + 1: net.IPv4(192, 168, 0, 3), + 2: net.IPv4(127, 0, 0, 2), + 3: net.IPv4(127, 0, 0, 3), + } + + ip11, err := RequestIP(network1, nil) + if err != nil { + t.Fatal(err) + } + ip12, err := RequestIP(network1, nil) + if err != nil { + t.Fatal(err) + } + ip21, err := RequestIP(network2, nil) + if err != nil { + t.Fatal(err) + } + ip22, err := RequestIP(network2, nil) + if err != nil { + t.Fatal(err) + } + assertIPEquals(t, expectedIPs[0], ip11) + assertIPEquals(t, expectedIPs[1], ip12) + assertIPEquals(t, expectedIPs[2], ip21) + assertIPEquals(t, expectedIPs[3], ip22) +} +func TestRegisterBadTwice(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 1, 1}, + Mask: []byte{255, 255, 255, 0}, + } + subnet := &net.IPNet{ + IP: []byte{192, 168, 1, 8}, + Mask: []byte{255, 255, 255, 248}, + } + + if err := RegisterSubnet(network, subnet); err != nil { + t.Fatal(err) + } + subnet = &net.IPNet{ + IP: []byte{192, 168, 1, 16}, + Mask: []byte{255, 255, 255, 248}, + } + if err := RegisterSubnet(network, subnet); err != ErrNetworkAlreadyRegistered { + t.Fatalf("Expected ErrNetworkAlreadyRegistered error, got %v", err) + } +} + +func TestRegisterBadRange(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 1, 1}, + Mask: []byte{255, 255, 255, 0}, + } + subnet := &net.IPNet{ + IP: []byte{192, 168, 1, 1}, + Mask: []byte{255, 255, 0, 0}, + } + if err := RegisterSubnet(network, subnet); err != ErrBadSubnet { + t.Fatalf("Expected ErrBadSubnet error, got %v", err) + } +} + +func TestAllocateFromRange(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + // 192.168.0.9 - 192.168.0.14 + subnet := &net.IPNet{ + IP: []byte{192, 168, 0, 8}, + Mask: []byte{255, 255, 255, 248}, + } + if err := RegisterSubnet(network, subnet); err != nil { + t.Fatal(err) + } + expectedIPs := []net.IP{ + 0: net.IPv4(192, 168, 0, 9), + 1: net.IPv4(192, 168, 0, 10), + 2: net.IPv4(192, 168, 0, 11), + 3: net.IPv4(192, 168, 0, 12), + 4: net.IPv4(192, 168, 0, 13), + 5: net.IPv4(192, 168, 0, 14), + } + for _, ip := range expectedIPs { + rip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + assertIPEquals(t, ip, rip) + } + + if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs { + t.Fatalf("Expected ErrNoAvailableIPs error, got %v", err) + } + for _, ip := range expectedIPs { + ReleaseIP(network, ip) + rip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + assertIPEquals(t, ip, rip) + } +} + +func assertIPEquals(t *testing.T, ip1, ip2 net.IP) { + if !ip1.Equal(ip2) { + t.Fatalf("Expected IP %s, got %s", ip1, ip2) + } +} + +func BenchmarkRequestIP(b *testing.B) { + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < 253; j++ { + _, err := RequestIP(network, nil) + if err != nil { + b.Fatal(err) + } + } + reset() + } +} diff -Nru docker.io-0.9.1~dfsg1/daemon/networkdriver/network.go docker.io-1.3.2~dfsg1/daemon/networkdriver/network.go --- 
docker.io-0.9.1~dfsg1/daemon/networkdriver/network.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/networkdriver/network.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,10 @@ +package networkdriver + +import ( + "errors" +) + +var ( + ErrNetworkOverlapsWithNameservers = errors.New("requested network overlaps with nameserver") + ErrNetworkOverlaps = errors.New("requested network overlaps with existing network") +) diff -Nru docker.io-0.9.1~dfsg1/daemon/networkdriver/network_test.go docker.io-1.3.2~dfsg1/daemon/networkdriver/network_test.go --- docker.io-0.9.1~dfsg1/daemon/networkdriver/network_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/networkdriver/network_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,190 @@ +package networkdriver + +import ( + "net" + "testing" + + "github.com/docker/libcontainer/netlink" +) + +func TestNonOverlappingNameservers(t *testing.T) { + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + nameservers := []string{ + "127.0.0.1/32", + } + + if err := CheckNameserverOverlaps(nameservers, network); err != nil { + t.Fatal(err) + } +} + +func TestOverlappingNameservers(t *testing.T) { + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + nameservers := []string{ + "192.168.0.1/32", + } + + if err := CheckNameserverOverlaps(nameservers, network); err == nil { + t.Fatalf("Expected error %s got %s", ErrNetworkOverlapsWithNameservers, err) + } +} + +func TestCheckRouteOverlaps(t *testing.T) { + orig := networkGetRoutesFct + defer func() { + networkGetRoutesFct = orig + }() + networkGetRoutesFct = func() ([]netlink.Route, error) { + routesData := []string{"10.0.2.0/32", "10.0.3.0/24", "10.0.42.0/24", "172.16.42.0/24", "192.168.142.0/24"} + + routes := []netlink.Route{} + for _, addr := range routesData { + _, netX, _ := net.ParseCIDR(addr) + routes = append(routes, netlink.Route{IPNet: netX}) + } + return routes, nil + } + + _, netX, _ := net.ParseCIDR("172.16.0.1/24") + if err := CheckRouteOverlaps(netX); err != nil { + t.Fatal(err) + } + + _, netX, _ = net.ParseCIDR("10.0.2.0/24") + if err := CheckRouteOverlaps(netX); err == nil { + t.Fatalf("10.0.2.0/24 and 10.0.2.0/32 should overlap but they don't") + } +} + +func TestCheckNameserverOverlaps(t *testing.T) { + nameservers := []string{"10.0.2.3/32", "192.168.102.1/32"} + + _, netX, _ := net.ParseCIDR("10.0.2.3/32") + + if err := CheckNameserverOverlaps(nameservers, netX); err == nil { + t.Fatalf("%s should overlap 10.0.2.3/32 but doesn't", netX) + } + + _, netX, _ = net.ParseCIDR("192.168.102.2/32") + + if err := CheckNameserverOverlaps(nameservers, netX); err != nil { + t.Fatalf("%s should not overlap %v but it does", netX, nameservers) + } +} + +func AssertOverlap(CIDRx string, CIDRy string, t *testing.T) { + _, netX, _ := net.ParseCIDR(CIDRx) + _, netY, _ := net.ParseCIDR(CIDRy) + if !NetworkOverlaps(netX, netY) { + t.Errorf("%v and %v should overlap", netX, netY) + } +} + +func AssertNoOverlap(CIDRx string, CIDRy string, t *testing.T) { + _, netX, _ := net.ParseCIDR(CIDRx) + _, netY, _ := net.ParseCIDR(CIDRy) + if NetworkOverlaps(netX, netY) { + t.Errorf("%v and %v should not overlap", netX, netY) + } +} + +func TestNetworkOverlaps(t *testing.T) { + //netY starts at same IP and ends within netX + AssertOverlap("172.16.0.1/24", "172.16.0.1/25", t) + //netY starts within netX and ends at same IP + AssertOverlap("172.16.0.1/24", "172.16.0.128/25", t) + //netY starts and ends 
within netX + AssertOverlap("172.16.0.1/24", "172.16.0.64/25", t) + //netY starts at same IP and ends outside of netX + AssertOverlap("172.16.0.1/24", "172.16.0.1/23", t) + //netY starts before and ends at same IP of netX + AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t) + //netY starts before and ends outside of netX + AssertOverlap("172.16.1.1/24", "172.16.0.1/22", t) + //netY starts and ends before netX + AssertNoOverlap("172.16.1.1/25", "172.16.0.1/24", t) + //netX starts and ends before netY + AssertNoOverlap("172.16.1.1/25", "172.16.2.1/24", t) +} + +func TestNetworkRange(t *testing.T) { + // Simple class C test + _, network, _ := net.ParseCIDR("192.168.0.1/24") + first, last := NetworkRange(network) + if !first.Equal(net.ParseIP("192.168.0.0")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("192.168.0.255")) { + t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 256 { + t.Error(size) + } + + // Class A test + _, network, _ = net.ParseCIDR("10.0.0.1/8") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.0.0.0")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.255.255.255")) { + t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 16777216 { + t.Error(size) + } + + // Class A, random IP address + _, network, _ = net.ParseCIDR("10.1.2.3/8") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.0.0.0")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.255.255.255")) { + t.Error(last.String()) + } + + // 32bit mask + _, network, _ = net.ParseCIDR("10.1.2.3/32") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.1.2.3")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.1.2.3")) { + t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 1 { + t.Error(size) + } + + // 31bit mask + _, network, _ = net.ParseCIDR("10.1.2.3/31") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.1.2.2")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.1.2.3")) { + t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 2 { + t.Error(size) + } + + // 26bit mask + _, network, _ = net.ParseCIDR("10.1.2.3/26") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.1.2.0")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.1.2.63")) { + t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 64 { + t.Error(size) + } +} diff -Nru docker.io-0.9.1~dfsg1/daemon/networkdriver/portallocator/portallocator.go docker.io-1.3.2~dfsg1/daemon/networkdriver/portallocator/portallocator.go --- docker.io-0.9.1~dfsg1/daemon/networkdriver/portallocator/portallocator.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/networkdriver/portallocator/portallocator.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,156 @@ +package portallocator + +import ( + "errors" + "fmt" + "net" + "sync" +) + +type portMap struct { + p map[int]struct{} + last int +} + +func newPortMap() *portMap { + return &portMap{ + p: map[int]struct{}{}, + } +} + +type protoMap map[string]*portMap + +func newProtoMap() protoMap { + return protoMap{ + "tcp": newPortMap(), + "udp": newPortMap(), + } +} + +type ipMapping map[string]protoMap + +const ( + BeginPortRange = 49153 + EndPortRange = 65535 +) + +var ( + ErrAllPortsAllocated = errors.New("all ports are allocated") + ErrUnknownProtocol = errors.New("unknown protocol") +) + +var ( + mutex sync.Mutex + 
+ defaultIP = net.ParseIP("0.0.0.0") + globalMap = ipMapping{} +) + +type ErrPortAlreadyAllocated struct { + ip string + port int +} + +func NewErrPortAlreadyAllocated(ip string, port int) ErrPortAlreadyAllocated { + return ErrPortAlreadyAllocated{ + ip: ip, + port: port, + } +} + +func (e ErrPortAlreadyAllocated) IP() string { + return e.ip +} + +func (e ErrPortAlreadyAllocated) Port() int { + return e.port +} + +func (e ErrPortAlreadyAllocated) IPPort() string { + return fmt.Sprintf("%s:%d", e.ip, e.port) +} + +func (e ErrPortAlreadyAllocated) Error() string { + return fmt.Sprintf("Bind for %s:%d failed: port is already allocated", e.ip, e.port) +} + +// RequestPort requests a new port from the global ports pool for the specified ip and proto. +// If port is 0 it returns the first free port. Otherwise it checks port availability +// in the pool and returns that port, or an error if the port is already allocated. +func RequestPort(ip net.IP, proto string, port int) (int, error) { + mutex.Lock() + defer mutex.Unlock() + + if proto != "tcp" && proto != "udp" { + return 0, ErrUnknownProtocol + } + + if ip == nil { + ip = defaultIP + } + ipstr := ip.String() + protomap, ok := globalMap[ipstr] + if !ok { + protomap = newProtoMap() + globalMap[ipstr] = protomap + } + mapping := protomap[proto] + if port > 0 { + if _, ok := mapping.p[port]; !ok { + mapping.p[port] = struct{}{} + return port, nil + } + return 0, NewErrPortAlreadyAllocated(ipstr, port) + } + + port, err := mapping.findPort() + if err != nil { + return 0, err + } + return port, nil +} + +// ReleasePort releases a port from the global ports pool for the specified ip and proto. +func ReleasePort(ip net.IP, proto string, port int) error { + mutex.Lock() + defer mutex.Unlock() + + if ip == nil { + ip = defaultIP + } + protomap, ok := globalMap[ip.String()] + if !ok { + return nil + } + delete(protomap[proto].p, port) + return nil +} + 
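(Editor's aside: a caller-side sketch of the allocator contract described above, assuming the import path from this diff. Port 0 asks for the next free port in [BeginPortRange, EndPortRange]; a non-zero port is granted only if still free, and the typed error carries the ip:port pair for retry logic such as the bridge driver's "Trying another port" loop.)

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/daemon/networkdriver/portallocator"
)

func main() {
	// A nil IP falls back to 0.0.0.0 inside the allocator.
	dyn, err := portallocator.RequestPort(nil, "tcp", 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dyn) // 49153 on a fresh allocator

	// Requesting the same port again fails with the typed error.
	if _, err := portallocator.RequestPort(nil, "tcp", dyn); err != nil {
		if alloc, ok := err.(portallocator.ErrPortAlreadyAllocated); ok {
			fmt.Println("busy:", alloc.IPPort())
		}
	}
	portallocator.ReleasePort(nil, "tcp", dyn)
}

+// ReleaseAll releases all ports for all ips.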
+func ReleaseAll() error { + mutex.Lock() + globalMap = ipMapping{} + mutex.Unlock() + return nil +} + +func (pm *portMap) findPort() (int, error) { + if pm.last == 0 { + pm.p[BeginPortRange] = struct{}{} + pm.last = BeginPortRange + return BeginPortRange, nil + } + + for port := pm.last + 1; port != pm.last; port++ { + if port > EndPortRange { + port = BeginPortRange + } + + if _, ok := pm.p[port]; !ok { + pm.p[port] = struct{}{} + pm.last = port + return port, nil + } + } + return 0, ErrAllPortsAllocated +} diff -Nru docker.io-0.9.1~dfsg1/daemon/networkdriver/portallocator/portallocator_test.go docker.io-1.3.2~dfsg1/daemon/networkdriver/portallocator/portallocator_test.go --- docker.io-0.9.1~dfsg1/daemon/networkdriver/portallocator/portallocator_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/networkdriver/portallocator/portallocator_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,216 @@ +package portallocator + +import ( + "net" + "testing" +) + +func reset() { + ReleaseAll() +} + +func TestRequestNewPort(t *testing.T) { + defer reset() + + port, err := RequestPort(defaultIP, "tcp", 0) + if err != nil { + t.Fatal(err) + } + + if expected := BeginPortRange; port != expected { + t.Fatalf("Expected port %d got %d", expected, port) + } +} + +func TestRequestSpecificPort(t *testing.T) { + defer reset() + + port, err := RequestPort(defaultIP, "tcp", 5000) + if err != nil { + t.Fatal(err) + } + if port != 5000 { + t.Fatalf("Expected port 5000 got %d", port) + } +} + +func TestReleasePort(t *testing.T) { + defer reset() + + port, err := RequestPort(defaultIP, "tcp", 5000) + if err != nil { + t.Fatal(err) + } + if port != 5000 { + t.Fatalf("Expected port 5000 got %d", port) + } + + if err := ReleasePort(defaultIP, "tcp", 5000); err != nil { + t.Fatal(err) + } +} + +func TestReuseReleasedPort(t *testing.T) { + defer reset() + + port, err := RequestPort(defaultIP, "tcp", 5000) + if err != nil { + t.Fatal(err) + } + if port != 5000 { + t.Fatalf("Expected port 5000 got %d", port) + } + + if err := ReleasePort(defaultIP, "tcp", 5000); err != nil { + t.Fatal(err) + } + + port, err = RequestPort(defaultIP, "tcp", 5000) + if err != nil { + t.Fatal(err) + } +} + +func TestReleaseUnreleasedPort(t *testing.T) { + defer reset() + + port, err := RequestPort(defaultIP, "tcp", 5000) + if err != nil { + t.Fatal(err) + } + if port != 5000 { + t.Fatalf("Expected port 5000 got %d", port) + } + + port, err = RequestPort(defaultIP, "tcp", 5000) + + switch err.(type) { + case ErrPortAlreadyAllocated: + default: + t.Fatalf("Expected port allocation error got %s", err) + } +} + +func TestUnknownProtocol(t *testing.T) { + defer reset() + + if _, err := RequestPort(defaultIP, "tcpp", 0); err != ErrUnknownProtocol { + t.Fatalf("Expected error %s got %s", ErrUnknownProtocol, err) + } +} + +func TestAllocateAllPorts(t *testing.T) { + defer reset() + + for i := 0; i <= EndPortRange-BeginPortRange; i++ { + port, err := RequestPort(defaultIP, "tcp", 0) + if err != nil { + t.Fatal(err) + } + + if expected := BeginPortRange + i; port != expected { + t.Fatalf("Expected port %d got %d", expected, port) + } + } + + if _, err := RequestPort(defaultIP, "tcp", 0); err != ErrAllPortsAllocated { + t.Fatalf("Expected error %s got %s", ErrAllPortsAllocated, err) + } + + _, err := RequestPort(defaultIP, "udp", 0) + if err != nil { + t.Fatal(err) + } + + // release a port in the middle and ensure we get another tcp port + port := BeginPortRange + 5 + if err := ReleasePort(defaultIP, "tcp", port); err != nil 
{ + t.Fatal(err) + } + newPort, err := RequestPort(defaultIP, "tcp", 0) + if err != nil { + t.Fatal(err) + } + if newPort != port { + t.Fatalf("Expected port %d got %d", port, newPort) + } +} + +func BenchmarkAllocatePorts(b *testing.B) { + defer reset() + + for i := 0; i < b.N; i++ { + for i := 0; i <= EndPortRange-BeginPortRange; i++ { + port, err := RequestPort(defaultIP, "tcp", 0) + if err != nil { + b.Fatal(err) + } + + if expected := BeginPortRange + i; port != expected { + b.Fatalf("Expected port %d got %d", expected, port) + } + } + reset() + } +} + +func TestPortAllocation(t *testing.T) { + defer reset() + + ip := net.ParseIP("192.168.0.1") + ip2 := net.ParseIP("192.168.0.2") + if port, err := RequestPort(ip, "tcp", 80); err != nil { + t.Fatal(err) + } else if port != 80 { + t.Fatalf("Acquire(80) should return 80, not %d", port) + } + port, err := RequestPort(ip, "tcp", 0) + if err != nil { + t.Fatal(err) + } + if port <= 0 { + t.Fatalf("Acquire(0) should return a non-zero port") + } + + if _, err := RequestPort(ip, "tcp", port); err == nil { + t.Fatalf("Acquiring a port already in use should return an error") + } + + if newPort, err := RequestPort(ip, "tcp", 0); err != nil { + t.Fatal(err) + } else if newPort == port { + t.Fatalf("Acquire(0) allocated the same port twice: %d", port) + } + + if _, err := RequestPort(ip, "tcp", 80); err == nil { + t.Fatalf("Acquiring a port already in use should return an error") + } + if _, err := RequestPort(ip2, "tcp", 80); err != nil { + t.Fatalf("It should be possible to allocate the same port on a different interface") + } + if _, err := RequestPort(ip2, "tcp", 80); err == nil { + t.Fatalf("Acquiring a port already in use should return an error") + } + if err := ReleasePort(ip, "tcp", 80); err != nil { + t.Fatal(err) + } + if _, err := RequestPort(ip, "tcp", 80); err != nil { + t.Fatal(err) + } + + port, err = RequestPort(ip, "tcp", 0) + if err != nil { + t.Fatal(err) + } + port2, err := RequestPort(ip, "tcp", port+1) + if err != nil { + t.Fatal(err) + } + port3, err := RequestPort(ip, "tcp", 0) + if err != nil { + t.Fatal(err) + } + if port3 == port2 { + t.Fatal("Requesting a dynamic port should never allocate a used port") + } +} diff -Nru docker.io-0.9.1~dfsg1/daemon/networkdriver/portmapper/mapper.go docker.io-1.3.2~dfsg1/daemon/networkdriver/portmapper/mapper.go --- docker.io-0.9.1~dfsg1/daemon/networkdriver/portmapper/mapper.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/networkdriver/portmapper/mapper.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,176 @@ +package portmapper + +import ( + "errors" + "fmt" + "net" + "sync" + + "github.com/docker/docker/daemon/networkdriver/portallocator" + "github.com/docker/docker/pkg/iptables" + "github.com/docker/docker/pkg/log" +) + +type mapping struct { + proto string + userlandProxy UserlandProxy + host net.Addr + container net.Addr +} + +var ( + chain *iptables.Chain + lock sync.Mutex + + // udp:ip:port + currentMappings = make(map[string]*mapping) + + NewProxy = NewProxyCommand +) + +var ( + ErrUnknownBackendAddressType = errors.New("unknown container address type not supported") + ErrPortMappedForIP = errors.New("port is already mapped to ip") + ErrPortNotMapped = errors.New("port is not mapped") +) + +func SetIptablesChain(c *iptables.Chain) { + chain = c +} + +func Map(container net.Addr, hostIP net.IP, hostPort int) (host net.Addr, err error) { + lock.Lock() + defer lock.Unlock() + + var ( + m *mapping + proto string + allocatedHostPort int + proxy UserlandProxy + 
) + + switch container.(type) { + case *net.TCPAddr: + proto = "tcp" + if allocatedHostPort, err = portallocator.RequestPort(hostIP, proto, hostPort); err != nil { + return nil, err + } + + m = &mapping{ + proto: proto, + host: &net.TCPAddr{IP: hostIP, Port: allocatedHostPort}, + container: container, + } + + proxy = NewProxy(proto, hostIP, allocatedHostPort, container.(*net.TCPAddr).IP, container.(*net.TCPAddr).Port) + case *net.UDPAddr: + proto = "udp" + if allocatedHostPort, err = portallocator.RequestPort(hostIP, proto, hostPort); err != nil { + return nil, err + } + + m = &mapping{ + proto: proto, + host: &net.UDPAddr{IP: hostIP, Port: allocatedHostPort}, + container: container, + } + + proxy = NewProxy(proto, hostIP, allocatedHostPort, container.(*net.UDPAddr).IP, container.(*net.UDPAddr).Port) + default: + return nil, ErrUnknownBackendAddressType + } + + // release the allocated port on any further error during return. + defer func() { + if err != nil { + portallocator.ReleasePort(hostIP, proto, allocatedHostPort) + } + }() + + key := getKey(m.host) + if _, exists := currentMappings[key]; exists { + return nil, ErrPortMappedForIP + } + + containerIP, containerPort := getIPAndPort(m.container) + if err := forward(iptables.Add, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort); err != nil { + return nil, err + } + + cleanup := func() error { + // need to undo the iptables rules before we return + proxy.Stop() + forward(iptables.Delete, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort) + if err := portallocator.ReleasePort(hostIP, m.proto, allocatedHostPort); err != nil { + return err + } + + return nil + } + + if err := proxy.Start(); err != nil { + if err := cleanup(); err != nil { + return nil, fmt.Errorf("Error during port allocation cleanup: %v", err) + } + return nil, err + } + m.userlandProxy = proxy + currentMappings[key] = m + return m.host, nil +} + +func Unmap(host net.Addr) error { + lock.Lock() + defer lock.Unlock() + + key := getKey(host) + data, exists := currentMappings[key] + if !exists { + return ErrPortNotMapped + } + + data.userlandProxy.Stop() + + delete(currentMappings, key) + + containerIP, containerPort := getIPAndPort(data.container) + hostIP, hostPort := getIPAndPort(data.host) + if err := forward(iptables.Delete, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { + log.Errorf("Error on iptables delete: %s", err) + } + + switch a := host.(type) { + case *net.TCPAddr: + return portallocator.ReleasePort(a.IP, "tcp", a.Port) + case *net.UDPAddr: + return portallocator.ReleasePort(a.IP, "udp", a.Port) + } + return nil +} + +func getKey(a net.Addr) string { + switch t := a.(type) { + case *net.TCPAddr: + return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "tcp") + case *net.UDPAddr: + return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "udp") + } + return "" +} + +func getIPAndPort(a net.Addr) (net.IP, int) { + switch t := a.(type) { + case *net.TCPAddr: + return t.IP, t.Port + case *net.UDPAddr: + return t.IP, t.Port + } + return nil, 0 +} + +func forward(action iptables.Action, proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error { + if chain == nil { + return nil + } + return chain.Forward(action, sourceIP, sourcePort, proto, containerIP, containerPort) +} diff -Nru docker.io-0.9.1~dfsg1/daemon/networkdriver/portmapper/mapper_test.go docker.io-1.3.2~dfsg1/daemon/networkdriver/portmapper/mapper_test.go --- 
docker.io-0.9.1~dfsg1/daemon/networkdriver/portmapper/mapper_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/networkdriver/portmapper/mapper_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,152 @@ +package portmapper + +import ( + "net" + "testing" + + "github.com/docker/docker/daemon/networkdriver/portallocator" + "github.com/docker/docker/pkg/iptables" +) + +func init() { + // override this func to mock out the proxy server + NewProxy = NewMockProxyCommand +} + +func reset() { + chain = nil + currentMappings = make(map[string]*mapping) +} + +func TestSetIptablesChain(t *testing.T) { + defer reset() + + c := &iptables.Chain{ + Name: "TEST", + Bridge: "192.168.1.1", + } + + if chain != nil { + t.Fatal("chain should be nil at init") + } + + SetIptablesChain(c) + if chain == nil { + t.Fatal("chain should not be nil after set") + } +} + +func TestMapPorts(t *testing.T) { + dstIp1 := net.ParseIP("192.168.0.1") + dstIp2 := net.ParseIP("192.168.0.2") + dstAddr1 := &net.TCPAddr{IP: dstIp1, Port: 80} + dstAddr2 := &net.TCPAddr{IP: dstIp2, Port: 80} + + srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")} + srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")} + + addrEqual := func(addr1, addr2 net.Addr) bool { + return (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String()) + } + + if host, err := Map(srcAddr1, dstIp1, 80); err != nil { + t.Fatalf("Failed to allocate port: %s", err) + } else if !addrEqual(dstAddr1, host) { + t.Fatalf("Incorrect mapping result: expected %s:%s, got %s:%s", + dstAddr1.String(), dstAddr1.Network(), host.String(), host.Network()) + } + + if _, err := Map(srcAddr1, dstIp1, 80); err == nil { + t.Fatalf("Port is in use - mapping should have failed") + } + + if _, err := Map(srcAddr2, dstIp1, 80); err == nil { + t.Fatalf("Port is in use - mapping should have failed") + } + + if _, err := Map(srcAddr2, dstIp2, 80); err != nil { + t.Fatalf("Failed to allocate port: %s", err) + } + + if Unmap(dstAddr1) != nil { + t.Fatalf("Failed to release port") + } + + if Unmap(dstAddr2) != nil { + t.Fatalf("Failed to release port") + } + + if Unmap(dstAddr2) == nil { + t.Fatalf("Port already released, but no error reported") + } +} + +func TestGetUDPKey(t *testing.T) { + addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53} + + key := getKey(addr) + + if expected := "192.168.1.5:53/udp"; key != expected { + t.Fatalf("expected key %s got %s", expected, key) + } +} + +func TestGetTCPKey(t *testing.T) { + addr := &net.TCPAddr{IP: net.ParseIP("192.168.1.5"), Port: 80} + + key := getKey(addr) + + if expected := "192.168.1.5:80/tcp"; key != expected { + t.Fatalf("expected key %s got %s", expected, key) + } +} + +func TestGetUDPIPAndPort(t *testing.T) { + addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53} + + ip, port := getIPAndPort(addr) + if expected := "192.168.1.5"; ip.String() != expected { + t.Fatalf("expected ip %s got %s", expected, ip) + } + + if ep := 53; port != ep { + t.Fatalf("expected port %d got %d", ep, port) + } +} + +func TestMapAllPortsSingleInterface(t *testing.T) { + dstIp1 := net.ParseIP("0.0.0.0") + srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")} + + hosts := []net.Addr{} + var host net.Addr + var err error + + defer func() { + for _, val := range hosts { + Unmap(val) + } + }() + + for i := 0; i < 10; i++ { + for i := portallocator.BeginPortRange; i < portallocator.EndPortRange; i++ { + if host, err = Map(srcAddr1, dstIp1, 0); err != nil { + 
t.Fatal(err) + } + + hosts = append(hosts, host) + } + + if _, err := Map(srcAddr1, dstIp1, portallocator.BeginPortRange); err == nil { + t.Fatalf("Port %d should be bound but is not", portallocator.BeginPortRange) + } + + for _, val := range hosts { + if err := Unmap(val); err != nil { + t.Fatal(err) + } + } + + hosts = []net.Addr{} + } +} diff -Nru docker.io-0.9.1~dfsg1/daemon/networkdriver/portmapper/mock_proxy.go docker.io-1.3.2~dfsg1/daemon/networkdriver/portmapper/mock_proxy.go --- docker.io-0.9.1~dfsg1/daemon/networkdriver/portmapper/mock_proxy.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/networkdriver/portmapper/mock_proxy.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,18 @@ +package portmapper + +import "net" + +func NewMockProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int) UserlandProxy { + return &mockProxyCommand{} +} + +type mockProxyCommand struct { +} + +func (p *mockProxyCommand) Start() error { + return nil +} + +func (p *mockProxyCommand) Stop() error { + return nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/networkdriver/portmapper/proxy.go docker.io-1.3.2~dfsg1/daemon/networkdriver/portmapper/proxy.go --- docker.io-0.9.1~dfsg1/daemon/networkdriver/portmapper/proxy.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/networkdriver/portmapper/proxy.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,156 @@ +package portmapper + +import ( + "flag" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "os/exec" + "os/signal" + "strconv" + "syscall" + "time" + + "github.com/docker/docker/pkg/proxy" + "github.com/docker/docker/pkg/reexec" +) + +const userlandProxyCommandName = "docker-proxy" + +func init() { + reexec.Register(userlandProxyCommandName, execProxy) +} + +type UserlandProxy interface { + Start() error + Stop() error +} + +// proxyCommand wraps an exec.Cmd to run the userland TCP and UDP +// proxies as separate processes. 
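(Editor's aside before the proxyCommand type below: the startup handshake it relies on passes one end of a pipe to the child as fd 3 via ExtraFiles; the child writes "0\n" on success or "1\n" plus an error text on failure, and the parent reads that status with a timeout. A stripped-down, self-contained sketch of the same pattern follows; it re-runs the current binary with a marker environment variable instead of docker's reexec machinery, so everything here is illustrative rather than the daemon's actual wiring.)

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	if os.Getenv("BE_CHILD") == "1" {
		f := os.NewFile(3, "signal-parent") // the pipe write end, fd 3
		fmt.Fprint(f, "0\n")                // report successful startup
		f.Close()
		return
	}

	r, w, _ := os.Pipe()
	cmd := exec.Command(os.Args[0])
	cmd.Env = append(os.Environ(), "BE_CHILD=1")
	cmd.ExtraFiles = []*os.File{w} // becomes fd 3 in the child
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	w.Close() // keep only the child's copy of the write end open

	buf := make([]byte, 2)
	r.Read(buf)
	fmt.Printf("child reported %q\n", buf) // "0\n" means the proxy is up
	cmd.Wait()
}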
+type proxyCommand struct { + cmd *exec.Cmd +} + +// execProxy is the reexec function that is registered to start the userland proxies +func execProxy() { + f := os.NewFile(3, "signal-parent") + host, container := parseHostContainerAddrs() + + p, err := proxy.NewProxy(host, container) + if err != nil { + fmt.Fprintf(f, "1\n%s", err) + f.Close() + os.Exit(1) + } + go handleStopSignals(p) + fmt.Fprint(f, "0\n") + f.Close() + + // Run will block until the proxy stops + p.Run() +} + +// parseHostContainerAddrs parses the flags passed on reexec to create the TCP or UDP +// net.Addrs to map the host and container ports +func parseHostContainerAddrs() (host net.Addr, container net.Addr) { + var ( + proto = flag.String("proto", "tcp", "proxy protocol") + hostIP = flag.String("host-ip", "", "host ip") + hostPort = flag.Int("host-port", -1, "host port") + containerIP = flag.String("container-ip", "", "container ip") + containerPort = flag.Int("container-port", -1, "container port") + ) + + flag.Parse() + + switch *proto { + case "tcp": + host = &net.TCPAddr{IP: net.ParseIP(*hostIP), Port: *hostPort} + container = &net.TCPAddr{IP: net.ParseIP(*containerIP), Port: *containerPort} + case "udp": + host = &net.UDPAddr{IP: net.ParseIP(*hostIP), Port: *hostPort} + container = &net.UDPAddr{IP: net.ParseIP(*containerIP), Port: *containerPort} + default: + log.Fatalf("unsupported protocol %s", *proto) + } + + return host, container +} + +func handleStopSignals(p proxy.Proxy) { + s := make(chan os.Signal, 10) + signal.Notify(s, os.Interrupt, syscall.SIGTERM, syscall.SIGSTOP) + + for _ = range s { + p.Close() + + os.Exit(0) + } +} + +func NewProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int) UserlandProxy { + args := []string{ + userlandProxyCommandName, + "-proto", proto, + "-host-ip", hostIP.String(), + "-host-port", strconv.Itoa(hostPort), + "-container-ip", containerIP.String(), + "-container-port", strconv.Itoa(containerPort), + } + + return &proxyCommand{ + cmd: &exec.Cmd{ + Path: reexec.Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGTERM, // send a sigterm to the proxy if the daemon process dies + }, + }, + } +} + +func (p *proxyCommand) Start() error { + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("proxy unable to open os.Pipe %s", err) + } + defer r.Close() + p.cmd.ExtraFiles = []*os.File{w} + if err := p.cmd.Start(); err != nil { + return err + } + w.Close() + + errchan := make(chan error, 1) + go func() { + buf := make([]byte, 2) + r.Read(buf) + + if string(buf) != "0\n" { + errStr, _ := ioutil.ReadAll(r) + errchan <- fmt.Errorf("Error starting userland proxy: %s", errStr) + return + } + errchan <- nil + }() + + select { + case err := <-errchan: + return err + case <-time.After(1 * time.Second): + return fmt.Errorf("Timed out starting the userland proxy") + } +} + +func (p *proxyCommand) Stop() error { + if p.cmd.Process != nil { + if err := p.cmd.Process.Signal(os.Interrupt); err != nil { + return err + } + return p.cmd.Wait() + } + return nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/networkdriver/utils.go docker.io-1.3.2~dfsg1/daemon/networkdriver/utils.go --- docker.io-0.9.1~dfsg1/daemon/networkdriver/utils.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/networkdriver/utils.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,118 @@ +package networkdriver + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + + "github.com/docker/libcontainer/netlink" +) + +var ( + 
networkGetRoutesFct = netlink.NetworkGetRoutes + ErrNoDefaultRoute = errors.New("no default route") +) + +func CheckNameserverOverlaps(nameservers []string, toCheck *net.IPNet) error { + if len(nameservers) > 0 { + for _, ns := range nameservers { + _, nsNetwork, err := net.ParseCIDR(ns) + if err != nil { + return err + } + if NetworkOverlaps(toCheck, nsNetwork) { + return ErrNetworkOverlapsWithNameservers + } + } + } + return nil +} + +func CheckRouteOverlaps(toCheck *net.IPNet) error { + networks, err := networkGetRoutesFct() + if err != nil { + return err + } + + for _, network := range networks { + if network.IPNet != nil && NetworkOverlaps(toCheck, network.IPNet) { + return ErrNetworkOverlaps + } + } + return nil +} + +// Detects overlap between one IPNet and another +func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool { + if firstIP, _ := NetworkRange(netX); netY.Contains(firstIP) { + return true + } + if firstIP, _ := NetworkRange(netY); netX.Contains(firstIP) { + return true + } + return false +} + +// Calculates the first and last IP addresses in an IPNet +func NetworkRange(network *net.IPNet) (net.IP, net.IP) { + var ( + netIP = network.IP.To4() + firstIP = netIP.Mask(network.Mask) + lastIP = net.IPv4(0, 0, 0, 0).To4() + ) + + for i := 0; i < len(lastIP); i++ { + lastIP[i] = netIP[i] | ^network.Mask[i] + } + return firstIP, lastIP +} + +// Given a netmask, calculates the number of available hosts +func NetworkSize(mask net.IPMask) int32 { + m := net.IPv4Mask(0, 0, 0, 0) + for i := 0; i < net.IPv4len; i++ { + m[i] = ^mask[i] + } + return int32(binary.BigEndian.Uint32(m)) + 1 +} + +// Return the IPv4 address of a network interface +func GetIfaceAddr(name string) (net.Addr, error) { + iface, err := net.InterfaceByName(name) + if err != nil { + return nil, err + } + addrs, err := iface.Addrs() + if err != nil { + return nil, err + } + var addrs4 []net.Addr + for _, addr := range addrs { + ip := (addr.(*net.IPNet)).IP + if ip4 := ip.To4(); len(ip4) == net.IPv4len { + addrs4 = append(addrs4, addr) + } + } + switch { + case len(addrs4) == 0: + return nil, fmt.Errorf("Interface %v has no IP addresses", name) + case len(addrs4) > 1: + fmt.Printf("Interface %v has more than 1 IPv4 address. Defaulting to using %v\n", + name, (addrs4[0].(*net.IPNet)).IP) + } + return addrs4[0], nil +} + +func GetDefaultRouteIface() (*net.Interface, error) { + rs, err := networkGetRoutesFct() + if err != nil { + return nil, fmt.Errorf("unable to get routes: %v", err) + } + for _, r := range rs { + if r.Default { + return r.Iface, nil + } + } + return nil, ErrNoDefaultRoute +} diff -Nru docker.io-0.9.1~dfsg1/daemon/network_settings.go docker.io-1.3.2~dfsg1/daemon/network_settings.go --- docker.io-0.9.1~dfsg1/daemon/network_settings.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/network_settings.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,43 @@ +package daemon + +import ( + "github.com/docker/docker/engine" + "github.com/docker/docker/nat" +) + +// FIXME: move deprecated port stuff to nat to clean up the core. 
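(Editor's aside before the deprecated port-mapping settings: a worked example of the NetworkRange/NetworkSize helpers just shown, restated standalone rather than imported from the daemon package. For 10.1.2.3/26 it reproduces the expectations in TestNetworkRange above: first address 10.1.2.0, last 10.1.2.63, size 64.)

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

func main() {
	_, network, _ := net.ParseCIDR("10.1.2.3/26")

	// First and last address: mask down, then OR with the inverted mask.
	netIP := network.IP.To4()
	first := netIP.Mask(network.Mask)
	last := make(net.IP, 4)
	for i := range last {
		last[i] = netIP[i] | ^network.Mask[i]
	}

	// Size: inverted mask as a big-endian integer, plus one.
	m := make(net.IPMask, 4)
	for i := range m {
		m[i] = ^network.Mask[i]
	}
	size := int32(binary.BigEndian.Uint32(m)) + 1

	fmt.Println(first, last, size) // 10.1.2.0 10.1.2.63 64
}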
+type PortMapping map[string]string // Deprecated + +type NetworkSettings struct { + IPAddress string + IPPrefixLen int + MacAddress string + Gateway string + Bridge string + PortMapping map[string]PortMapping // Deprecated + Ports nat.PortMap +} + +func (settings *NetworkSettings) PortMappingAPI() *engine.Table { + var outs = engine.NewTable("", 0) + for port, bindings := range settings.Ports { + p, _ := nat.ParsePort(port.Port()) + if len(bindings) == 0 { + out := &engine.Env{} + out.SetInt("PrivatePort", p) + out.Set("Type", port.Proto()) + outs.Add(out) + continue + } + for _, binding := range bindings { + out := &engine.Env{} + h, _ := nat.ParsePort(binding.HostPort) + out.SetInt("PrivatePort", p) + out.SetInt("PublicPort", h) + out.Set("Type", port.Proto()) + out.Set("IP", binding.HostIp) + outs.Add(out) + } + } + return outs +} diff -Nru docker.io-0.9.1~dfsg1/daemon/pause.go docker.io-1.3.2~dfsg1/daemon/pause.go --- docker.io-0.9.1~dfsg1/daemon/pause.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/pause.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,37 @@ +package daemon + +import ( + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerPause(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER", job.Name) + } + name := job.Args[0] + container := daemon.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + if err := container.Pause(); err != nil { + return job.Errorf("Cannot pause container %s: %s", name, err) + } + container.LogEvent("pause") + return engine.StatusOK +} + +func (daemon *Daemon) ContainerUnpause(job *engine.Job) engine.Status { + if n := len(job.Args); n < 1 || n > 2 { + return job.Errorf("Usage: %s CONTAINER", job.Name) + } + name := job.Args[0] + container := daemon.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + if err := container.Unpause(); err != nil { + return job.Errorf("Cannot unpause container %s: %s", name, err) + } + container.LogEvent("unpause") + return engine.StatusOK +} diff -Nru docker.io-0.9.1~dfsg1/daemon/README.md docker.io-1.3.2~dfsg1/daemon/README.md --- docker.io-0.9.1~dfsg1/daemon/README.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/README.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,10 @@ +This directory contains code pertaining to running containers and storing images + +Code pertaining to running containers: + + - execdriver + - networkdriver + +Code pertaining to storing images: + + - graphdriver diff -Nru docker.io-0.9.1~dfsg1/daemon/resize.go docker.io-1.3.2~dfsg1/daemon/resize.go --- docker.io-0.9.1~dfsg1/daemon/resize.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/resize.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,53 @@ +package daemon + +import ( + "strconv" + + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerResize(job *engine.Job) engine.Status { + if len(job.Args) != 3 { + return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name) + } + name := job.Args[0] + height, err := strconv.Atoi(job.Args[1]) + if err != nil { + return job.Error(err) + } + width, err := strconv.Atoi(job.Args[2]) + if err != nil { + return job.Error(err) + } + + if container := daemon.Get(name); container != nil { + if err := container.Resize(height, width); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + return job.Errorf("No such container: %s", name) +} + +func (daemon *Daemon) ContainerExecResize(job *engine.Job) engine.Status { + if len(job.Args) != 3 { + return job.Errorf("Not enough arguments. Usage: %s EXEC HEIGHT WIDTH\n", job.Name) + } + name := job.Args[0] + height, err := strconv.Atoi(job.Args[1]) + if err != nil { + return job.Error(err) + } + width, err := strconv.Atoi(job.Args[2]) + if err != nil { + return job.Error(err) + } + execConfig, err := daemon.getExecConfig(name) + if err != nil { + return job.Error(err) + } + if err := execConfig.Resize(height, width); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff -Nru docker.io-0.9.1~dfsg1/daemon/restart.go docker.io-1.3.2~dfsg1/daemon/restart.go --- docker.io-0.9.1~dfsg1/daemon/restart.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/restart.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,27 @@ +package daemon + +import ( + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + var ( + name = job.Args[0] + t = 10 + ) + if job.EnvExists("t") { + t = job.GetenvInt("t") + } + if container := daemon.Get(name); container != nil { + if err := container.Restart(int(t)); err != nil { + return job.Errorf("Cannot restart container %s: %s\n", name, err) + } + container.LogEvent("restart") + } else { + return job.Errorf("No such container: %s\n", name) + } + return engine.StatusOK +} diff -Nru docker.io-0.9.1~dfsg1/daemon/start.go docker.io-1.3.2~dfsg1/daemon/start.go --- docker.io-0.9.1~dfsg1/daemon/start.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/start.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,73 @@ +package daemon + +import ( + "fmt" + "os" + "strings" + + "github.com/docker/docker/engine" + "github.com/docker/docker/runconfig" +) + +func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status { + if len(job.Args) < 1 { + return job.Errorf("Usage: %s container_id", job.Name) + } + var ( + name = job.Args[0] + container = daemon.Get(name) + ) + + if container == nil { + return job.Errorf("No such container: %s", name) + } + + if container.IsRunning() { + return job.Errorf("Container already started") + } + + // If no environment was set, then no hostconfig was passed. + // This is kept for backward compatibility - hostconfig should be passed when + // creating a container, not during start. + if len(job.Environ()) > 0 { + hostConfig := runconfig.ContainerHostConfigFromJob(job) + if err := daemon.setHostConfig(container, hostConfig); err != nil { + return job.Error(err) + } + } + if err := container.Start(); err != nil { + container.LogEvent("die") + return job.Errorf("Cannot start container %s: %s", name, err) + } + + return engine.StatusOK +} + +func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error { + if err := parseSecurityOpt(container, hostConfig); err != nil { + return err + } + // Validate the HostConfig binds. 
Make sure that: + // the source exists + for _, bind := range hostConfig.Binds { + splitBind := strings.Split(bind, ":") + source := splitBind[0] + + // ensure the source exists on the host + _, err := os.Stat(source) + if err != nil && os.IsNotExist(err) { + err = os.MkdirAll(source, 0755) + if err != nil { + return fmt.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error()) + } + } + } + // Register any links from the host config before starting the container + if err := daemon.RegisterLinks(container, hostConfig); err != nil { + return err + } + container.SetHostConfig(hostConfig) + container.ToDisk() + + return nil +} diff -Nru docker.io-0.9.1~dfsg1/daemon/state.go docker.io-1.3.2~dfsg1/daemon/state.go --- docker.io-0.9.1~dfsg1/daemon/state.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/state.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,206 @@ +package daemon + +import ( + "fmt" + "sync" + "time" + + "github.com/docker/docker/pkg/units" +) + +type State struct { + sync.Mutex + Running bool + Paused bool + Restarting bool + Pid int + ExitCode int + StartedAt time.Time + FinishedAt time.Time + waitChan chan struct{} +} + +func NewState() *State { + return &State{ + waitChan: make(chan struct{}), + } +} + +// String returns a human-readable description of the state +func (s *State) String() string { + if s.Running { + if s.Paused { + return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + if s.Restarting { + return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) + } + + return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + + if s.FinishedAt.IsZero() { + return "" + } + + return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) +} + +// StateString returns a single string to describe state +func (s *State) StateString() string { + if s.Running { + if s.Paused { + return "paused" + } + if s.Restarting { + return "restarting" + } + return "running" + } + return "exited" +} + +func wait(waitChan <-chan struct{}, timeout time.Duration) error { + if timeout < 0 { + <-waitChan + return nil + } + select { + case <-time.After(timeout): + return fmt.Errorf("Timed out: %v", timeout) + case <-waitChan: + return nil + } +} + +// WaitRunning waits until the state is running. If the state is already running it returns +// immediately. To wait forever, supply a negative timeout. +// Returns the pid that was passed to SetRunning +func (s *State) WaitRunning(timeout time.Duration) (int, error) { + s.Lock() + if s.Running { + pid := s.Pid + s.Unlock() + return pid, nil + } + waitChan := s.waitChan + s.Unlock() + if err := wait(waitChan, timeout); err != nil { + return -1, err + } + return s.GetPid(), nil +} + +// WaitStop waits until the state is stopped. If the state is already stopped it returns +// immediately. To wait forever, supply a negative timeout. 
+// Returns the exit code that was passed to SetStopped +func (s *State) WaitStop(timeout time.Duration) (int, error) { + s.Lock() + if !s.Running { + exitCode := s.ExitCode + s.Unlock() + return exitCode, nil + } + waitChan := s.waitChan + s.Unlock() + if err := wait(waitChan, timeout); err != nil { + return -1, err + } + return s.GetExitCode(), nil +} + +func (s *State) IsRunning() bool { + s.Lock() + res := s.Running + s.Unlock() + return res +} + +func (s *State) GetPid() int { + s.Lock() + res := s.Pid + s.Unlock() + return res +} + +func (s *State) GetExitCode() int { + s.Lock() + res := s.ExitCode + s.Unlock() + return res +} + +func (s *State) SetRunning(pid int) { + s.Lock() + s.setRunning(pid) + s.Unlock() +} + +func (s *State) setRunning(pid int) { + s.Running = true + s.Paused = false + s.Restarting = false + s.ExitCode = 0 + s.Pid = pid + s.StartedAt = time.Now().UTC() + close(s.waitChan) // fire waiters for start + s.waitChan = make(chan struct{}) +} + +func (s *State) SetStopped(exitCode int) { + s.Lock() + s.setStopped(exitCode) + s.Unlock() +} + +func (s *State) setStopped(exitCode int) { + s.Running = false + s.Restarting = false + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.ExitCode = exitCode + close(s.waitChan) // fire waiters for stop + s.waitChan = make(chan struct{}) +} + +// SetRestarting is called when docker handles the auto restart of containers that are +// in the middle of a stop and about to be restarted again +func (s *State) SetRestarting(exitCode int) { + s.Lock() + // we should consider the container running when it is restarting because of + // all the checks in docker around rm/stop/etc + s.Running = true + s.Restarting = true + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.ExitCode = exitCode + close(s.waitChan) // fire waiters for stop + s.waitChan = make(chan struct{}) + s.Unlock() +} + +func (s *State) IsRestarting() bool { + s.Lock() + res := s.Restarting + s.Unlock() + return res +} + +func (s *State) SetPaused() { + s.Lock() + s.Paused = true + s.Unlock() +} + +func (s *State) SetUnpaused() { + s.Lock() + s.Paused = false + s.Unlock() +} + +func (s *State) IsPaused() bool { + s.Lock() + res := s.Paused + s.Unlock() + return res +} diff -Nru docker.io-0.9.1~dfsg1/daemon/state_test.go docker.io-1.3.2~dfsg1/daemon/state_test.go --- docker.io-0.9.1~dfsg1/daemon/state_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/state_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,102 @@ +package daemon + +import ( + "sync/atomic" + "testing" + "time" +) + +func TestStateRunStop(t *testing.T) { + s := NewState() + for i := 1; i < 3; i++ { // full lifecycle two times + started := make(chan struct{}) + var pid int64 + go func() { + runPid, _ := s.WaitRunning(-1 * time.Second) + atomic.StoreInt64(&pid, int64(runPid)) + close(started) + }() + s.SetRunning(i + 100) + if !s.IsRunning() { + t.Fatal("State not running") + } + if s.Pid != i+100 { + t.Fatalf("Pid %v, expected %v", s.Pid, i+100) + } + if s.ExitCode != 0 { + t.Fatalf("ExitCode %v, expected 0", s.ExitCode) + } + select { + case <-time.After(100 * time.Millisecond): + t.Fatal("Start callback doesn't fire in 100 milliseconds") + case <-started: + t.Log("Start callback fired") + } + runPid := int(atomic.LoadInt64(&pid)) + if runPid != i+100 { + t.Fatalf("Pid %v, expected %v", runPid, i+100) + } + if pid, err := s.WaitRunning(-1 * time.Second); err != nil || pid != i+100 { + t.Fatalf("WaitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil) + } + + 
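(Editor's aside on the setRunning/setStopped pair above: both broadcast a state transition by closing waitChan, which wakes every waiter at once, and then immediately replacing it with a fresh channel to re-arm the notification. A minimal standalone reduction of that close-and-replace pattern, with invented names:)

package main

import (
	"fmt"
	"sync"
	"time"
)

type notifier struct {
	mu sync.Mutex
	ch chan struct{}
}

func (n *notifier) wait() {
	n.mu.Lock()
	ch := n.ch // snapshot under the lock, as State does with waitChan
	n.mu.Unlock()
	<-ch
}

func (n *notifier) fire() {
	n.mu.Lock()
	close(n.ch)                // release every current waiter at once
	n.ch = make(chan struct{}) // re-arm for waiters of the next transition
	n.mu.Unlock()
}

func main() {
	n := &notifier{ch: make(chan struct{})}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			n.wait()
			fmt.Println("waiter", id, "woken")
		}(i)
	}
	time.Sleep(50 * time.Millisecond) // let the waiters block
	n.fire()
	wg.Wait()
}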
stopped := make(chan struct{}) + var exit int64 + go func() { + exitCode, _ := s.WaitStop(-1 * time.Second) + atomic.StoreInt64(&exit, int64(exitCode)) + close(stopped) + }() + s.SetStopped(i) + if s.IsRunning() { + t.Fatal("State is running") + } + if s.ExitCode != i { + t.Fatalf("ExitCode %v, expected %v", s.ExitCode, i) + } + if s.Pid != 0 { + t.Fatalf("Pid %v, expected 0", s.Pid) + } + select { + case <-time.After(100 * time.Millisecond): + t.Fatal("Stop callback doesn't fire in 100 milliseconds") + case <-stopped: + t.Log("Stop callback fired") + } + exitCode := int(atomic.LoadInt64(&exit)) + if exitCode != i { + t.Fatalf("ExitCode %v, expected %v", exitCode, i) + } + if exitCode, err := s.WaitStop(-1 * time.Second); err != nil || exitCode != i { + t.Fatalf("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil) + } + } +} + +func TestStateTimeoutWait(t *testing.T) { + s := NewState() + started := make(chan struct{}) + go func() { + s.WaitRunning(100 * time.Millisecond) + close(started) + }() + select { + case <-time.After(200 * time.Millisecond): + t.Fatal("Start callback doesn't fire in 100 milliseconds") + case <-started: + t.Log("Start callback fired") + } + s.SetRunning(42) + stopped := make(chan struct{}) + go func() { + s.WaitRunning(100 * time.Millisecond) + close(stopped) + }() + select { + case <-time.After(200 * time.Millisecond): + t.Fatal("Start callback doesn't fire in 100 milliseconds") + case <-stopped: + t.Log("Start callback fired") + } + +} diff -Nru docker.io-0.9.1~dfsg1/daemon/stop.go docker.io-1.3.2~dfsg1/daemon/stop.go --- docker.io-0.9.1~dfsg1/daemon/stop.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/stop.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,30 @@ +package daemon + +import ( + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + var ( + name = job.Args[0] + t = 10 + ) + if job.EnvExists("t") { + t = job.GetenvInt("t") + } + if container := daemon.Get(name); container != nil { + if !container.IsRunning() { + return job.Errorf("Container already stopped") + } + if err := container.Stop(int(t)); err != nil { + return job.Errorf("Cannot stop container %s: %s\n", name, err) + } + container.LogEvent("stop") + } else { + return job.Errorf("No such container: %s\n", name) + } + return engine.StatusOK +} diff -Nru docker.io-0.9.1~dfsg1/daemon/top.go docker.io-1.3.2~dfsg1/daemon/top.go --- docker.io-0.9.1~dfsg1/daemon/top.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/top.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,79 @@ +package daemon + +import ( + "os/exec" + "strconv" + "strings" + + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status { + if len(job.Args) != 1 && len(job.Args) != 2 { + return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER [PS_ARGS]\n", job.Name) + } + var ( + name = job.Args[0] + psArgs = "-ef" + ) + + if len(job.Args) == 2 && job.Args[1] != "" { + psArgs = job.Args[1] + } + + if container := daemon.Get(name); container != nil { + if !container.IsRunning() { + return job.Errorf("Container %s is not running", name) + } + pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID) + if err != nil { + return job.Error(err) + } + output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() + if err != nil { + return job.Errorf("Error running ps: %s", err) + } + + lines := strings.Split(string(output), "\n") + header := strings.Fields(lines[0]) + out := &engine.Env{} + out.SetList("Titles", header) + + pidIndex := -1 + for i, name := range header { + if name == "PID" { + pidIndex = i + } + } + if pidIndex == -1 { + return job.Errorf("Couldn't find PID field in ps output") + } + + processes := [][]string{} + for _, line := range lines[1:] { + if len(line) == 0 { + continue + } + fields := strings.Fields(line) + p, err := strconv.Atoi(fields[pidIndex]) + if err != nil { + return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) + } + + for _, pid := range pids { + if pid == p { + // Make sure number of fields equals number of header titles + // merging "overhanging" fields + process := fields[:len(header)-1] + process = append(process, strings.Join(fields[len(header)-1:], " ")) + processes = append(processes, process) + } + } + } + out.SetJson("Processes", processes) + out.WriteTo(job.Stdout) + return engine.StatusOK + + } + return job.Errorf("No such container: %s", name) +} diff -Nru docker.io-0.9.1~dfsg1/daemon/utils.go docker.io-1.3.2~dfsg1/daemon/utils.go --- docker.io-0.9.1~dfsg1/daemon/utils.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/utils.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,53 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/docker/docker/nat" + "github.com/docker/docker/runconfig" +) + +func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostConfig) error { + if config.PortSpecs != nil { + ports, bindings, err := nat.ParsePortSpecs(config.PortSpecs) + if err != nil { + return err + } + config.PortSpecs = nil + if len(bindings) > 0 { + if hostConfig == nil { + hostConfig = &runconfig.HostConfig{} + } + hostConfig.PortBindings = bindings + } + + if config.ExposedPorts == nil { + config.ExposedPorts = make(nat.PortSet, len(ports)) + } + for k, v := range ports { + config.ExposedPorts[k] = v + } + } + return nil +} + +func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig) []string { + if hostConfig == nil { + return nil + } + + out := []string{} + + // merge in the lxc conf options into the generic config map + if lxcConf := hostConfig.LxcConf; lxcConf != nil { + for _, pair := range lxcConf { + // because lxc conf gets the driver name lxc.XXXX we need to trim it off + // and let the lxc driver add it back later if needed + parts := strings.SplitN(pair.Key, ".", 2) + out = append(out, fmt.Sprintf("%s=%s", parts[1], pair.Value)) + } + } + + return out +} diff -Nru docker.io-0.9.1~dfsg1/daemon/utils_linux.go docker.io-1.3.2~dfsg1/daemon/utils_linux.go --- docker.io-0.9.1~dfsg1/daemon/utils_linux.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/utils_linux.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,17 @@ +// +build linux + +package daemon + +import "github.com/docker/libcontainer/selinux" + +func selinuxSetDisabled() { + 
selinux.SetDisabled() +} + +func selinuxFreeLxcContexts(label string) { + selinux.FreeLxcContexts(label) +} + +func selinuxEnabled() bool { + return selinux.SelinuxEnabled() +} diff -Nru docker.io-0.9.1~dfsg1/daemon/utils_nolinux.go docker.io-1.3.2~dfsg1/daemon/utils_nolinux.go --- docker.io-0.9.1~dfsg1/daemon/utils_nolinux.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/utils_nolinux.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,13 @@ +// +build !linux + +package daemon + +func selinuxSetDisabled() { +} + +func selinuxFreeLxcContexts(label string) { +} + +func selinuxEnabled() bool { + return false +} diff -Nru docker.io-0.9.1~dfsg1/daemon/utils_test.go docker.io-1.3.2~dfsg1/daemon/utils_test.go --- docker.io-0.9.1~dfsg1/daemon/utils_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/utils_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,54 @@ +package daemon + +import ( + "testing" + + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" +) + +func TestMergeLxcConfig(t *testing.T) { + hostConfig := &runconfig.HostConfig{ + LxcConf: []utils.KeyValuePair{ + {Key: "lxc.cgroups.cpuset", Value: "1,2"}, + }, + } + + out := mergeLxcConfIntoOptions(hostConfig) + + cpuset := out[0] + if expected := "cgroups.cpuset=1,2"; cpuset != expected { + t.Fatalf("expected %s got %s", expected, cpuset) + } +} + +func TestRemoveLocalDns(t *testing.T) { + ns0 := "nameserver 10.16.60.14\nnameserver 10.16.60.21\n" + + if result := utils.RemoveLocalDns([]byte(ns0)); result != nil { + if ns0 != string(result) { + t.Fatalf("Failed No Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) + } + } + + ns1 := "nameserver 10.16.60.14\nnameserver 10.16.60.21\nnameserver 127.0.0.1\n" + if result := utils.RemoveLocalDns([]byte(ns1)); result != nil { + if ns0 != string(result) { + t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) + } + } + + ns1 = "nameserver 10.16.60.14\nnameserver 127.0.0.1\nnameserver 10.16.60.21\n" + if result := utils.RemoveLocalDns([]byte(ns1)); result != nil { + if ns0 != string(result) { + t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) + } + } + + ns1 = "nameserver 127.0.1.1\nnameserver 10.16.60.14\nnameserver 10.16.60.21\n" + if result := utils.RemoveLocalDns([]byte(ns1)); result != nil { + if ns0 != string(result) { + t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) + } + } +} diff -Nru docker.io-0.9.1~dfsg1/daemon/volumes.go docker.io-1.3.2~dfsg1/daemon/volumes.go --- docker.io-0.9.1~dfsg1/daemon/volumes.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/daemon/volumes.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,333 @@ +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/volumes" +) + +type Mount struct { + MountToPath string + container *Container + volume *volumes.Volume + Writable bool + copyData bool +} + +func (container *Container) prepareVolumes() error { + if container.Volumes == nil || len(container.Volumes) == 0 { + container.Volumes = make(map[string]string) + container.VolumesRW = make(map[string]bool) + if err := container.applyVolumesFrom(); err != nil { + return err + } + } + + return container.createVolumes() +} + +// sortedVolumeMounts 
returns the list of container volume mount points sorted in lexicographic order +func (container *Container) sortedVolumeMounts() []string { + var mountPaths []string + for path := range container.Volumes { + mountPaths = append(mountPaths, path) + } + + sort.Strings(mountPaths) + return mountPaths +} + +func (container *Container) createVolumes() error { + mounts, err := container.parseVolumeMountConfig() + if err != nil { + return err + } + + for _, mnt := range mounts { + if err := mnt.initialize(); err != nil { + return err + } + } + + return nil +} + +func (m *Mount) initialize() error { + // No need to initialize anything since it's already been initialized + if _, exists := m.container.Volumes[m.MountToPath]; exists { + return nil + } + + // This is the full path to container fs + mntToPath + containerMntPath, err := symlink.FollowSymlinkInScope(filepath.Join(m.container.basefs, m.MountToPath), m.container.basefs) + if err != nil { + return err + } + m.container.VolumesRW[m.MountToPath] = m.Writable + m.container.Volumes[m.MountToPath] = m.volume.Path + m.volume.AddContainer(m.container.ID) + if m.Writable && m.copyData { + // Copy whatever is in the container at the mntToPath to the volume + copyExistingContents(containerMntPath, m.volume.Path) + } + + return nil +} + +func (container *Container) VolumePaths() map[string]struct{} { + var paths = make(map[string]struct{}) + for _, path := range container.Volumes { + paths[path] = struct{}{} + } + return paths +} + +func (container *Container) registerVolumes() { + for _, mnt := range container.VolumeMounts() { + mnt.volume.AddContainer(container.ID) + } +} + +func (container *Container) derefVolumes() { + for path := range container.VolumePaths() { + vol := container.daemon.volumes.Get(path) + if vol == nil { + log.Debugf("Volume %s was not found and could not be dereferenced", path) + continue + } + vol.RemoveContainer(container.ID) + } +} + +func (container *Container) parseVolumeMountConfig() (map[string]*Mount, error) { + var mounts = make(map[string]*Mount) + // Get all the bind mounts + for _, spec := range container.hostConfig.Binds { + path, mountToPath, writable, err := parseBindMountSpec(spec) + if err != nil { + return nil, err + } + // Check if a volume already exists for this and use it + vol, err := container.daemon.volumes.FindOrCreateVolume(path, writable) + if err != nil { + return nil, err + } + mounts[mountToPath] = &Mount{ + container: container, + volume: vol, + MountToPath: mountToPath, + Writable: writable, + } + } + + // Get the rest of the volumes + for path := range container.Config.Volumes { + // Check if this is already added as a bind-mount + path = filepath.Clean(path) + if _, exists := mounts[path]; exists { + continue + } + + // Check if this has already been created + if _, exists := container.Volumes[path]; exists { + continue + } + + vol, err := container.daemon.volumes.FindOrCreateVolume("", true) + if err != nil { + return nil, err + } + mounts[path] = &Mount{ + container: container, + MountToPath: path, + volume: vol, + Writable: true, + copyData: true, + } + } + + return mounts, nil +} + +func parseBindMountSpec(spec string) (string, string, bool, error) { + var ( + path, mountToPath string + writable bool + arr = strings.Split(spec, ":") + ) + + switch len(arr) { + case 2: + path = arr[0] + mountToPath = arr[1] + writable = true + case 3: + path = arr[0] + mountToPath = arr[1] + writable = validMountMode(arr[2]) && arr[2] == "rw" + default: + return "", "", false, fmt.Errorf("Invalid volume 
specification: %s", spec)
+	}
+
+	if !filepath.IsAbs(path) {
+		return "", "", false, fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", path)
+	}
+
+	path = filepath.Clean(path)
+	mountToPath = filepath.Clean(mountToPath)
+	return path, mountToPath, writable, nil
+}
+
+func (container *Container) applyVolumesFrom() error {
+	volumesFrom := container.hostConfig.VolumesFrom
+
+	mountGroups := make([]map[string]*Mount, 0, len(volumesFrom))
+
+	for _, spec := range volumesFrom {
+		mountGroup, err := parseVolumesFromSpec(container.daemon, spec)
+		if err != nil {
+			return err
+		}
+		mountGroups = append(mountGroups, mountGroup)
+	}
+
+	for _, mounts := range mountGroups {
+		for _, mnt := range mounts {
+			mnt.container = container
+			if err := mnt.initialize(); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func validMountMode(mode string) bool {
+	validModes := map[string]bool{
+		"rw": true,
+		"ro": true,
+	}
+
+	return validModes[mode]
+}
+
+func (container *Container) setupMounts() error {
+	mounts := []execdriver.Mount{
+		{Source: container.ResolvConfPath, Destination: "/etc/resolv.conf", Writable: true, Private: true},
+	}
+
+	if container.HostnamePath != "" {
+		mounts = append(mounts, execdriver.Mount{Source: container.HostnamePath, Destination: "/etc/hostname", Writable: true, Private: true})
+	}
+
+	if container.HostsPath != "" {
+		mounts = append(mounts, execdriver.Mount{Source: container.HostsPath, Destination: "/etc/hosts", Writable: true, Private: true})
+	}
+
+	// Mount user-specified volumes
+	// Note, these are not private because you may want propagation of (un)mounts from host
+	// volumes. For instance, if you use -v /usr:/usr and the host later mounts /usr/share,
+	// you want this new mount in the container.
+	// These mounts must be ordered by destination path so that parent paths are
+	// mounted before their children; sortedVolumeMounts provides that order by
+	// sorting lexicographically.
+	for _, path := range container.sortedVolumeMounts() {
+		mounts = append(mounts, execdriver.Mount{
+			Source:      container.Volumes[path],
+			Destination: path,
+			Writable:    container.VolumesRW[path],
+		})
+	}
+
+	container.command.Mounts = mounts
+	return nil
+}
+
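Since parseBindMountSpec above is the sole gatekeeper for -v/Binds strings, a brief in-package sketch of what it accepts may help; this is an illustration added for this document, not present in the upstream diff:

func exampleBindSpecs() {
	for _, spec := range []string{
		"/src:/dst",     // -> "/src", "/dst", writable=true
		"/src:/dst:rw",  // -> "/src", "/dst", writable=true
		"/src:/dst:ro",  // -> "/src", "/dst", writable=false
		"src:/dst",      // -> error: volume paths must be absolute
		"/src:/dst:bad", // quirk: an unknown mode yields writable=false, not an error
	} {
		path, mountToPath, writable, err := parseBindMountSpec(spec)
		fmt.Println(path, mountToPath, writable, err)
	}
}
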
+func parseVolumesFromSpec(daemon *Daemon, spec string) (map[string]*Mount, error) {
+	specParts := strings.SplitN(spec, ":", 2)
+	if len(specParts) == 0 {
+		return nil, fmt.Errorf("Malformed volumes-from specification: %s", spec)
+	}
+
+	c := daemon.Get(specParts[0])
+	if c == nil {
+		return nil, fmt.Errorf("Container %s not found. Impossible to mount its volumes", specParts[0])
+	}
+
+	mounts := c.VolumeMounts()
+
+	if len(specParts) == 2 {
+		mode := specParts[1]
+		if !validMountMode(mode) {
+			return nil, fmt.Errorf("Invalid mode for volumes-from: %s", mode)
+		}
+
+		// Set the mode for the inherited volume
+		for _, mnt := range mounts {
+			// Ensure that if the inherited volume is not writable, we don't
+			// make it writable here
+			mnt.Writable = mnt.Writable && (mode == "rw")
+		}
+	}
+
+	return mounts, nil
+}
+
+func (container *Container) VolumeMounts() map[string]*Mount {
+	mounts := make(map[string]*Mount)
+
+	for mountToPath, path := range container.Volumes {
+		if v := container.daemon.volumes.Get(path); v != nil {
+			mounts[mountToPath] = &Mount{volume: v, container: container, MountToPath: mountToPath, Writable: container.VolumesRW[mountToPath]}
+		}
+	}
+
+	return mounts
+}
+
+func copyExistingContents(source, destination string) error {
+	volList, err := ioutil.ReadDir(source)
+	if err != nil {
+		return err
+	}
+
+	if len(volList) > 0 {
+		srcList, err := ioutil.ReadDir(destination)
+		if err != nil {
+			return err
+		}
+
+		if len(srcList) == 0 {
+			// If the destination volume is empty, copy files from the
+			// container's rootfs path into the volume
+			if err := chrootarchive.CopyWithTar(source, destination); err != nil {
+				return err
+			}
+		}
+	}
+
+	return copyOwnership(source, destination)
+}
+
+// copyOwnership copies the permissions and uid:gid of the source file
+// into the destination file
+func copyOwnership(source, destination string) error {
+	var stat syscall.Stat_t
+
+	if err := syscall.Stat(source, &stat); err != nil {
+		return err
+	}
+
+	if err := os.Chown(destination, int(stat.Uid), int(stat.Gid)); err != nil {
+		return err
+	}
+
+	return os.Chmod(destination, os.FileMode(stat.Mode))
+}
diff -Nru docker.io-0.9.1~dfsg1/daemon/wait.go docker.io-1.3.2~dfsg1/daemon/wait.go
--- docker.io-0.9.1~dfsg1/daemon/wait.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/daemon/wait.go	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,20 @@
+package daemon
+
+import (
+	"time"
+
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerWait(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Usage: %s", job.Name)
+	}
+	name := job.Args[0]
+	if container := daemon.Get(name); container != nil {
+		status, _ := container.WaitStop(-1 * time.Second)
+		job.Printf("%d\n", status)
+		return engine.StatusOK
+	}
+	return job.Errorf("%s: No such container: %s", job.Name, name)
+}
diff -Nru docker.io-0.9.1~dfsg1/debian/changelog docker.io-1.3.2~dfsg1/debian/changelog
--- docker.io-0.9.1~dfsg1/debian/changelog	2014-04-09 03:19:13.000000000 +0000
+++ docker.io-1.3.2~dfsg1/debian/changelog	2014-11-26 09:11:16.000000000 +0000
@@ -1,3 +1,140 @@
+docker.io (1.3.2~dfsg1-1ubuntu1~andre1) trusty; urgency=medium
+
+  * Backport from Ubuntu / Debian without changes
+
+ -- Andre Klitzing  Wed, 26 Nov 2014 10:11:16 +0100
+
+docker.io (1.3.2~dfsg1-1ubuntu1) vivid; urgency=low
+
+  * Merge from Debian unstable. Remaining changes:
+    - d/p/sync-apparmor-with-lxc.patch: Update AppArmor policy to be
+      in sync with LXC.
+  * Dropped changes, equivalents included in Debian updates:
+    - d/p/support-no-env-default-file.patch.
+
+ -- James Page  Tue, 25 Nov 2014 17:59:07 +0000
+
+docker.io (1.3.2~dfsg1-1) unstable; urgency=high
+
+  * Severity is set to high due to the sensitive nature of the CVEs this
+    upload fixes.
+  * Update to 1.3.2 upstream release
+    - Fix for CVE-2014-6407 (Archive extraction host privilege escalation)
+    - Fix for CVE-2014-6408 (Security options applied to image could lead
+      to container escalation)
+  * Remove Daniel Mizyrycki from Uploaders. Thanks for all your work!
+
+ -- Paul Tagliamonte  Mon, 24 Nov 2014 19:14:28 -0500
+
+docker.io (1.3.1~dfsg1-2) unstable; urgency=medium
+
+  * Remove deprecated /usr/bin/docker.io symlink
+    - added as a temporary shim in 1.0.0~dfsg1-1 (13 Jun 2014)
+    - unused by package-installed files in 1.2.0~dfsg1-1 (13 Sep 2014)
+
+ -- Tianon Gravi  Fri, 07 Nov 2014 13:11:34 -0700
+
+docker.io (1.3.1~dfsg1-1) unstable; urgency=high
+
+  * Update to 1.3.1 upstream release
+    - fix for CVE-2014-5277
+    - https://groups.google.com/d/topic/docker-user/oYm0i3xShJU/discussion
+
+ -- Tianon Gravi  Mon, 03 Nov 2014 08:26:29 -0700
+
+docker.io (1.3.0~dfsg1-1) unstable; urgency=medium
+
+  * Updated to 1.3.0 upstream release.
+  * Enable systemd socket activation (Closes: #752555).
+
+ -- Tianon Gravi  Fri, 17 Oct 2014 00:56:07 -0600
+
+docker.io (1.2.0~dfsg1-2) unstable; urgency=medium
+
+  * Added "golang-docker-dev" package for the reusable bits of Docker's source.
+
+ -- Tianon Gravi  Thu, 09 Oct 2014 00:08:11 +0000
+
+docker.io (1.2.0~dfsg1-1ubuntu2) vivid; urgency=medium
+
+  * Reenable socket activation (race fixed with systemd 215)
+  * debian/patches/support-no-env-default-file.patch:
+    - Support removed /etc/default/docker under systemd
+
+ -- Didier Roche  Thu, 20 Nov 2014 10:18:42 +0100
+
+docker.io (1.2.0~dfsg1-1ubuntu1) utopic; urgency=medium
+
+  * debian/patches/sync-apparmor-with-lxc.patch: update AppArmor policy to
+    be in sync with LXC. Specifically this:
+    - reorganizes the rules to allow for easier comparison with other
+      container policy
+    - adds comments for many rules
+    - adds bare dbus rule
+    - adds ptrace rule to allow ptracing ourselves
+    - adds deny mount options=(ro, remount, silent) -> /
+    - allows hugetlbfs
+    - adds cgmanager mount
+    - adds /sys/fs/pstore mount
+    - more specific /sys/kernel/security mount options
+    - more specific /sys mount options
+    - more specific /proc/sys/kernel/* deny rules
+    - more specific /proc/sys/net deny rules
+    - more specific /sys/class deny rules
+    - more specific /sys/devices deny rules
+    - more specific /sys/fs deny rules
+
+ -- Jamie Strandboge  Wed, 01 Oct 2014 13:24:01 -0500
+
+docker.io (1.2.0~dfsg1-1) unstable; urgency=medium
+
+  * Updated to 1.2.0 upstream release (Closes: #757183, #757023, #757024).
+  * Added upstream man pages.
+  * Updated bash and zsh completions to be installed as "docker" and "_docker".
+  * Updated init scripts to also be installed as "docker".
+  * Fixed "equivalent" typo in README.Debian (Closes: #756395). Thanks Reuben!
+  * Removed "docker.io" mention in README.Debian (Closes: #756290). Thanks
+    Olivier!
+
+ -- Tianon Gravi  Sat, 13 Sep 2014 11:43:17 -0600
+
+docker.io (1.0.0~dfsg1-1) unstable; urgency=medium
+
+  * Updated to 1.0.0 upstream release. Huzzah!
+  * I've removed what is commonly called a `button' of patches against
+    the docker package. Exact patches:
+    - bash-completion-docker.io.patch
+    - systemd-docker.io.patch
+    - sysvinit-provides-docker.io.patch
+    - zsh-completion-docker.io.patch
+    - mkimage-docker.io.patch
+  * I know y'all are guessing why; and the answer's pretty simple -- we're
Since the src:docker package now ships wmdocker(1), + we can safely declare a breaks/replaces on the pre-wmdocker version of the + package, allowing existing users to safely update, both src:docker and + src:docker.io side. This brings us into line with other distros, which + now ship wmdocker(1) and docker(1). + * As a stop-gap, I'm still shipping a docker.io(1) symlink to allow + migration away. + + -- Paul Tagliamonte Fri, 13 Jun 2014 21:04:53 -0400 + +docker.io (0.11.1~dfsg1-1) unstable; urgency=medium + + [ Paul Tagliamonte ] + * Use EnvironmentFile with the systemd unit file. (Closes: #746774) + * Patch out version checking code. (Closes: #747140) + * Remove all host checking for non-amd64 host arches. Let docker build + and run on all platforms now. (Closes: #747139, #739914) + + [ Tianon Gravi ] + * Updated to 0.11.1 upstream release. + * Added backported upstream patch for removing RemoteAddr assumptions + that cause events to not be delivered to more than one unix socket + listener. + + -- Tianon Gravi Fri, 09 May 2014 17:30:45 -0400 + docker.io (0.9.1~dfsg1-2) unstable; urgency=medium * Added upstream apparmor patch to fix newer apparmor versions (such as the @@ -99,3 +236,4 @@ alteholz. -- Paul Tagliamonte Tue, 07 Jan 2014 21:06:10 -0500 + diff -Nru docker.io-0.9.1~dfsg1/debian/control docker.io-1.3.2~dfsg1/debian/control --- docker.io-0.9.1~dfsg1/debian/control 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/control 2014-11-25 03:53:15.000000000 +0000 @@ -1,8 +1,9 @@ Source: docker.io Section: admin Priority: optional -Maintainer: Paul Tagliamonte -Uploaders: Daniel Mizyrycki , +Maintainer: Ubuntu Developers +XSBC-Original-Maintainer: Paul Tagliamonte +Uploaders: Docker Packaging Team , Tianon Gravi , Johan Euphrosine Build-Depends: bash-completion, @@ -10,19 +11,25 @@ debhelper (>=9), dh-golang (>= 1.1), dh-systemd, - golang (>= 2:1.2.1-2~), - golang-context-dev, - golang-go.net-dev, - golang-gocapability-dev, - golang-gosqlite-dev, - golang-mux-dev, - golang-pty-dev, + go-md2man, + golang (>= 2:1.2-3~), + golang (>= 2:1.2.1-2~) | golang (<< 2:1.2.1~), + golang (>= 2:1.3-4~) | golang (= 2:1.3-1) | golang (<< 2:1.3~), + golang-context-dev (>= 0.0~git20140604~), + golang-dbus-dev (>= 1~), + golang-go-patricia-dev (>= 1.0.1~), + golang-go-systemd-dev (>= 2~), + golang-go.net-dev (>= 0.0~hg20130530~), + golang-gocapability-dev (>= 0.0~git20140516~), + golang-gosqlite-dev (>= 0.0~hg20130530~), + golang-mux-dev (>= 0.0~git20140505~), + golang-pty-dev (>= 0.0~git20140315.1.67e2db2-1~), libapparmor-dev, libdevmapper-dev -Standards-Version: 3.9.5 -Homepage: https://github.com/dotcloud/docker +Standards-Version: 3.9.6 +Homepage: https://github.com/docker/docker Vcs-Git: git://anonscm.debian.org/docker/docker.io.git -Vcs-Browser: http://anonscm.debian.org/gitweb/?p=docker/docker.io.git;a=summary +Vcs-Browser: http://anonscm.debian.org/cgit/docker/docker.io.git Package: docker.io Architecture: linux-any @@ -31,7 +38,10 @@ ca-certificates, cgroupfs-mount | cgroup-lite, git, - xz-utils + xz-utils, + ${apparmor:Recommends} +Replaces: docker (<< 1.5~) +Breaks: docker (<< 1.5~) Suggests: btrfs-tools, debootstrap, lxc, rinse Built-Using: ${misc:Built-Using} Description: Linux container runtime @@ -43,9 +53,9 @@ large-scale web deployments, database clusters, continuous deployment systems, private PaaS, service-oriented architectures, etc. . - This package contains the daemon and client. Using docker.io on non-amd64 - hosts is not supported at this time. 
Please be careful when using it on - anything besides amd64. + This package contains the daemon and client. Using docker.io on non-amd64 hosts + is not supported at this time. Please be careful when using it on anything + besides amd64. . Also, note that kernel version 3.8 or above is required for proper operation of the daemon process, and that any lower versions may have subtle and/or glaring @@ -57,5 +67,14 @@ Recommends: vim-addon-manager Suggests: docker.io Description: Docker container engine - Vim highlighting syntax files - This package provides syntax files for the Vim editor for editing - Dockerfiles from the Docker container engine. + This package provides syntax files for the Vim editor for editing Dockerfiles + from the Docker container engine. + +Package: golang-docker-dev +Architecture: all +Depends: ${misc:Depends} +Built-Using: ${misc:Built-Using} +Description: Externally reusable Go packages included with Docker + These packages are intentionally developed by upstream in such a way that they + are reusable to projects outside Docker and only rely on each other or other + external dependencies to be built. diff -Nru docker.io-0.9.1~dfsg1/debian/copyright docker.io-1.3.2~dfsg1/debian/copyright --- docker.io-0.9.1~dfsg1/debian/copyright 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/copyright 2014-11-25 03:53:15.000000000 +0000 @@ -1,10 +1,10 @@ Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: Docker -Upstream-Contact: Docker, Inc. -Source: https://github.com/dotcloud/docker +Upstream-Contact: Docker, Inc. +Source: https://github.com/docker/docker Files: * -Copyright: 2012-2014, Docker, Inc. +Copyright: 2012-2014 Docker, Inc. License: Apache-2.0 Files: debian/* @@ -16,25 +16,21 @@ 2014 Prach Pongpanich License: Apache-2.0 -Files: hack/infrastructure/docker-ci/buildbot/github.py -Copyright: Buildbot Team Members -License: GPL-2 - Files: contrib/init/openrc/docker.initd Copyright: 1999-2013 Gentoo Foundation License: GPL-2 -Files: contrib/vim-syntax/* +Files: contrib/syntax/vim/* Copyright: 2013 Honza Pokorny License: BSD-2-clause Files: pkg/mflag/* Copyright: 2014 The Docker & Go Authors -License: BSD-3-clause (Google) +License: BSD-3-clause-Google Files: contrib/completion/zsh/* Copyright: 2013-2014 Felix Riedel -License: BSD-3-clause (Generic) +License: BSD-3-clause-Generic License: Apache-2.0 Licensed under the Apache License, Version 2.0 (the "License"); @@ -90,7 +86,7 @@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -License: BSD-3-clause (Google) +License: BSD-3-clause-Google Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: @@ -117,7 +113,7 @@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-License: BSD-3-clause (Generic) +License: BSD-3-clause-Generic Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright diff -Nru docker.io-0.9.1~dfsg1/debian/Dockerfile docker.io-1.3.2~dfsg1/debian/Dockerfile --- docker.io-0.9.1~dfsg1/debian/Dockerfile 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/Dockerfile 2014-11-25 03:53:15.000000000 +0000 @@ -1,43 +1,19 @@ -FROM debian:sid -MAINTAINER Tianon Gravi - -# build deps -RUN apt-get update && apt-get install -yq \ - devscripts \ - equivs \ - libcrypt-ssleay-perl \ - libfile-fcntllock-perl \ - libwww-perl \ - lintian \ - python3-debian \ - --no-install-recommends - -# need an editor for "dch -i" -RUN apt-get update && apt-get install -yq vim-nox --no-install-recommends - -# need deb-src for compiling packages -RUN echo 'deb-src http://http.debian.net/debian sid main' >> /etc/apt/sources.list +FROM tianon/debian-devel # start by adding just "debian/control" so we can get mk-build-deps with maximum caching -ADD control /usr/src/docker.io/debian/ +COPY control /usr/src/docker.io/debian/ WORKDIR /usr/src/docker.io # get all the build deps of _this_ package in a nice repeatable way -RUN apt-get update && mk-build-deps -irt'apt-get --no-install-recommends -yq' debian/control +RUN apt-get update && mk-build-deps -irt'apt-get --no-install-recommends -yV' debian/control # need our debian/ directory to compile _this_ package -ADD . /usr/src/docker.io/debian +COPY . /usr/src/docker.io/debian # go download and unpack our upstream source RUN uscan --force-download --verbose --download-current-version -RUN origtargz --unpack +RUN DOCKER_TARBALLS=.. ./debian/helpers/download-libcontainer +RUN /tianon/extract-origtargz.sh # tianon is _really_ lazy, and likes a preseeded bash history -RUN { \ - echo "DEBFULLNAME='' DEBEMAIL='' dch -i"; \ - echo 'lintian --ftp-master-rejects'; \ - echo 'uscan --force-download --verbose --download-current-version'; \ - echo 'origtargz --unpack && debuild -us -uc --lintian-opts "-EvIL+pedantic"'; \ -} >> /.bash_history - -CMD [ "debuild", "-us", "-uc", "--lintian-opts", "-EvIL+pedantic" ] +RUN echo '/tianon/extract-origtargz.sh && dpkg-buildpackage -us -uc && lintian -EvIL+pedantic' >> /root/.bash_history diff -Nru docker.io-0.9.1~dfsg1/debian/docker.io.1 docker.io-1.3.2~dfsg1/debian/docker.io.1 --- docker.io-0.9.1~dfsg1/debian/docker.io.1 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/docker.io.1 1970-01-01 00:00:00.000000000 +0000 @@ -1,54 +0,0 @@ -'\" -*- coding: us-ascii -*- -.if \n(.g .ds T< \\FC -.if \n(.g .ds T> \\F[\n[.fam]] -.de URL -\\$2 \(la\\$1\(ra\\$3 -.. -.if \n(.g .mso www.tmac -.TH docker.io 1 "Sat Jul 27 11:46:22 UTC 2013" "" "" -.SH NAME -docker.io \- A self-sufficient runtime for linux containers. -.SH SYNOPSIS -'nh -.fi -.ad l -\fBdocker.io\fR [OPTIONS] COMMAND [arg...]\kx -.if (\nx>(\n(.l/2)) .nr x (\n(.l/5) -'in \n(.iu+\nxu -'in \n(.iu-\nxu -.ad b -'hy -.SH DESCRIPTION -Docker complements LXC with a high-level API which operates at the process -level. It runs unix processes with strong guarantees of isolation and -repeatability across servers. -Docker is a great building block for automating distributed systems: -large-scale web deployments, database clusters, continuous deployment systems, -private PaaS, service-oriented architectures, etc. 
-.P -\fBdocker.io\fR runs in three ways: -.TP -\fB*\fR as a daemon to manage LXC containers on your Linux host -sudo docker.io -d -.TP -\fB*\fR as a CLI which talks to the daemon’s REST API -docker.io run ... -.TP -\fB*\fR as a client of Repositories that let you share what you’ve built -docker.io pull, docker.io commit -.SH EXAMPLES -.TP -docker.io -list COMMANDS available -.TP -docker.io --help -list OPTIONS available -.TP -docker.io run busybox echo 'Hello world' -Pull busybox image, launch a container with it, execute echo inside, and greet -.SH "SEE ALSO" -The full docker.io documentation is at http://docs.docker.io -.br -In Debian, documentation is available on /usr/share/doc/docker.io/README.md.gz -.SH AUTHOR -Daniel Mizyrycki <\*(T> diff -Nru docker.io-0.9.1~dfsg1/debian/docker.io.bash-completion docker.io-1.3.2~dfsg1/debian/docker.io.bash-completion --- docker.io-0.9.1~dfsg1/debian/docker.io.bash-completion 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/docker.io.bash-completion 2014-11-25 03:53:15.000000000 +0000 @@ -1 +1 @@ -contrib/completion/bash/docker docker.io +contrib/completion/bash/docker diff -Nru docker.io-0.9.1~dfsg1/debian/docker.io.default docker.io-1.3.2~dfsg1/debian/docker.io.default --- docker.io-0.9.1~dfsg1/debian/docker.io.default 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/docker.io.default 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -# Docker Upstart and SysVinit configuration file - -# Customize location of Docker binary (especially for development testing). -#DOCKER="/usr/local/bin/docker" - -# Use DOCKER_OPTS to modify the daemon startup options. -#DOCKER_OPTS="-dns 8.8.8.8 -dns 8.8.4.4" - -# If you need Docker to use an HTTP proxy, it can also be specified here. -#export http_proxy="http://127.0.0.1:3128/" - -# This is also a handy place to tweak where Docker's temporary files go. -#export TMPDIR="/mnt/bigdrive/docker-tmp" diff -Nru docker.io-0.9.1~dfsg1/debian/docker.io.docker.default docker.io-1.3.2~dfsg1/debian/docker.io.docker.default --- docker.io-0.9.1~dfsg1/debian/docker.io.docker.default 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/docker.io.docker.default 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,13 @@ +# Docker Upstart and SysVinit configuration file + +# Customize location of Docker binary (especially for development testing). +#DOCKER="/usr/local/bin/docker" + +# Use DOCKER_OPTS to modify the daemon startup options. +#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4" + +# If you need Docker to use an HTTP proxy, it can also be specified here. +#export http_proxy="http://127.0.0.1:3128/" + +# This is also a handy place to tweak where Docker's temporary files go. +#export TMPDIR="/mnt/bigdrive/docker-tmp" diff -Nru docker.io-0.9.1~dfsg1/debian/docker.io.docker.init docker.io-1.3.2~dfsg1/debian/docker.io.docker.init --- docker.io-0.9.1~dfsg1/debian/docker.io.docker.init 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/docker.io.docker.init 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,141 @@ +#!/bin/sh +set -e + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $syslog $remote_fs +# Required-Stop: $syslog $remote_fs +# Should-Start: cgroupfs-mount cgroup-lite +# Should-Stop: cgroupfs-mount cgroup-lite +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Create lightweight, portable, self-sufficient containers. 
+# Description: +# Docker is an open-source project to easily create lightweight, portable, +# self-sufficient containers from any application. The same container that a +# developer builds and tests on a laptop can run at scale, in production, on +# VMs, bare metal, OpenStack clusters, public clouds and more. +### END INIT INFO + +export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin + +BASE=$(basename $0) + +# modify these in /etc/default/$BASE (/etc/default/docker) +DOCKER=/usr/bin/$BASE +# This is the pid file managed by docker itself +DOCKER_PIDFILE=/var/run/$BASE.pid +# This is the pid file created/managed by start-stop-daemon +DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid +DOCKER_LOGFILE=/var/log/$BASE.log +DOCKER_OPTS= +DOCKER_DESC="Docker" + +# Get lsb functions +. /lib/lsb/init-functions + +if [ -f /etc/default/$BASE ]; then + . /etc/default/$BASE +fi + +# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it) +if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then + log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1" + exit 1 +fi + +# Check docker is present +if [ ! -x $DOCKER ]; then + log_failure_msg "$DOCKER not present or not executable" + exit 1 +fi + +fail_unless_root() { + if [ "$(id -u)" != '0' ]; then + log_failure_msg "$DOCKER_DESC must be run as root" + exit 1 + fi +} + +cgroupfs_mount() { + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + return + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +} + +case "$1" in + start) + fail_unless_root + + cgroupfs_mount + + touch "$DOCKER_LOGFILE" + chgrp docker "$DOCKER_LOGFILE" + + ulimit -n 1048576 + if [ "$BASH" ]; then + ulimit -u 1048576 + else + ulimit -p 1048576 + fi + + log_begin_msg "Starting $DOCKER_DESC: $BASE" + start-stop-daemon --start --background \ + --no-close \ + --exec "$DOCKER" \ + --pidfile "$DOCKER_SSD_PIDFILE" \ + --make-pidfile \ + -- \ + -d -p "$DOCKER_PIDFILE" \ + $DOCKER_OPTS \ + >> "$DOCKER_LOGFILE" 2>&1 + log_end_msg $? + ;; + + stop) + fail_unless_root + log_begin_msg "Stopping $DOCKER_DESC: $BASE" + start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE" + log_end_msg $? 
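	# Note: stop and status act on $DOCKER_SSD_PIDFILE, which start-stop-daemon
	# maintains via --make-pidfile, rather than $DOCKER_PIDFILE, which docker
	# writes itself (via "-d -p") and may never create if startup fails early.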
+ ;; + + restart) + fail_unless_root + docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null` + [ -n "$docker_pid" ] \ + && ps -p $docker_pid > /dev/null 2>&1 \ + && $0 stop + $0 start + ;; + + force-reload) + fail_unless_root + $0 restart + ;; + + status) + status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" "$DOCKER_DESC" + ;; + + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac diff -Nru docker.io-0.9.1~dfsg1/debian/docker.io.docker.upstart docker.io-1.3.2~dfsg1/debian/docker.io.docker.upstart --- docker.io-0.9.1~dfsg1/debian/docker.io.docker.upstart 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/docker.io.docker.upstart 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,41 @@ +description "Docker daemon" + +start on (local-filesystems and net-device-up IFACE!=lo) +stop on runlevel [!2345] +limit nofile 524288 1048576 +limit nproc 524288 1048576 + +respawn + +pre-start script + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + exit 0 + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +end script + +script + # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) + DOCKER=/usr/bin/$UPSTART_JOB + DOCKER_OPTS= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + exec "$DOCKER" -d $DOCKER_OPTS +end script diff -Nru docker.io-0.9.1~dfsg1/debian/docker.io.init docker.io-1.3.2~dfsg1/debian/docker.io.init --- docker.io-0.9.1~dfsg1/debian/docker.io.init 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/docker.io.init 1970-01-01 00:00:00.000000000 +0000 @@ -1,129 +0,0 @@ -#!/bin/sh - -### BEGIN INIT INFO -# Provides: docker -# Required-Start: $syslog $remote_fs -# Required-Stop: $syslog $remote_fs -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: Create lightweight, portable, self-sufficient containers. -# Description: -# Docker is an open-source project to easily create lightweight, portable, -# self-sufficient containers from any application. The same container that a -# developer builds and tests on a laptop can run at scale, in production, on -# VMs, bare metal, OpenStack clusters, public clouds and more. -### END INIT INFO - -export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin - -BASE=$(basename $0) - -# modify these in /etc/default/$BASE (/etc/default/docker) -DOCKER=/usr/bin/$BASE -DOCKER_PIDFILE=/var/run/$BASE.pid -DOCKER_LOGFILE=/var/log/$BASE.log -DOCKER_OPTS= -DOCKER_DESC="Docker" - -# Get lsb functions -. /lib/lsb/init-functions - -if [ -f /etc/default/$BASE ]; then - . /etc/default/$BASE -fi - -# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it) -if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then - log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1" - exit 1 -fi - -# Check docker is present -if [ ! 
-x $DOCKER ]; then - log_failure_msg "$DOCKER not present or not executable" - exit 1 -fi - -fail_unless_root() { - if [ "$(id -u)" != '0' ]; then - log_failure_msg "$DOCKER_DESC must be run as root" - exit 1 - fi -} - -cgroupfs_mount() { - # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount - if grep -v '^#' /etc/fstab | grep -q cgroup \ - || [ ! -e /proc/cgroups ] \ - || [ ! -d /sys/fs/cgroup ]; then - return - fi - if ! mountpoint -q /sys/fs/cgroup; then - mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup - fi - ( - cd /sys/fs/cgroup - for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do - mkdir -p $sys - if ! mountpoint -q $sys; then - if ! mount -n -t cgroup -o $sys cgroup $sys; then - rmdir $sys || true - fi - fi - done - ) -} - -case "$1" in - start) - fail_unless_root - - cgroupfs_mount - - touch "$DOCKER_LOGFILE" - chgrp docker "$DOCKER_LOGFILE" - - log_begin_msg "Starting $DOCKER_DESC: $BASE" - start-stop-daemon --start --background \ - --no-close \ - --exec "$DOCKER" \ - --pidfile "$DOCKER_PIDFILE" \ - -- \ - -d -p "$DOCKER_PIDFILE" \ - $DOCKER_OPTS \ - >> "$DOCKER_LOGFILE" 2>&1 - log_end_msg $? - ;; - - stop) - fail_unless_root - log_begin_msg "Stopping $DOCKER_DESC: $BASE" - start-stop-daemon --stop --pidfile "$DOCKER_PIDFILE" - log_end_msg $? - ;; - - restart) - fail_unless_root - docker_pid=`cat "$DOCKER_PIDFILE" 2>/dev/null` - [ -n "$docker_pid" ] \ - && ps -p $docker_pid > /dev/null 2>&1 \ - && $0 stop - $0 start - ;; - - force-reload) - fail_unless_root - $0 restart - ;; - - status) - status_of_proc -p "$DOCKER_PIDFILE" "$DOCKER" docker - ;; - - *) - echo "Usage: $0 {start|stop|restart|status}" - exit 1 - ;; -esac - -exit 0 diff -Nru docker.io-0.9.1~dfsg1/debian/docker.io.install docker.io-1.3.2~dfsg1/debian/docker.io.install --- docker.io-0.9.1~dfsg1/debian/docker.io.install 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/docker.io.install 2014-11-25 03:53:15.000000000 +0000 @@ -1,2 +1,6 @@ contrib/*-integration usr/share/docker.io/contrib/ +contrib/completion/zsh/_docker usr/share/zsh/vendor-completions/ +contrib/init/systemd/docker.service lib/systemd/system/ +contrib/init/systemd/docker.socket lib/systemd/system/ contrib/mk* usr/share/docker.io/contrib/ +contrib/nuke-graph-directory.sh usr/share/docker.io/contrib/ diff -Nru docker.io-0.9.1~dfsg1/debian/docker.io.maintscript docker.io-1.3.2~dfsg1/debian/docker.io.maintscript --- docker.io-0.9.1~dfsg1/debian/docker.io.maintscript 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/docker.io.maintscript 2014-11-25 03:53:15.000000000 +0000 @@ -0,0 +1,4 @@ +mv_conffile /etc/bash_completion.d/docker.io /etc/bash_completion.d/docker 1.2.0~ +mv_conffile /etc/default/docker.io /etc/default/docker 1.2.0~ +mv_conffile /etc/init.d/docker.io /etc/init.d/docker 1.2.0~ +mv_conffile /etc/init/docker.io.conf /etc/init/docker.conf 1.2.0~ diff -Nru docker.io-0.9.1~dfsg1/debian/docker.io.manpages docker.io-1.3.2~dfsg1/debian/docker.io.manpages --- docker.io-0.9.1~dfsg1/debian/docker.io.manpages 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/docker.io.manpages 2014-11-25 03:53:15.000000000 +0000 @@ -0,0 +1 @@ +docs/man/man*/* diff -Nru docker.io-0.9.1~dfsg1/debian/docker.io.service docker.io-1.3.2~dfsg1/debian/docker.io.service --- docker.io-0.9.1~dfsg1/debian/docker.io.service 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/docker.io.service 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -[Unit] 
-Description=Docker Application Container Engine -Documentation=http://docs.docker.io -After=network.target - -[Service] -ExecStart=/usr/bin/docker -d -Restart=on-failure -LimitNOFILE=1048576 -LimitNPROC=1048576 - -[Install] -WantedBy=multi-user.target diff -Nru docker.io-0.9.1~dfsg1/debian/docker.io.upstart docker.io-1.3.2~dfsg1/debian/docker.io.upstart --- docker.io-0.9.1~dfsg1/debian/docker.io.upstart 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/docker.io.upstart 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -description "Docker daemon" - -start on filesystem -stop on runlevel [!2345] - -respawn - -pre-start script - # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount - if grep -v '^#' /etc/fstab | grep -q cgroup \ - || [ ! -e /proc/cgroups ] \ - || [ ! -d /sys/fs/cgroup ]; then - exit 0 - fi - if ! mountpoint -q /sys/fs/cgroup; then - mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup - fi - ( - cd /sys/fs/cgroup - for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do - mkdir -p $sys - if ! mountpoint -q $sys; then - if ! mount -n -t cgroup -o $sys cgroup $sys; then - rmdir $sys || true - fi - fi - done - ) -end script - -script - # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) - DOCKER=/usr/bin/$UPSTART_JOB - DOCKER_OPTS= - if [ -f /etc/default/$UPSTART_JOB ]; then - . /etc/default/$UPSTART_JOB - fi - "$DOCKER" -d $DOCKER_OPTS -end script diff -Nru docker.io-0.9.1~dfsg1/debian/helpers/download-libcontainer docker.io-1.3.2~dfsg1/debian/helpers/download-libcontainer --- docker.io-0.9.1~dfsg1/debian/helpers/download-libcontainer 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/helpers/download-libcontainer 2014-11-25 03:53:15.000000000 +0000 @@ -0,0 +1,49 @@ +#!/bin/bash +set -e + +mkdir -p "${DOCKER_TARBALLS:=../tarballs}" + +pkg="$(dpkg-parsechangelog -SSource)" +ver="$(dpkg-parsechangelog -SVersion)" +origVer="${ver%-*}" # strip everything from the last dash +origVer="$(echo "$origVer" | sed -r 's/^[0-9]+://')" # strip epoch +upstreamVer="${origVer%%[+~]*}" +origTarballPrefix="${DOCKER_TARBALLS}/${pkg}_${origVer}.orig" +unprunedTarballPrefix="${DOCKER_TARBALLS}/${pkg}_${upstreamVer}.orig" + +if command -v curl &> /dev/null; then + curl='curl -sSL' +elif command -v wget &> /dev/null; then + curl='wget -qO-' +else + echo >&2 'error: missing "curl" or "wget" - install one or the other' + exit 1 +fi + +get_hack_vendor() { + if [ -e "${origTarballPrefix}.tar.gz" ]; then + # if we have the main orig tarball handy, let's prefer that + tar -xzOf "${origTarballPrefix}.tar.gz" --wildcards '*/hack/vendor.sh' + else + # but fall back to grabbing it raw from github otherwise + $curl "https://raw.githubusercontent.com/docker/docker/v${upstreamVer}/hack/vendor.sh" + fi +} + +if libcontainerCommit="$(get_hack_vendor | grep -m1 '^clone git github.com/docker/libcontainer ' | cut -d' ' -f4)" && [ "$libcontainerCommit" ]; then + $curl "https://github.com/docker/libcontainer/archive/${libcontainerCommit}.tar.gz" > "${unprunedTarballPrefix}-libcontainer.tar.gz" + + echo "successfully fetched ${unprunedTarballPrefix}-libcontainer.tar.gz" + echo " (from libcontainer commit $libcontainerCommit)" + + "$(dirname "$(readlink -f "$BASH_SOURCE")")/../repack.sh" --upstream-version "$upstreamVer" "${unprunedTarballPrefix}-libcontainer.tar.gz" +fi + +if libtrustCommit="$(get_hack_vendor | grep -m1 '^clone git github.com/docker/libtrust ' | cut -d' ' -f4)" && [ "$libtrustCommit" ]; then + $curl 
"https://github.com/docker/libtrust/archive/${libtrustCommit}.tar.gz" > "${unprunedTarballPrefix}-libtrust.tar.gz" + + echo "successfully fetched ${unprunedTarballPrefix}-libtrust.tar.gz" + echo " (from libtrust commit $libtrustCommit)" + + "$(dirname "$(readlink -f "$BASH_SOURCE")")/../repack.sh" --upstream-version "$upstreamVer" "${unprunedTarballPrefix}-libtrust.tar.gz" +fi diff -Nru docker.io-0.9.1~dfsg1/debian/helpers/gitcommit.sh docker.io-1.3.2~dfsg1/debian/helpers/gitcommit.sh --- docker.io-0.9.1~dfsg1/debian/helpers/gitcommit.sh 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/helpers/gitcommit.sh 2014-11-25 03:53:15.000000000 +0000 @@ -0,0 +1,34 @@ +#!/bin/bash +set -e + +uVersion="$1" +dVersion="$2" + +if [ -z "$uVersion" ]; then + uVersion="$(cat VERSION)" +fi +if [ -z "$dVersion" ]; then + dVersion="$(dpkg-parsechangelog --show-field Version)" +fi + +if [ "${uVersion%-dev}" = "$uVersion" ]; then + # this is a straight-up release! easy-peasy + exec awk '/^'"$uVersion"':/ { print $2 }' debian/upstream-version-gitcommits +fi + +# must be a nightly, so let's look for clues about what the git commit is + +if git rev-parse &> /dev/null; then + # well, this will be easy ;) + exec git rev-parse --short HEAD +fi + +if [ "${dVersion#*+*+}" != "$dVersion" ]; then + # must be something like "1.1.2+10013+8c38a3d-1~utopic1" (nightly!) + commit="${dVersion#*+*+}" + commit="${commit%%-*}" + exec echo "$commit" +fi + +# unknown... +echo >&2 'warning: unable to determine DOCKER_GITCOMMIT' diff -Nru docker.io-0.9.1~dfsg1/debian/patches/5049-trusty-apparmor-fix.patch docker.io-1.3.2~dfsg1/debian/patches/5049-trusty-apparmor-fix.patch --- docker.io-0.9.1~dfsg1/debian/patches/5049-trusty-apparmor-fix.patch 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/patches/5049-trusty-apparmor-fix.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -Description: Fix issues with newer apparmor -Origin: upstream, https://github.com/dotcloud/docker/pull/5049 -Applied-Upstream: 0.10.0, https://github.com/dotcloud/docker/pull/5049 - -diff --git a/pkg/libcontainer/apparmor/setup.go b/pkg/libcontainer/apparmor/setup.go -index 4e1c951..4c66459 100644 ---- a/pkg/libcontainer/apparmor/setup.go -+++ b/pkg/libcontainer/apparmor/setup.go -@@ -11,13 +11,10 @@ import ( - const DefaultProfilePath = "/etc/apparmor.d/docker" - const DefaultProfile = ` - # AppArmor profile from lxc for containers. 
--@{HOME}=@{HOMEDIRS}/*/ /root/ --@{HOMEDIRS}=/home/ --#@{HOMEDIRS}+= --@{multiarch}=*-linux-gnu* --@{PROC}=/proc/ - -+#include - profile docker-default flags=(attach_disconnected,mediate_deleted) { -+ #include - network, - capability, - file, diff -Nru docker.io-0.9.1~dfsg1/debian/patches/bash-completion-docker.io.patch docker.io-1.3.2~dfsg1/debian/patches/bash-completion-docker.io.patch --- docker.io-0.9.1~dfsg1/debian/patches/bash-completion-docker.io.patch 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/patches/bash-completion-docker.io.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -Description: Rename binary file from docker to docker.io -Author: Tianon Gravi -Forwarded: not-needed - -diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker -index 1449330..b658896 100755 ---- a/contrib/completion/bash/docker -+++ b/contrib/completion/bash/docker -@@ -22,7 +22,7 @@ - # must have access to the socket for the completions to function correctly - - __docker_q() { -- docker 2>/dev/null "$@" -+ docker.io 2>/dev/null "$@" - } - - __docker_containers_all() -@@ -689,4 +689,4 @@ _docker() - return 0 - } - --complete -F _docker docker -+complete -F _docker docker.io diff -Nru docker.io-0.9.1~dfsg1/debian/patches/change-system-unit-env-file.patch docker.io-1.3.2~dfsg1/debian/patches/change-system-unit-env-file.patch --- docker.io-0.9.1~dfsg1/debian/patches/change-system-unit-env-file.patch 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/patches/change-system-unit-env-file.patch 2014-11-25 17:43:49.000000000 +0000 @@ -0,0 +1,20 @@ +Author: Paul R. Tagliamonte +Last-Update: 2014-05-07 +Description: Use EnvironmentFile with the systemd unit file. +Bug-Debian: http://bugs.debian.org/746774 +Forwarded: no + +diff --git a/contrib/init/systemd/docker.service b/contrib/init/systemd/docker.service +index 0cb31e3..240961c 100644 +--- a/contrib/init/systemd/docker.service ++++ b/contrib/init/systemd/docker.service +@@ -5,7 +5,8 @@ After=network.target docker.socket + Requires=docker.socket + + [Service] +-ExecStart=/usr/bin/docker -d -H fd:// ++EnvironmentFile=/etc/default/docker ++ExecStart=/usr/bin/docker -d -H fd:// $DOCKER_OPTS + LimitNOFILE=1048576 + LimitNPROC=1048576 + diff -Nru docker.io-0.9.1~dfsg1/debian/patches/enable-non-amd64-arches.patch docker.io-1.3.2~dfsg1/debian/patches/enable-non-amd64-arches.patch --- docker.io-0.9.1~dfsg1/debian/patches/enable-non-amd64-arches.patch 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/patches/enable-non-amd64-arches.patch 2014-11-25 03:53:16.000000000 +0000 @@ -0,0 +1,20 @@ +Author: Paul R. Tagliamonte +Last-Update: 2014-05-07 +Description: Let this build on all platforms. +Applied-Upstream: haha-lololol +Bug-Debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=747139 +Forwarded: not-needed + +diff --git a/daemon/daemon.go b/daemon/daemon.go +index 0d27549..0bd9ff6 100644 +--- a/daemon/daemon.go ++++ b/daemon/daemon.go +@@ -1073,7 +1073,7 @@ func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*i + func checkKernelAndArch() error { + // Check for unsupported architectures + if runtime.GOARCH != "amd64" { +- return fmt.Errorf("The Docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH) ++ fmt.Fprintf(os.Stderr, "WARNING: The Docker runtime currently only officially supports amd64 (not %s). 
THIS BUILD IS NOT OFFICIAL AND WILL NOT BE SUPPORTED BY DOCKER UPSTREAM.", runtime.GOARCH) + } + // Check for unsupported kernel versions + // FIXME: it would be cleaner to not test for specific versions, but rather diff -Nru docker.io-0.9.1~dfsg1/debian/patches/mkimage-docker.io.patch docker.io-1.3.2~dfsg1/debian/patches/mkimage-docker.io.patch --- docker.io-0.9.1~dfsg1/debian/patches/mkimage-docker.io.patch 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/patches/mkimage-docker.io.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,118 +0,0 @@ -Description: Rename binary file from docker to docker.io -Author: Tianon Gravi -Forwarded: not-needed - -diff --git a/contrib/mkimage-arch.sh b/contrib/mkimage-arch.sh -index 73a4173..fe9f4cc 100755 ---- a/contrib/mkimage-arch.sh -+++ b/contrib/mkimage-arch.sh -@@ -58,6 +58,6 @@ mknod -m 666 $DEV/full c 1 7 - mknod -m 600 $DEV/initctl p - mknod -m 666 $DEV/ptmx c 5 2 - --tar --numeric-owner -C $ROOTFS -c . | docker import - archlinux --docker run -i -t archlinux echo Success. -+tar --numeric-owner -C $ROOTFS -c . | docker.io import - archlinux -+docker.io run -i -t archlinux echo Success. - rm -rf $ROOTFS -diff --git a/contrib/mkimage-busybox.sh b/contrib/mkimage-busybox.sh -index c1bb88c..2e993f1 100755 ---- a/contrib/mkimage-busybox.sh -+++ b/contrib/mkimage-busybox.sh -@@ -35,5 +35,5 @@ do - cp -a /dev/$X dev - done - --tar --numeric-owner -cf- . | docker import - busybox --docker run -i -u root busybox /bin/echo Success. -+tar --numeric-owner -cf- . | docker.io import - busybox -+docker.io run -i -u root busybox /bin/echo Success. -diff --git a/contrib/mkimage-crux.sh b/contrib/mkimage-crux.sh -index 074c334..891c725 100755 ---- a/contrib/mkimage-crux.sh -+++ b/contrib/mkimage-crux.sh -@@ -64,9 +64,9 @@ mknod -m 666 $DEV/full c 1 7 - mknod -m 600 $DEV/initctl p - mknod -m 666 $DEV/ptmx c 5 2 - --IMAGE_ID=$(tar --numeric-owner -C $ROOTFS -c . | docker import - crux:$VERSION) --docker tag $IMAGE_ID crux:latest --docker run -i -t crux echo Success. -+IMAGE_ID=$(tar --numeric-owner -C $ROOTFS -c . | docker.io import - crux:$VERSION) -+docker.io tag $IMAGE_ID crux:latest -+docker.io run -i -t crux echo Success. 
- - # Cleanup - umount $CRUX -diff --git a/contrib/mkimage-debootstrap.sh b/contrib/mkimage-debootstrap.sh -index 33ba7b0..7d480d1 100755 ---- a/contrib/mkimage-debootstrap.sh -+++ b/contrib/mkimage-debootstrap.sh -@@ -89,17 +89,17 @@ fi - - # some rudimentary detection for whether we need to "sudo" our docker calls - docker='' --if docker version > /dev/null 2>&1; then -- docker='docker' --elif sudo docker version > /dev/null 2>&1; then -- docker='sudo docker' --elif command -v docker > /dev/null 2>&1; then -- docker='docker' -+if docker.io version > /dev/null 2>&1; then -+ docker='docker.io' -+elif sudo docker.io version > /dev/null 2>&1; then -+ docker='sudo docker.io' -+elif command -v docker.io > /dev/null 2>&1; then -+ docker='docker.io' - else - echo >&2 "warning: either docker isn't installed, or your current user cannot run it;" - echo >&2 " this script is not likely to work as expected" - sleep 3 -- docker='docker' # give us a command-not-found later -+ docker='docker.io' # give us a command-not-found later - fi - - # make sure we have an absolute path to our final tarball so we can still reference it properly after we change directory -diff --git a/contrib/mkimage-rinse.sh b/contrib/mkimage-rinse.sh -index dfe9999..2d5bdf2 100755 ---- a/contrib/mkimage-rinse.sh -+++ b/contrib/mkimage-rinse.sh -@@ -111,9 +111,9 @@ if [ -z "$version" ]; then - version="$distro" - fi - --sudo tar --numeric-owner -c . | docker import - $repo:$version -+sudo tar --numeric-owner -c . | docker.io import - $repo:$version - --docker run -i -t $repo:$version echo success -+docker.io run -i -t $repo:$version echo success - - cd "$returnTo" - sudo rm -rf "$target" -diff --git a/contrib/mkimage-unittest.sh b/contrib/mkimage-unittest.sh -index a33f238..59f993f 100755 ---- a/contrib/mkimage-unittest.sh -+++ b/contrib/mkimage-unittest.sh -@@ -44,6 +44,6 @@ do - done - - chmod 0755 $ROOTFS # See #486 --tar --numeric-owner -cf- . | docker import - docker-ut --docker run -i -u root docker-ut /bin/echo Success. -+tar --numeric-owner -cf- . | docker.io import - docker-ut -+docker.io run -i -u root docker-ut /bin/echo Success. - rm -rf $ROOTFS -diff --git a/contrib/mkimage-yum.sh b/contrib/mkimage-yum.sh -index f21a63a..43b04fc 100755 ---- a/contrib/mkimage-yum.sh -+++ b/contrib/mkimage-yum.sh -@@ -92,7 +92,7 @@ if [ -z "$version" ]; then - version=$name - fi - --tar --numeric-owner -c -C "$target" . | docker import - $name:$version --docker run -i -t $name:$version echo success -+tar --numeric-owner -c -C "$target" . | docker.io import - $name:$version -+docker.io run -i -t $name:$version echo success - - rm -rf "$target" diff -Nru docker.io-0.9.1~dfsg1/debian/patches/series docker.io-1.3.2~dfsg1/debian/patches/series --- docker.io-0.9.1~dfsg1/debian/patches/series 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/patches/series 2014-11-25 17:40:07.000000000 +0000 @@ -1,11 +1,13 @@ -5049-trusty-apparmor-fix.patch +# Once upstream kills the archive/tar vendoring, remove this patch +upstream-patched-archive-tar.patch -# binary name patches (docker -> docker.io) -bash-completion-docker.io.patch -systemd-docker.io.patch -sysvinit-provides-docker.io.patch -zsh-completion-docker.io.patch -mkimage-docker.io.patch +# Once upstream adds EnvFile, remove this patch. 
+change-system-unit-env-file.patch +# See also https://github.com/docker/docker/pull/7220#issuecomment-50076589 -# once golang-1.3 drops, up the dep in debian/control and remove this patch -upstream-patched-archive-tar.patch +# Upstream deltas: +# -> Let there be light on non-amd64 +enable-non-amd64-arches.patch + +# Ubuntu apparmor alignment with LXC +sync-apparmor-with-lxc.patch diff -Nru docker.io-0.9.1~dfsg1/debian/patches/sync-apparmor-with-lxc.patch docker.io-1.3.2~dfsg1/debian/patches/sync-apparmor-with-lxc.patch --- docker.io-0.9.1~dfsg1/debian/patches/sync-apparmor-with-lxc.patch 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/patches/sync-apparmor-with-lxc.patch 2014-10-01 18:23:40.000000000 +0000 @@ -0,0 +1,173 @@ +Author: Jamie Strandboge +Description: sync AppArmor policy up with current LXC/libvirt-lxc policy + +Index: docker.io-1.2.0~dfsg1/libcontainer/apparmor/gen.go +=================================================================== +--- docker.io-1.2.0~dfsg1.orig/libcontainer/apparmor/gen.go ++++ docker.io-1.2.0~dfsg1/libcontainer/apparmor/gen.go +@@ -22,40 +22,147 @@ profile {{.Name}} flags=(attach_disconne + {{$value}} + {{end}} + +- network, +- capability, ++ # Globally allows everything to run under this profile. This is fine-tuned ++ # later ++ # in this profile and can be narrowed depending on the container's use. + file, ++ capability, ++ network, ++ ++ # the container may never be allowed to mount devpts. If it does, it ++ # will remount the host's devpts. We could allow it to do it with ++ # the newinstance option (but, right now, we don't). ++ deny mount fstype=devpts, ++ + umount, + ++ # This also needs additional rules to reach outside of the container via ++ # DBus, so just let all of DBus within the container. 
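Further down in this patch, the long runs of rules such as deny /sys/class/[^n]*{,/**} wklx, exist because AppArmor globs have no "everything except this path" operator, so the complement of an allowed name is spelled out one character at a time. A small Go generator showing how one such run expands (an illustration of the encoding, not code from the package):

    package main

    import "fmt"

    // denyAllBut denies every sibling of base+name by excluding the
    // expected character at each position, then denies any entry that
    // merely extends the allowed name.
    func denyAllBut(base, name string) []string {
    	var rules []string
    	for i := 0; i < len(name); i++ {
    		rules = append(rules, fmt.Sprintf(
    			"deny %s%s[^%c]*{,/**} wklx,", base, name[:i], name[i]))
    	}
    	return append(rules, fmt.Sprintf("deny %s%s?*{,/**} wklx,", base, name))
    }

    func main() {
    	// reproduces the /sys/class/ run added later in this patch
    	for _, rule := range denyAllBut("/sys/class/", "net") {
    		fmt.Println(rule)
    	}
    }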
++ dbus, ++ ++ # Allow us to ptrace ourselves ++ ptrace peer=@{profile_name}, ++ ++ # ignore DENIED message on / remount ++ deny mount options=(ro, remount) -> /, ++ deny mount options=(ro, remount, silent) -> /, ++ ++ # allow tmpfs mounts everywhere + mount fstype=tmpfs, ++ ++ # allow hugetlbfs mounts everywhere ++ mount fstype=hugetlbfs, ++ ++ # allow mqueue mounts everywhere + mount fstype=mqueue, ++ ++ # allow fuse mounts everywhere + mount fstype=fuse.*, ++ ++ # deny writes in /proc/sys/fs but allow binfmt_misc to be mounted + mount fstype=binfmt_misc -> /proc/sys/fs/binfmt_misc/, ++ deny @{PROC}/sys/fs/** wklx, ++ ++ # allow efivars to be mounted, writing to it will be blocked though + mount fstype=efivarfs -> /sys/firmware/efi/efivars/, +- mount fstype=fusectl -> /sys/fs/fuse/connections/, +- mount fstype=securityfs -> /sys/kernel/security/, +- mount fstype=debugfs -> /sys/kernel/debug/, +- mount fstype=proc -> /proc/, +- mount fstype=sysfs -> /sys/, + +- deny @{PROC}/sys/fs/** wklx, ++ # block some other dangerous paths + deny @{PROC}/sysrq-trigger rwklx, + deny @{PROC}/mem rwklx, + deny @{PROC}/kmem rwklx, +- deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx, +- deny @{PROC}/sys/kernel/*/** wklx, + +- deny mount options=(ro, remount) -> /, ++ # deny writes in /sys except for /sys/fs/cgroup, also allow ++ # fusectl, securityfs and debugfs to be mounted there (read-only) ++ mount fstype=fusectl -> /sys/fs/fuse/connections/, ++ mount fstype=securityfs -> /sys/kernel/security/, ++ mount fstype=debugfs -> /sys/kernel/debug/, + deny mount fstype=debugfs -> /var/lib/ureadahead/debugfs/, +- deny mount fstype=devpts, +- +- deny /sys/[^f]*/** wklx, +- deny /sys/f[^s]*/** wklx, +- deny /sys/fs/[^c]*/** wklx, +- deny /sys/fs/c[^g]*/** wklx, +- deny /sys/fs/cg[^r]*/** wklx, ++ mount fstype=proc -> /proc/, ++ mount fstype=sysfs -> /sys/, + deny /sys/firmware/efi/efivars/** rwklx, + deny /sys/kernel/security/** rwklx, ++ mount options=(move) /sys/fs/cgroup/cgmanager/ -> /sys/fs/cgroup/cgmanager.lower/, ++ ++ mount options=(rw nosuid nodev noexec remount) -> /sys/, ++ mount options=(rw remount) -> /sys/kernel/security/, ++ mount options=(rw remount) -> /sys/fs/pstore/, ++ mount options=(ro remount) -> /sys/fs/pstore/, ++ ++ deny /proc/sys/[^kn]*{,/**} wklx, ++ deny /proc/sys/k[^e]*{,/**} wklx, ++ deny /proc/sys/ke[^r]*{,/**} wklx, ++ deny /proc/sys/ker[^n]*{,/**} wklx, ++ deny /proc/sys/kern[^e]*{,/**} wklx, ++ deny /proc/sys/kerne[^l]*{,/**} wklx, ++ deny /proc/sys/kernel/[^smhd]*{,/**} wklx, ++ deny /proc/sys/kernel/d[^o]*{,/**} wklx, ++ deny /proc/sys/kernel/do[^m]*{,/**} wklx, ++ deny /proc/sys/kernel/dom[^a]*{,/**} wklx, ++ deny /proc/sys/kernel/doma[^i]*{,/**} wklx, ++ deny /proc/sys/kernel/domai[^n]*{,/**} wklx, ++ deny /proc/sys/kernel/domain[^n]*{,/**} wklx, ++ deny /proc/sys/kernel/domainn[^a]*{,/**} wklx, ++ deny /proc/sys/kernel/domainna[^m]*{,/**} wklx, ++ deny /proc/sys/kernel/domainnam[^e]*{,/**} wklx, ++ deny /proc/sys/kernel/domainname?*{,/**} wklx, ++ deny /proc/sys/kernel/h[^o]*{,/**} wklx, ++ deny /proc/sys/kernel/ho[^s]*{,/**} wklx, ++ deny /proc/sys/kernel/hos[^t]*{,/**} wklx, ++ deny /proc/sys/kernel/host[^n]*{,/**} wklx, ++ deny /proc/sys/kernel/hostn[^a]*{,/**} wklx, ++ deny /proc/sys/kernel/hostna[^m]*{,/**} wklx, ++ deny /proc/sys/kernel/hostnam[^e]*{,/**} wklx, ++ deny /proc/sys/kernel/hostname?*{,/**} wklx, ++ deny /proc/sys/kernel/m[^s]*{,/**} wklx, ++ deny /proc/sys/kernel/ms[^g]*{,/**} wklx, ++ deny /proc/sys/kernel/msg*/** wklx, ++ deny /proc/sys/kernel/s[^he]*{,/**} 
wklx, ++ deny /proc/sys/kernel/se[^m]*{,/**} wklx, ++ deny /proc/sys/kernel/sem*/** wklx, ++ deny /proc/sys/kernel/sh[^m]*{,/**} wklx, ++ deny /proc/sys/kernel/shm*/** wklx, ++ deny /proc/sys/kernel?*{,/**} wklx, ++ deny /proc/sys/n[^e]*{,/**} wklx, ++ deny /proc/sys/ne[^t]*{,/**} wklx, ++ deny /proc/sys/net?*{,/**} wklx, ++ deny /sys/[^fdc]*{,/**} wklx, ++ deny /sys/c[^l]*{,/**} wklx, ++ deny /sys/cl[^a]*{,/**} wklx, ++ deny /sys/cla[^s]*{,/**} wklx, ++ deny /sys/clas[^s]*{,/**} wklx, ++ deny /sys/class/[^n]*{,/**} wklx, ++ deny /sys/class/n[^e]*{,/**} wklx, ++ deny /sys/class/ne[^t]*{,/**} wklx, ++ deny /sys/class/net?*{,/**} wklx, ++ deny /sys/class?*{,/**} wklx, ++ deny /sys/d[^e]*{,/**} wklx, ++ deny /sys/de[^v]*{,/**} wklx, ++ deny /sys/dev[^i]*{,/**} wklx, ++ deny /sys/devi[^c]*{,/**} wklx, ++ deny /sys/devic[^e]*{,/**} wklx, ++ deny /sys/device[^s]*{,/**} wklx, ++ deny /sys/devices/[^v]*{,/**} wklx, ++ deny /sys/devices/v[^i]*{,/**} wklx, ++ deny /sys/devices/vi[^r]*{,/**} wklx, ++ deny /sys/devices/vir[^t]*{,/**} wklx, ++ deny /sys/devices/virt[^u]*{,/**} wklx, ++ deny /sys/devices/virtu[^a]*{,/**} wklx, ++ deny /sys/devices/virtua[^l]*{,/**} wklx, ++ deny /sys/devices/virtual/[^n]*{,/**} wklx, ++ deny /sys/devices/virtual/n[^e]*{,/**} wklx, ++ deny /sys/devices/virtual/ne[^t]*{,/**} wklx, ++ deny /sys/devices/virtual/net?*{,/**} wklx, ++ deny /sys/devices/virtual?*{,/**} wklx, ++ deny /sys/devices?*{,/**} wklx, ++ deny /sys/f[^s]*{,/**} wklx, ++ deny /sys/fs/[^c]*{,/**} wklx, ++ deny /sys/fs/c[^g]*{,/**} wklx, ++ deny /sys/fs/cg[^r]*{,/**} wklx, ++ deny /sys/fs/cgr[^o]*{,/**} wklx, ++ deny /sys/fs/cgro[^u]*{,/**} wklx, ++ deny /sys/fs/cgrou[^p]*{,/**} wklx, ++ deny /sys/fs/cgroup?*{,/**} wklx, ++ deny /sys/fs?*{,/**} wklx, + } + ` + diff -Nru docker.io-0.9.1~dfsg1/debian/patches/systemd-docker.io.patch docker.io-1.3.2~dfsg1/debian/patches/systemd-docker.io.patch --- docker.io-0.9.1~dfsg1/debian/patches/systemd-docker.io.patch 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/patches/systemd-docker.io.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -Description: Rename binary file from docker to docker.io -Author: Tianon Gravi -Forwarded: not-needed - -diff --git a/contrib/init/systemd/docker.service b/contrib/init/systemd/docker.service -index 1bc4d1f..505402f 100644 ---- a/contrib/init/systemd/docker.service -+++ b/contrib/init/systemd/docker.service -@@ -4,7 +4,7 @@ Documentation=http://docs.docker.io - After=network.target - - [Service] --ExecStart=/usr/bin/docker -d -+ExecStart=/usr/bin/docker.io -d - Restart=on-failure - LimitNOFILE=1048576 - LimitNPROC=1048576 -diff --git a/contrib/init/systemd/socket-activation/docker.service b/contrib/init/systemd/socket-activation/docker.service -index a3382ab..b80c0cf 100644 ---- a/contrib/init/systemd/socket-activation/docker.service -+++ b/contrib/init/systemd/socket-activation/docker.service -@@ -4,7 +4,7 @@ Documentation=http://docs.docker.io - After=network.target - - [Service] --ExecStart=/usr/bin/docker -d -H fd:// -+ExecStart=/usr/bin/docker.io -d -H fd:// - Restart=on-failure - LimitNOFILE=1048576 - LimitNPROC=1048576 diff -Nru docker.io-0.9.1~dfsg1/debian/patches/sysvinit-provides-docker.io.patch docker.io-1.3.2~dfsg1/debian/patches/sysvinit-provides-docker.io.patch --- docker.io-0.9.1~dfsg1/debian/patches/sysvinit-provides-docker.io.patch 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/patches/sysvinit-provides-docker.io.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 
@@ -Description: Sysvinit needs to "Provides" docker.io, not docker -Author: Tianon Gravi -Forwarded: not-needed - ---- a/contrib/init/sysvinit-debian/docker -+++ b/contrib/init/sysvinit-debian/docker -@@ -1,7 +1,7 @@ - #!/bin/sh - - ### BEGIN INIT INFO --# Provides: docker -+# Provides: docker.io - # Required-Start: $syslog $remote_fs - # Required-Stop: $syslog $remote_fs - # Default-Start: 2 3 4 5 diff -Nru docker.io-0.9.1~dfsg1/debian/patches/upstream-patched-archive-tar.patch docker.io-1.3.2~dfsg1/debian/patches/upstream-patched-archive-tar.patch --- docker.io-0.9.1~dfsg1/debian/patches/upstream-patched-archive-tar.patch 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/patches/upstream-patched-archive-tar.patch 2014-11-25 03:53:16.000000000 +0000 @@ -1,150 +1,182 @@ Author: Tianon Gravi Description: "archive/tar" patch for upstreamed xattrs patch -Applied-Upstream: when golang-1.3 is broadly packaged (scheduled to be released 2014-06-01) +Applied-Upstream: when golang-1.4 is broadly packaged (scheduled to be released 2014-12-01) -diff --git a/archive/archive.go b/archive/archive.go -index 5d6c020..7f318dc 100644 ---- a/archive/archive.go -+++ b/archive/archive.go -@@ -1,6 +1,7 @@ - package archive - - import ( -+ "archive/tar" - "bytes" - "compress/bzip2" - "compress/gzip" -@@ -8,7 +9,6 @@ import ( - "fmt" - "github.com/dotcloud/docker/pkg/system" - "github.com/dotcloud/docker/utils" -- "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "io" - "io/ioutil" - "os" -diff --git a/archive/archive_test.go b/archive/archive_test.go -index 4126601..164fc8f 100644 ---- a/archive/archive_test.go -+++ b/archive/archive_test.go -@@ -1,9 +1,9 @@ - package archive +diff --git a/graph/tags_unit_test.go b/graph/tags_unit_test.go +index da51254..de232cb 100644 +--- a/graph/tags_unit_test.go ++++ b/graph/tags_unit_test.go +@@ -7,11 +7,11 @@ import ( + "path" + "testing" - import ( -+ "archive/tar" - "bytes" - "fmt" -- "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "io" - "io/ioutil" - "os" -diff --git a/archive/changes.go b/archive/changes.go -index 723e4a7..120f33a 100644 ---- a/archive/changes.go -+++ b/archive/changes.go -@@ -1,11 +1,11 @@ - package archive - - import ( + "archive/tar" - "bytes" - "fmt" - "github.com/dotcloud/docker/pkg/system" - "github.com/dotcloud/docker/utils" -- "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "io" - "os" - "path/filepath" -diff --git a/archive/diff.go b/archive/diff.go -index e20e4b1..7bf01f3 100644 ---- a/archive/diff.go -+++ b/archive/diff.go -@@ -1,8 +1,8 @@ - package archive + "github.com/docker/docker/daemon/graphdriver" + _ "github.com/docker/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests + "github.com/docker/docker/image" + "github.com/docker/docker/utils" +- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + ) - import ( -+ "archive/tar" - "fmt" -- "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "io" - "io/ioutil" - "os" -diff --git a/archive/wrap.go b/archive/wrap.go -index 03ea508..dfb335c 100644 ---- a/archive/wrap.go -+++ b/archive/wrap.go -@@ -1,8 +1,8 @@ - package archive + const ( +diff --git a/integration-cli/utils.go b/integration-cli/utils.go +index f3f128e..abea8a3 100644 +--- a/integration-cli/utils.go ++++ b/integration-cli/utils.go +@@ -16,7 +16,7 @@ import ( + "testing" + "time" - import ( +- 
"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "archive/tar" - "bytes" -- "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "io/ioutil" ) + func getExitCode(err error) (int, error) { diff --git a/integration/api_test.go b/integration/api_test.go -index cb92d89..ceb38c1 100644 +index 8fa295e..9c1ecb2 100644 --- a/integration/api_test.go +++ b/integration/api_test.go -@@ -1,6 +1,7 @@ - package docker +@@ -14,11 +14,11 @@ import ( + "testing" + "time" - import ( + "archive/tar" - "bufio" - "bytes" - "encoding/json" -@@ -11,7 +12,6 @@ import ( - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" -- "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "io" - "io/ioutil" - "net" + "github.com/docker/docker/api" + "github.com/docker/docker/api/server" + "github.com/docker/docker/engine" + "github.com/docker/docker/runconfig" +- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + ) + + func TestGetContainersJSON(t *testing.T) { diff --git a/integration/utils_test.go b/integration/utils_test.go -index 05d73df..80c02a3 100644 +index e1abfa7..c5bc3af 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go -@@ -1,9 +1,9 @@ - package docker +@@ -13,7 +13,7 @@ import ( + "testing" + "time" + +- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ++ "archive/tar" + + "github.com/docker/docker/builtins" + "github.com/docker/docker/daemon" +diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go +index 155145f..0c41f1b 100644 +--- a/pkg/archive/archive.go ++++ b/pkg/archive/archive.go +@@ -16,7 +16,7 @@ import ( + "strings" + "syscall" + +- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ++ "archive/tar" + + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/log" +diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go +index 7c9db44..39c8caf 100644 +--- a/pkg/archive/archive_test.go ++++ b/pkg/archive/archive_test.go +@@ -12,7 +12,7 @@ import ( + "testing" + "time" - import ( +- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "archive/tar" - "bytes" - "fmt" -- "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "io" - "io/ioutil" - "net/http" -diff --git a/utils/tarsum.go b/utils/tarsum.go -index 67e94aa..786196b 100644 ---- a/utils/tarsum.go -+++ b/utils/tarsum.go -@@ -1,11 +1,11 @@ - package utils + ) + func TestCmdStreamLargeStderr(t *testing.T) { +diff --git a/pkg/archive/changes.go b/pkg/archive/changes.go +index 5fbdcc9..fa96bb8 100644 +--- a/pkg/archive/changes.go ++++ b/pkg/archive/changes.go +@@ -10,7 +10,7 @@ import ( + "syscall" + "time" + +- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ++ "archive/tar" + + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/pools" +diff --git a/pkg/archive/diff.go b/pkg/archive/diff.go +index 5ed1a1d..f20fcb8 100644 +--- a/pkg/archive/diff.go ++++ b/pkg/archive/diff.go +@@ -9,7 +9,7 @@ import ( + "strings" + "syscall" + +- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ++ "archive/tar" + + "github.com/docker/docker/pkg/pools" + ) +diff --git a/pkg/archive/diff_test.go b/pkg/archive/diff_test.go +index 758c411..1af10fe 100644 +--- a/pkg/archive/diff_test.go ++++ b/pkg/archive/diff_test.go +@@ -3,7 +3,7 @@ package archive 
import ( + "testing" + +- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "archive/tar" - "bytes" - "compress/gzip" - "crypto/sha256" - "encoding/hex" -- "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "hash" - "io" - "sort" -diff --git a/utils_test.go b/utils_test.go -index 31fa12b..4b8cfba 100644 ---- a/utils_test.go -+++ b/utils_test.go + ) + + func TestApplyLayerInvalidFilenames(t *testing.T) { +diff --git a/pkg/archive/utils_test.go b/pkg/archive/utils_test.go +index 3624fe5..8e26a11 100644 +--- a/pkg/archive/utils_test.go ++++ b/pkg/archive/utils_test.go +@@ -9,7 +9,7 @@ import ( + "path/filepath" + "time" + +- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ++ "archive/tar" + ) + + var testUntarFns = map[string]func(string, io.Reader) error{ +diff --git a/pkg/archive/wrap.go b/pkg/archive/wrap.go +index b8b6019..dfb335c 100644 +--- a/pkg/archive/wrap.go ++++ b/pkg/archive/wrap.go @@ -1,8 +1,8 @@ - package docker + package archive import ( + "archive/tar" "bytes" -- "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "io" +- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "io/ioutil" + ) + +diff --git a/pkg/tarsum/tarsum.go b/pkg/tarsum/tarsum.go +index 6581f3f..ed36494 100644 +--- a/pkg/tarsum/tarsum.go ++++ b/pkg/tarsum/tarsum.go +@@ -11,7 +11,7 @@ import ( + "strconv" + "strings" + +- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ++ "archive/tar" + + "github.com/docker/docker/pkg/log" + ) +diff --git a/pkg/tarsum/tarsum_test.go b/pkg/tarsum/tarsum_test.go +index 1e06cda..ef910c3 100644 +--- a/pkg/tarsum/tarsum_test.go ++++ b/pkg/tarsum/tarsum_test.go +@@ -15,7 +15,7 @@ import ( + "os" + "testing" + +- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ++ "archive/tar" ) + type testLayer struct { diff -Nru docker.io-0.9.1~dfsg1/debian/patches/zsh-completion-docker.io.patch docker.io-1.3.2~dfsg1/debian/patches/zsh-completion-docker.io.patch --- docker.io-0.9.1~dfsg1/debian/patches/zsh-completion-docker.io.patch 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/patches/zsh-completion-docker.io.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,76 +0,0 @@ -Description: Rename binary file from docker to docker.io -Author: Prach Pongpanich -Forwarded: not-needed - ---- a/contrib/completion/zsh/_docker -+++ b/contrib/completion/zsh/_docker -@@ -1,4 +1,4 @@ --#compdef docker -+#compdef docker.io - # - # zsh completion for docker (http://docker.io) - # -@@ -15,7 +15,7 @@ __parse_docker_list() { - __docker_stoppedcontainers() { - local expl - declare -a stoppedcontainers -- stoppedcontainers=(${(f)"$(docker ps -a | grep --color=never 'Exit' | __parse_docker_list )"}) -+ stoppedcontainers=(${(f)"$(docker.io ps -a | grep --color=never 'Exit' | __parse_docker_list )"}) - _describe -t containers-stopped "Stopped Containers" stoppedcontainers - } - -@@ -23,7 +23,7 @@ __docker_runningcontainers() { - local expl - declare -a containers - -- containers=(${(f)"$(docker ps | __parse_docker_list)"}) -+ containers=(${(f)"$(docker.io ps | __parse_docker_list)"}) - _describe -t containers-active "Running Containers" containers - } - -@@ -35,15 +35,15 @@ __docker_containers () { - __docker_images () { - local expl - declare -a images -- images=(${(f)"$(docker images | awk '(NR > 1){printf("%s\\:%s\n", $1,$2)}')"}) -- images=($images ${(f)"$(docker images | awk '(NR > 
1){printf("%s:%-15s in %s\n", $3,$2,$1)}')"}) -+ images=(${(f)"$(docker.io images | awk '(NR > 1){printf("%s\\:%s\n", $1,$2)}')"}) -+ images=($images ${(f)"$(docker.io images | awk '(NR > 1){printf("%s:%-15s in %s\n", $3,$2,$1)}')"}) - _describe -t docker-images "Images" images - } - - __docker_tags() { - local expl - declare -a tags -- tags=(${(f)"$(docker images | awk '(NR>1){print $2}'| sort | uniq)"}) -+ tags=(${(f)"$(docker.io images | awk '(NR>1){print $2}'| sort | uniq)"}) - _describe -t docker-tags "tags" tags - } - -@@ -64,7 +64,7 @@ __docker_search() { - if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \ - && ! _retrieve_cache ${cachename#_}; then - _message "Searching for ${searchterm}..." -- result=(${(f)"$(docker search ${searchterm} | awk '(NR>2){print $1}')"}) -+ result=(${(f)"$(docker.io search ${searchterm} | awk '(NR>2){print $1}')"}) - _store_cache ${cachename#_} result - fi - _wanted dockersearch expl 'Available images' compadd -a result -@@ -81,7 +81,7 @@ __docker_caching_policy() - __docker_repositories () { - local expl - declare -a repos -- repos=(${(f)"$(docker images | sed -e '1d' -e 's/[ ].*//' | sort | uniq)"}) -+ repos=(${(f)"$(docker.io images | sed -e '1d' -e 's/[ ].*//' | sort | uniq)"}) - _describe -t docker-repos "Repositories" repos - } - -@@ -98,7 +98,7 @@ __docker_commands () { - && ! _retrieve_cache docker_subcommands; - then - _docker_subcommands=(${${(f)"$(_call_program commands -- docker 2>&1 | sed -e '1,6d' -e '/^[ ]*$/d' -e 's/[ ]*\([^ ]\+\)\s*\([^ ].*\)/\1:\2/' )"}}) -+ docker.io 2>&1 | sed -e '1,6d' -e '/^[ ]*$/d' -e 's/[ ]*\([^ ]\+\)\s*\([^ ].*\)/\1:\2/' )"}}) - _docker_subcommands=($_docker_subcommands 'help:Show help for a command') - _store_cache docker_subcommands _docker_subcommands - fi diff -Nru docker.io-0.9.1~dfsg1/debian/prune/docs docker.io-1.3.2~dfsg1/debian/prune/docs --- docker.io-0.9.1~dfsg1/debian/prune/docs 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/prune/docs 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -./docs diff -Nru docker.io-0.9.1~dfsg1/debian/prune/vendor docker.io-1.3.2~dfsg1/debian/prune/vendor --- docker.io-0.9.1~dfsg1/debian/prune/vendor 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/prune/vendor 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -./vendor diff -Nru docker.io-0.9.1~dfsg1/debian/README.Debian docker.io-1.3.2~dfsg1/debian/README.Debian --- docker.io-0.9.1~dfsg1/debian/README.Debian 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/README.Debian 2014-11-25 03:53:15.000000000 +0000 @@ -1,12 +1,6 @@ Docker on Debian ================ -Due to a binary name conflict, the Debian Docker binary is installed as -"docker.io". Please adjust your scripts and expectations accordingly. - -https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=740856#10 - - To enable docker memory limitation, the kernel needs to be loaded with boot parameters: cgroup_enable=memory swapaccount=1. @@ -14,7 +8,7 @@ and kernel maintainers don't want to slow down systems unnecessarily. http://www.mail-archive.com/debian-bugs-dist@lists.debian.org/msg764104.html -https://github.com/dotcloud/docker/issues/396 +https://github.com/docker/docker/issues/396 To instruct the kernel to enable memory cgroup support, edit /etc/default/grub and extend GRUB_CMDLINE_LINUX_DEFAULT like: @@ -32,7 +26,7 @@ adduser YOURUSER docker As also noted in the upstream documentation, the "docker" group (and any other -means of accessing the Docker API) is root-equivilant. 
If you don't trust a +means of accessing the Docker API) is root-equivalent. If you don't trust a user with root on your box, you shouldn't trust them with Docker either. If you are interested in further information about the security aspects of Docker, please be sure to read the "Docker Security" diff -Nru docker.io-0.9.1~dfsg1/debian/repack/prune/docs docker.io-1.3.2~dfsg1/debian/repack/prune/docs --- docker.io-0.9.1~dfsg1/debian/repack/prune/docs 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/repack/prune/docs 2014-11-25 03:53:16.000000000 +0000 @@ -0,0 +1,2 @@ +./docs/sources +./docs/theme diff -Nru docker.io-0.9.1~dfsg1/debian/repack/prune/vendor docker.io-1.3.2~dfsg1/debian/repack/prune/vendor --- docker.io-0.9.1~dfsg1/debian/repack/prune/vendor 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/repack/prune/vendor 2014-11-25 03:53:16.000000000 +0000 @@ -0,0 +1 @@ +./vendor diff -Nru docker.io-0.9.1~dfsg1/debian/repack.sh docker.io-1.3.2~dfsg1/debian/repack.sh --- docker.io-0.9.1~dfsg1/debian/repack.sh 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/repack.sh 2014-11-25 03:53:15.000000000 +0000 @@ -1,9 +1,9 @@ -#!/bin/sh +#!/bin/bash # Taken from the X Strike Force Build System set -e -if ! [ -d debian/prune ]; then +if ! [ -d debian/repack/prune ]; then exit 0 fi @@ -23,10 +23,17 @@ cd "$tempdir" tar xf "$dir/$filename" -cat "$dir"/debian/prune/* | while read file; do rm -rvf */$file; done - -dfsgfilename="$(echo $filename | sed -E 's/(\.orig\.)/~dfsg1\1/')" -#dfsgfilename="$(echo $filename | sed -E 's/(\.orig\.)/+dfsg1\1/')" +cat "$dir"/debian/repack/prune/* | while read file; do + if [ -e */"$file" ]; then + echo "Pruning $file" + rm -rf */"$file" + fi +done + +dfsgfilename="$filename" +if [[ "$dfsgfilename" != *dfsg* ]]; then + dfsgfilename="${dfsgfilename/.orig/~dfsg1.orig}" +fi tar -czf ${dir}/${dfsgfilename} * cd "$dir" rm -rf "$tempdir" diff -Nru docker.io-0.9.1~dfsg1/debian/rules docker.io-1.3.2~dfsg1/debian/rules --- docker.io-0.9.1~dfsg1/debian/rules 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/rules 2014-11-25 17:44:00.000000000 +0000 @@ -2,10 +2,13 @@ # -*- makefile -*- # Tell dh-golang where this package lives upstream -export DH_GOPKG := github.com/dotcloud/docker +export DH_GOPKG := github.com/docker/docker # Tell dh-golang that we DO need subpackages export DH_GOLANG_INSTALL_ALL := 1 +LIBCONTAINER_GOPKG = github.com/docker/libcontainer +LIBTRUST_GOPKG = github.com/docker/libtrust + # temporary build path (see http://golang.org/doc/code.html#GOPATH) export GOPATH := $(CURDIR)/obj-$(DEB_BUILD_GNU_TYPE) @@ -14,30 +17,61 @@ INITPATH = ${INITDIR}/dockerinit DOCKER_VERSION = $(shell cat VERSION) -export DOCKER_GITCOMMIT := $(shell awk '/^$(DOCKER_VERSION):/ { print $$2 }' debian/upstream-version-gitcommits) +export DOCKER_GITCOMMIT := $(shell ./debian/helpers/gitcommit.sh $(DOCKER_VERSION)) export DOCKER_INITPATH := ${INITPATH} # good old Ubuntu needs AppArmor export DOCKER_BUILDTAGS := apparmor +APPARMOR_RECOMMENDS = $(shell dpkg-vendor --is Ubuntu && echo apparmor) +override_dh_gencontrol: + echo 'apparmor:Recommends=$(APPARMOR_RECOMMENDS)' >> debian/docker.io.substvars + dh_gencontrol + + override_dh_auto_build: @bash -c '{ [ "$$DOCKER_GITCOMMIT" ]; } || { echo; echo "error: missing DOCKER_GITCOMMIT - see debian/upstream-version-gitcommits"; echo; exit 2; } >&2' + + @# this is especially for easier build-testing of nightlies + @[ -d libcontainer ] || { [ -d 
vendor/src/$(LIBCONTAINER_GOPKG) ] && ln -sf vendor/src/$(LIBCONTAINER_GOPKG) libcontainer; } + @[ -d libtrust ] || { [ -d vendor/src/$(LIBTRUST_GOPKG) ] && ln -sf vendor/src/$(LIBTRUST_GOPKG) libtrust; } + + @# we need to make sure our multitarball deps are in our GOPATH + @mkdir -p "$$GOPATH/src/$(dir $(LIBCONTAINER_GOPKG))" "$$GOPATH/src/$(dir $(LIBTRUST_GOPKG))" + ln -sf "$$(readlink -f libcontainer)" "$(GOPATH)/src/$(dir $(LIBCONTAINER_GOPKG))" + ln -sf "$$(readlink -f libtrust)" "$(GOPATH)/src/$(dir $(LIBTRUST_GOPKG))" + ./hack/make.sh dynbinary + + # compile man pages + ./docs/man/md2man-all.sh override_dh_auto_install: dh_auto_install + + # install docker binary mkdir -p debian/docker.io/usr/bin - mv bundles/${DOCKER_VERSION}/dynbinary/docker-${DOCKER_VERSION} debian/docker.io/usr/bin/docker.io + mv bundles/${DOCKER_VERSION}/dynbinary/docker-${DOCKER_VERSION} debian/docker.io/usr/bin/docker + + # install dockerinit binary mkdir -p debian/docker.io/${INITDIR} mv bundles/${DOCKER_VERSION}/dynbinary/dockerinit-${DOCKER_VERSION} debian/docker.io/${INITPATH} - # The source of docker does not make a library, so dont ship it. - rm -rf debian/docker.io/usr/share/gocode - # Manual install zsh completion file because of the dh_install does not support rename file - mkdir -p debian/docker.io/usr/share/zsh/vendor-completions - install -D -m 644 contrib/completion/zsh/_docker debian/docker.io/usr/share/zsh/vendor-completions/_docker.io - + + # Most of the source of docker does not make a library, + # so only ship the reusable parts (and in a separate package). + mkdir -p debian/golang-docker-dev/usr/share/gocode/src/${DH_GOPKG} + mv -v \ + debian/tmp/usr/share/gocode/src/${DH_GOPKG}/pkg \ + debian/golang-docker-dev/usr/share/gocode/src/${DH_GOPKG}/ + mkdir -p debian/golang-docker-dev/usr/share/gocode/src/$(dir $(LIBCONTAINER_GOPKG)) + @# this is especially for easier build-testing of nightlies + @[ -d debian/tmp/usr/share/gocode/src/${DH_GOPKG}/libcontainer ] || { [ -d debian/tmp/usr/share/gocode/src/${DH_GOPKG}/vendor/src/$(LIBCONTAINER_GOPKG) ] && mv debian/tmp/usr/share/gocode/src/${DH_GOPKG}/vendor/src/$(LIBCONTAINER_GOPKG) debian/tmp/usr/share/gocode/src/${DH_GOPKG}/libcontainer; } + mv -v \ + debian/tmp/usr/share/gocode/src/${DH_GOPKG}/libcontainer \ + debian/golang-docker-dev/usr/share/gocode/src/$(dir $(LIBCONTAINER_GOPKG)) + rm -rf debian/tmp/usr/share/gocode # the SHA1 of dockerinit is important: don't strip it @@ -48,10 +82,16 @@ override_dh_auto_test: -override_dh_installman: - # we have to manually install because of the period in the name - mkdir -p debian/docker.io/usr/share/man/man1 - cp debian/docker.io.1 debian/docker.io/usr/share/man/man1/ +override_dh_installinit: + dh_installinit --name=docker + +override_dh_systemd_enable: + dh_systemd_enable -pdocker.io --no-enable docker.service + dh_systemd_enable -pdocker.io docker.socket + +override_dh_systemd_start: + dh_systemd_start -pdocker.io --no-start docker.service + dh_systemd_start -pdocker.io docker.socket override_dh_installchangelogs: diff -Nru docker.io-0.9.1~dfsg1/debian/upstream-version-gitcommits docker.io-1.3.2~dfsg1/debian/upstream-version-gitcommits --- docker.io-0.9.1~dfsg1/debian/upstream-version-gitcommits 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/upstream-version-gitcommits 2014-11-25 03:53:15.000000000 +0000 @@ -22,3 +22,16 @@ 0.8.1: a1598d1 0.9.0: 2b3fdf2 0.9.1: 3600720 +0.10.0: dc9c28f +0.11.0: 15209c3 +0.11.1: fb99f99 +0.12.0: 14680bf +1.0.0: 63fe64c +1.0.1: 990021a +1.1.0: 
79812e3 +1.1.1: bd609d2 +1.1.2: d84a070 +1.2.0: fa7b24f +1.3.0: c78088f +1.3.1: 4e9bbfa +1.3.2: 39fa2fa diff -Nru docker.io-0.9.1~dfsg1/debian/watch docker.io-1.3.2~dfsg1/debian/watch --- docker.io-0.9.1~dfsg1/debian/watch 2014-04-09 03:19:13.000000000 +0000 +++ docker.io-1.3.2~dfsg1/debian/watch 2014-11-25 03:53:15.000000000 +0000 @@ -3,4 +3,4 @@ dversionmangle=s/[+~](debian|dfsg|ds|deb)\d*$//,\ uversionmangle=s/(\d)[_\.\-\+]?((RC|rc|pre|dev|beta|alpha)\d*)$/$1~$2/,\ filenamemangle=s/.+\/v(\d\S*)\.tar\.gz/docker.io_$1.orig.tar.gz/ \ - https://github.com/dotcloud/docker/tags .*/v(\d\S*)\.tar\.gz debian /bin/sh debian/repack.sh + https://github.com/docker/docker/tags .*/v(\d\S*)\.tar\.gz debian ./debian/repack.sh diff -Nru docker.io-0.9.1~dfsg1/docker/client.go docker.io-1.3.2~dfsg1/docker/client.go --- docker.io-0.9.1~dfsg1/docker/client.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docker/client.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,13 @@ +// +build !daemon + +package main + +import ( + "log" +) + +const CanDaemon = false + +func mainDaemon() { + log.Fatal("This is a client-only binary - running the Docker daemon is not supported.") +} diff -Nru docker.io-0.9.1~dfsg1/docker/daemon.go docker.io-1.3.2~dfsg1/docker/daemon.go --- docker.io-0.9.1~dfsg1/docker/daemon.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docker/daemon.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,93 @@ +// +build daemon + +package main + +import ( + "log" + + "github.com/docker/docker/builder" + "github.com/docker/docker/builtins" + "github.com/docker/docker/daemon" + _ "github.com/docker/docker/daemon/execdriver/lxc" + _ "github.com/docker/docker/daemon/execdriver/native" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/engine" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/registry" +) + +const CanDaemon = true + +var ( + daemonCfg = &daemon.Config{} +) + +func init() { + daemonCfg.InstallFlags() +} + +func mainDaemon() { + if flag.NArg() != 0 { + flag.Usage() + return + } + eng := engine.New() + signal.Trap(eng.Shutdown) + + // Load builtins + if err := builtins.Register(eng); err != nil { + log.Fatal(err) + } + + // load registry service + if err := registry.NewService(daemonCfg.InsecureRegistries).Install(eng); err != nil { + log.Fatal(err) + } + + // load the daemon in the background so we can immediately start + // the http api so that connections don't fail while the daemon + // is booting + go func() { + d, err := daemon.NewDaemon(daemonCfg, eng) + if err != nil { + log.Fatal(err) + } + if err := d.Install(eng); err != nil { + log.Fatal(err) + } + + b := &builder.BuilderJob{eng, d} + b.Install() + + // after the daemon is done setting up we can tell the api to start + // accepting connections + if err := eng.Job("acceptconnections").Run(); err != nil { + log.Fatal(err) + } + }() + // TODO actually have a resolved graphdriver to show? + log.Printf("docker daemon: %s %s; execdriver: %s; graphdriver: %s", + dockerversion.VERSION, + dockerversion.GITCOMMIT, + daemonCfg.ExecDriver, + daemonCfg.GraphDriver, + ) + + // Serve api + job := eng.Job("serveapi", flHosts...) 
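The goroutine in mainDaemon above is the point of the whole file: the API socket is bound immediately, but connections are only accepted once the daemon has finished booting and the acceptconnections job has run. A toy model of that ordering, with a plain channel standing in for the engine job (hypothetical stand-ins, not the real engine API):

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	ready := make(chan struct{})

    	go func() {
    		// stands in for daemon.NewDaemon and d.Install
    		time.Sleep(100 * time.Millisecond)
    		close(ready) // stands in for the "acceptconnections" job
    	}()

    	fmt.Println("api: listening") // clients can connect, but must wait
    	<-ready
    	fmt.Println("api: accepting connections")
    }

This keeps early clients from getting connection-refused errors while the daemon initializes: they block instead of failing.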
+ job.SetenvBool("Logging", true) + job.SetenvBool("EnableCors", *flEnableCors) + job.Setenv("Version", dockerversion.VERSION) + job.Setenv("SocketGroup", *flSocketGroup) + + job.SetenvBool("Tls", *flTls) + job.SetenvBool("TlsVerify", *flTlsVerify) + job.Setenv("TlsCa", *flCa) + job.Setenv("TlsCert", *flCert) + job.Setenv("TlsKey", *flKey) + job.SetenvBool("BufferRequests", true) + if err := job.Run(); err != nil { + log.Fatal(err) + } +} diff -Nru docker.io-0.9.1~dfsg1/docker/docker.go docker.io-1.3.2~dfsg1/docker/docker.go --- docker.io-0.9.1~dfsg1/docker/docker.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docker/docker.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,162 +1,116 @@ package main import ( + "crypto/tls" + "crypto/x509" "fmt" + "io/ioutil" "log" "os" "strings" - "github.com/dotcloud/docker/api" - "github.com/dotcloud/docker/builtins" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/engine" - flag "github.com/dotcloud/docker/pkg/mflag" - "github.com/dotcloud/docker/pkg/opts" - "github.com/dotcloud/docker/sysinit" - "github.com/dotcloud/docker/utils" + "github.com/docker/docker/api" + "github.com/docker/docker/api/client" + "github.com/docker/docker/dockerversion" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/utils" +) + +const ( + defaultTrustKeyFile = "key.json" + defaultCaFile = "ca.pem" + defaultKeyFile = "key.pem" + defaultCertFile = "cert.pem" ) func main() { - if selfPath := utils.SelfPath(); strings.Contains(selfPath, ".dockerinit") { - // Running in init mode - sysinit.SysInit() + if reexec.Init() { return } - - var ( - flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") - flDaemon = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode") - flDebug = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode") - flAutoRestart = flag.Bool([]string{"r", "-restart"}, true, "Restart previously running containers") - bridgeName = flag.String([]string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge; use 'none' to disable container networking") - bridgeIp = flag.String([]string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b") - pidfile = flag.String([]string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file") - flRoot = flag.String([]string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the docker runtime") - flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group") - flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API") - flDns = opts.NewListOpts(opts.ValidateIp4Address) - flEnableIptables = flag.Bool([]string{"#iptables", "-iptables"}, true, "Disable docker's addition of iptables rules") - flEnableIpForward = flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Disable enabling of net.ipv4.ip_forward") - flDefaultIp = flag.String([]string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports") - flInterContainerComm = flag.Bool([]string{"#icc", "-icc"}, true, "Enable inter-container communication") - flGraphDriver = flag.String([]string{"s", "-storage-driver"}, "", "Force the docker runtime to use a specific storage driver") - flExecDriver = 
flag.String([]string{"e", "-exec-driver"}, "native", "Force the docker runtime to use a specific exec driver") - flHosts = opts.NewListOpts(api.ValidateHost) - flMtu = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available") - ) - flag.Var(&flDns, []string{"#dns", "-dns"}, "Force docker to use specific DNS servers") - flag.Var(&flHosts, []string{"H", "-host"}, "tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode. Multiple sockets can be specified") - flag.Parse() + // FIXME: validate daemon flags here if *flVersion { showVersion() return } - if flHosts.Len() == 0 { - defaultHost := os.Getenv("DOCKER_HOST") + if *flDebug { + os.Setenv("DEBUG", "1") + } + if len(flHosts) == 0 { + defaultHost := os.Getenv("DOCKER_HOST") if defaultHost == "" || *flDaemon { // If we do not have a host, default to unix socket defaultHost = fmt.Sprintf("unix://%s", api.DEFAULTUNIXSOCKET) } - if _, err := api.ValidateHost(defaultHost); err != nil { + defaultHost, err := api.ValidateHost(defaultHost) + if err != nil { log.Fatal(err) } - flHosts.Set(defaultHost) + flHosts = append(flHosts, defaultHost) } - if *bridgeName != "" && *bridgeIp != "" { - log.Fatal("You specified -b & --bip, mutually exclusive options. Please specify only one.") + if *flDaemon { + mainDaemon() + return } - if *flDebug { - os.Setenv("DEBUG", "1") + if len(flHosts) > 1 { + log.Fatal("Please specify only one -H") } - if *flDaemon { - if flag.NArg() != 0 { - flag.Usage() - return - } + protoAddrParts := strings.SplitN(flHosts[0], "://", 2) - // set up the TempDir to use a canonical path - tmp := os.TempDir() - realTmp, err := utils.ReadSymlinkedDirectory(tmp) + var ( + cli *client.DockerCli + tlsConfig tls.Config + ) + tlsConfig.InsecureSkipVerify = true + + // If we should verify the server, we need to load a trusted ca + if *flTlsVerify { + *flTls = true + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(*flCa) if err != nil { - log.Fatalf("Unable to get the full path to the TempDir (%s): %s", tmp, err) + log.Fatalf("Couldn't read ca cert %s: %s", *flCa, err) } - os.Setenv("TMPDIR", realTmp) + certPool.AppendCertsFromPEM(file) + tlsConfig.RootCAs = certPool + tlsConfig.InsecureSkipVerify = false + } - // get the canonical path to the Docker root directory - root := *flRoot - var realRoot string - if _, err := os.Stat(root); err != nil && os.IsNotExist(err) { - realRoot = root - } else { - realRoot, err = utils.ReadSymlinkedDirectory(root) + // If tls is enabled, try to load and send client certificates + if *flTls || *flTlsVerify { + _, errCert := os.Stat(*flCert) + _, errKey := os.Stat(*flKey) + if errCert == nil && errKey == nil { + *flTls = true + cert, err := tls.LoadX509KeyPair(*flCert, *flKey) if err != nil { - log.Fatalf("Unable to get the full path to root (%s): %s", root, err) + log.Fatalf("Couldn't load X509 key pair: %s. 
Key encrypted?", err) } + tlsConfig.Certificates = []tls.Certificate{cert} } + // Avoid fallback to SSL protocols < TLS1.0 + tlsConfig.MinVersion = tls.VersionTLS10 + } - eng, err := engine.New(realRoot) - if err != nil { - log.Fatal(err) - } - // Load builtins - builtins.Register(eng) - // load the daemon in the background so we can immediately start - // the http api so that connections don't fail while the daemon - // is booting - go func() { - // Load plugin: httpapi - job := eng.Job("initserver") - job.Setenv("Pidfile", *pidfile) - job.Setenv("Root", realRoot) - job.SetenvBool("AutoRestart", *flAutoRestart) - job.SetenvList("Dns", flDns.GetAll()) - job.SetenvBool("EnableIptables", *flEnableIptables) - job.SetenvBool("EnableIpForward", *flEnableIpForward) - job.Setenv("BridgeIface", *bridgeName) - job.Setenv("BridgeIP", *bridgeIp) - job.Setenv("DefaultIp", *flDefaultIp) - job.SetenvBool("InterContainerCommunication", *flInterContainerComm) - job.Setenv("GraphDriver", *flGraphDriver) - job.Setenv("ExecDriver", *flExecDriver) - job.SetenvInt("Mtu", *flMtu) - if err := job.Run(); err != nil { - log.Fatal(err) - } - // after the daemon is done setting up we can tell the api to start - // accepting connections - if err := eng.Job("acceptconnections").Run(); err != nil { - log.Fatal(err) - } - }() - - // Serve api - job := eng.Job("serveapi", flHosts.GetAll()...) - job.SetenvBool("Logging", true) - job.SetenvBool("EnableCors", *flEnableCors) - job.Setenv("Version", dockerversion.VERSION) - job.Setenv("SocketGroup", *flSocketGroup) - if err := job.Run(); err != nil { - log.Fatal(err) - } + if *flTls || *flTlsVerify { + cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, nil, protoAddrParts[0], protoAddrParts[1], &tlsConfig) } else { - if flHosts.Len() > 1 { - log.Fatal("Please specify only one -H") - } - protoAddrParts := strings.SplitN(flHosts.GetAll()[0], "://", 2) - if err := api.ParseCommands(protoAddrParts[0], protoAddrParts[1], flag.Args()...); err != nil { - if sterr, ok := err.(*utils.StatusError); ok { - if sterr.Status != "" { - log.Println(sterr.Status) - } - os.Exit(sterr.StatusCode) + cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, nil, protoAddrParts[0], protoAddrParts[1], nil) + } + + if err := cli.Cmd(flag.Args()...); err != nil { + if sterr, ok := err.(*utils.StatusError); ok { + if sterr.Status != "" { + log.Println(sterr.Status) } - log.Fatal(err) + os.Exit(sterr.StatusCode) } + log.Fatal(err) } } diff -Nru docker.io-0.9.1~dfsg1/docker/flags.go docker.io-1.3.2~dfsg1/docker/flags.go --- docker.io-0.9.1~dfsg1/docker/flags.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docker/flags.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,101 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" +) + +var ( + dockerCertPath = os.Getenv("DOCKER_CERT_PATH") + dockerTlsVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" +) + +func init() { + if dockerCertPath == "" { + dockerCertPath = filepath.Join(os.Getenv("HOME"), ".docker") + } +} + +var ( + flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") + flDaemon = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode") + flDebug = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode") + flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting 
of a group") + flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API") + flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags") + flTlsVerify = flag.Bool([]string{"-tlsverify"}, dockerTlsVerify, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)") + + // these are initialized in init() below since their default values depend on dockerCertPath which isn't fully initialized until init() runs + flTrustKey *string + flCa *string + flCert *string + flKey *string + flHosts []string +) + +func init() { + // placeholder for trust key flag + trustKeyDefault := filepath.Join(dockerCertPath, defaultTrustKeyFile) + flTrustKey = &trustKeyDefault + + flCa = flag.String([]string{"-tlscacert"}, filepath.Join(dockerCertPath, defaultCaFile), "Trust only remotes providing a certificate signed by the CA given here") + flCert = flag.String([]string{"-tlscert"}, filepath.Join(dockerCertPath, defaultCertFile), "Path to TLS certificate file") + flKey = flag.String([]string{"-tlskey"}, filepath.Join(dockerCertPath, defaultKeyFile), "Path to TLS key file") + opts.HostListVar(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.") + + flag.Usage = func() { + fmt.Fprint(os.Stderr, "Usage: docker [OPTIONS] COMMAND [arg...]\n\nA self-sufficient runtime for linux containers.\n\nOptions:\n") + + flag.PrintDefaults() + + help := "\nCommands:\n" + + for _, command := range [][]string{ + {"attach", "Attach to a running container"}, + {"build", "Build an image from a Dockerfile"}, + {"commit", "Create a new image from a container's changes"}, + {"cp", "Copy files/folders from a container's filesystem to the host path"}, + {"create", "Create a new container"}, + {"diff", "Inspect changes on a container's filesystem"}, + {"events", "Get real time events from the server"}, + {"exec", "Run a command in an existing container"}, + {"export", "Stream the contents of a container as a tar archive"}, + {"history", "Show the history of an image"}, + {"images", "List images"}, + {"import", "Create a new filesystem image from the contents of a tarball"}, + {"info", "Display system-wide information"}, + {"inspect", "Return low-level information on a container"}, + {"kill", "Kill a running container"}, + {"load", "Load an image from a tar archive"}, + {"login", "Register or log in to a Docker registry server"}, + {"logout", "Log out from a Docker registry server"}, + {"logs", "Fetch the logs of a container"}, + {"port", "Lookup the public-facing port that is NAT-ed to PRIVATE_PORT"}, + {"pause", "Pause all processes within a container"}, + {"ps", "List containers"}, + {"pull", "Pull an image or a repository from a Docker registry server"}, + {"push", "Push an image or a repository to a Docker registry server"}, + {"restart", "Restart a running container"}, + {"rm", "Remove one or more containers"}, + {"rmi", "Remove one or more images"}, + {"run", "Run a command in a new container"}, + {"save", "Save an image to a tar archive"}, + {"search", "Search for an image on the Docker Hub"}, + {"start", "Start a stopped container"}, + {"stop", "Stop a running container"}, + {"tag", "Tag an image into a repository"}, + {"top", "Lookup the running processes of a container"}, + {"unpause", "Unpause a paused container"}, + {"version", "Show the Docker version information"}, + {"wait", 
"Block until a container stops, then print its exit code"}, + } { + help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1]) + } + help += "\nRun 'docker COMMAND --help' for more information on a command." + fmt.Fprintf(os.Stderr, "%s\n", help) + } +} diff -Nru docker.io-0.9.1~dfsg1/docker/README.md docker.io-1.3.2~dfsg1/docker/README.md --- docker.io-0.9.1~dfsg1/docker/README.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docker/README.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,3 @@ +docker.go contains Docker's main function. + +This file provides first line CLI argument parsing and environment variable setting. diff -Nru docker.io-0.9.1~dfsg1/Dockerfile docker.io-1.3.2~dfsg1/Dockerfile --- docker.io-0.9.1~dfsg1/Dockerfile 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -6,13 +6,13 @@ # docker build -t docker . # # # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/dotcloud/docker -privileged -i -t docker bash +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash # # # Run the test suite: -# docker run -privileged docker hack/make.sh test +# docker run --privileged docker hack/make.sh test # # # Publish a release: -# docker run -privileged \ +# docker run --privileged \ # -e AWS_S3_BUCKET=baz \ # -e AWS_ACCESS_KEY=foo \ # -e AWS_SECRET_KEY=bar \ @@ -24,12 +24,11 @@ # docker-version 0.6.1 -FROM ubuntu:13.10 +FROM ubuntu:14.04 MAINTAINER Tianon Gravi (@tianon) # Packaged dependencies -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ - apt-utils \ +RUN apt-get update && apt-get install -y \ aufs-tools \ automake \ btrfs-tools \ @@ -41,17 +40,15 @@ libapparmor-dev \ libcap-dev \ libsqlite3-dev \ + lxc=1.0* \ mercurial \ + parallel \ reprepro \ ruby1.9.1 \ ruby1.9.1-dev \ s3cmd=1.1.0* \ --no-install-recommends -# Get and compile LXC 0.8 (since it is the most stable) -RUN git clone --no-checkout https://github.com/lxc/lxc.git /usr/local/lxc && cd /usr/local/lxc && git checkout -q lxc-0.8.0 -RUN cd /usr/local/lxc && ./autogen.sh && ./configure --disable-docs && make && make install - # Get lvm2 source for compiling statically RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags @@ -62,13 +59,17 @@ # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL # Install Go -RUN curl -s https://go.googlecode.com/files/go1.2.1.src.tar.gz | tar -v -C /usr/local -xz +RUN curl -sSL https://golang.org/dl/go1.3.3.src.tar.gz | tar -v -C /usr/local -xz ENV PATH /usr/local/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/dotcloud/docker/vendor +ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +ENV PATH /go/bin:$PATH RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 # Compile Go for cross compilation -ENV DOCKER_CROSSPLATFORMS linux/386 linux/arm darwin/amd64 darwin/386 +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 darwin/386 \ + freebsd/amd64 freebsd/386 freebsd/arm # (set an explicit GOARM of 5 for maximum compatibility) ENV GOARM 5 RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' @@ -77,20 +78,36 @@ RUN go get code.google.com/p/go.tools/cmd/cover # TODO replace FPM with some very minimal debhelper 
stuff -RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 +RUN gem install --no-rdoc --no-ri fpm --version 1.3.2 + +# Install man page generator +RUN mkdir -p /go/src/github.com/cpuguy83 \ + && git clone -b v1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \ + && cd /go/src/github.com/cpuguy83/go-md2man \ + && go get -v ./... + +# Get the "busybox" image source so we can build locally instead of pulling +RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox + +# Get the "cirros" image source so we can import it instead of fetching it during tests +RUN curl -sSL -o /cirros.tar.gz https://github.com/ewindisch/docker-cirros/raw/1cded459668e8b9dbf4ef976c94c05add9bbd8e9/cirros-0.3.0-x86_64-lxc.tar.gz # Setup s3cmd config -RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg +RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > $HOME/.s3cfg # Set user.email so crosbymichael's in-container merge commits go smoothly RUN git config --global user.email 'docker-dummy@example.com' +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + VOLUME /var/lib/docker -WORKDIR /go/src/github.com/dotcloud/docker -ENV DOCKER_BUILDTAGS apparmor +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] # Upload docker source -ADD . /go/src/github.com/dotcloud/docker +COPY . /go/src/github.com/docker/docker diff -Nru docker.io-0.9.1~dfsg1/.dockerignore docker.io-1.3.2~dfsg1/.dockerignore --- docker.io-0.9.1~dfsg1/.dockerignore 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/.dockerignore 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,2 @@ +bundles +.gopath diff -Nru docker.io-0.9.1~dfsg1/dockerinit/dockerinit.go docker.io-1.3.2~dfsg1/dockerinit/dockerinit.go --- docker.io-0.9.1~dfsg1/dockerinit/dockerinit.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/dockerinit/dockerinit.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,11 +1,12 @@ package main import ( - "github.com/dotcloud/docker/sysinit" + _ "github.com/docker/docker/daemon/execdriver/lxc" + _ "github.com/docker/docker/daemon/execdriver/native" + "github.com/docker/docker/pkg/reexec" ) func main() { // Running in init mode - sysinit.SysInit() - return + reexec.Init() } diff -Nru docker.io-0.9.1~dfsg1/docs/Dockerfile docker.io-1.3.2~dfsg1/docs/Dockerfile --- docker.io-0.9.1~dfsg1/docs/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,49 @@ +# +# See the top level Makefile in https://github.com/docker/docker for usage. 
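The DOCKER_CROSSPLATFORMS loop in the main Dockerfile above splits each "os/arch" pair into GOOS and GOARCH with shell parameter expansion (${platform%/*} and ${platform##*/}). The same split written out in Go, for illustration:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	platforms := []string{"linux/386", "linux/arm", "darwin/amd64", "freebsd/arm"}
    	for _, platform := range platforms {
    		// parts[0] plays ${platform%/*}, parts[1] plays ${platform##*/}
    		parts := strings.SplitN(platform, "/", 2)
    		fmt.Printf("GOOS=%s GOARCH=%s ./make.bash --no-clean\n", parts[0], parts[1])
    	}
    }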
+#
+FROM debian:jessie
+MAINTAINER Sven Dowideit (@SvenDowideit)
+
+RUN apt-get update && apt-get install -y make python-pip python-setuptools vim-tiny git gettext python-dev libssl-dev
+
+RUN pip install mkdocs
+
+# add MarkdownTools to get transclusion
+# (future development)
+#RUN easy_install -U setuptools
+#RUN pip install MarkdownTools2
+
+# this version works, the current versions fail in different ways
+RUN pip install awscli==1.4.4 pyopenssl==0.12
+
+# make sure the git clone is not an old cache - we've published old versions a few times now
+ENV CACHE_BUST Jul2014
+
+# get my sitemap.xml branch of mkdocs and use that for now
+RUN git clone https://github.com/SvenDowideit/mkdocs &&\
+ cd mkdocs/ &&\
+ git checkout docker-markdown-merge &&\
+ ./setup.py install
+
+ADD . /docs
+ADD MAINTAINERS /docs/sources/humans.txt
+WORKDIR /docs
+
+RUN VERSION=$(cat /docs/VERSION) &&\
+ MAJOR_MINOR="${VERSION%.*}" &&\
+ for i in $(seq $MAJOR_MINOR -0.1 1.0) ; do echo "<li>Version v$i</li>" ; done > /docs/sources/versions.html_fragment &&\
+ GIT_BRANCH=$(cat /docs/GIT_BRANCH) &&\
+ GITCOMMIT=$(cat /docs/GITCOMMIT) &&\
+ AWS_S3_BUCKET=$(cat /docs/AWS_S3_BUCKET) &&\
+ BUILD_DATE=$(date) &&\
+ sed -i "s/\$VERSION/$VERSION/g" /docs/theme/mkdocs/base.html &&\
+ sed -i "s/\$MAJOR_MINOR/v$MAJOR_MINOR/g" /docs/theme/mkdocs/base.html &&\
+ sed -i "s/\$GITCOMMIT/$GITCOMMIT/g" /docs/theme/mkdocs/base.html &&\
+ sed -i "s/\$GIT_BRANCH/$GIT_BRANCH/g" /docs/theme/mkdocs/base.html &&\
+ sed -i "s/\$BUILD_DATE/$BUILD_DATE/g" /docs/theme/mkdocs/base.html &&\
+ sed -i "s/\$AWS_S3_BUCKET/$AWS_S3_BUCKET/g" /docs/theme/mkdocs/base.html
+
+# note, EXPOSE is only last because of https://github.com/docker/docker/issues/3525
+EXPOSE 8000
+
+CMD ["mkdocs", "serve"]
diff -Nru docker.io-0.9.1~dfsg1/docs/docs-update.py docker.io-1.3.2~dfsg1/docs/docs-update.py
--- docker.io-0.9.1~dfsg1/docs/docs-update.py 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/docs-update.py 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,239 @@
+#!/usr/bin/env python
+
+#
+# Sven's quick hack script to update the documentation
+#
+# call with:
+# ./docs/docs-update.py /usr/bin/docker
+#
+
+import datetime
+import re
+from sys import argv
+import subprocess
+import os
+import os.path
+
+script, docker_cmd = argv
+
+date_string = datetime.date.today().strftime('%B %Y')
+
+
+def print_usage(outtext, docker_cmd, command):
+    try:
+        help_string = subprocess.check_output(
+            "".join((docker_cmd, " ", command, " --help")),
+            stderr=subprocess.STDOUT,
+            shell=True
+        )
+    except subprocess.CalledProcessError, e:
+        help_string = e.output
+    for l in str(help_string).strip().split("\n"):
+        l = l.rstrip()
+        if l == '':
+            outtext.write("\n")
+        else:
+            # `docker --help` tells the user the path they called it with
+            l = re.sub(docker_cmd, "docker", l)
+            outtext.write(" {}\n".format(l))
+    outtext.write("\n")
+
+
+# TODO: look for and complain about any missing commands
+def update_cli_reference():
+    originalFile = "docs/sources/reference/commandline/cli.md"
+    os.rename(originalFile, originalFile+".bak")
+
+    intext = open("{}.bak".format(originalFile), "r")
+    outtext = open(originalFile, "w")
+
+    mode = 'p'
+    space = " "
+    command = ""
+    # two-mode, line-by-line parser
+    for line in intext:
+        if mode == 'p':
+            # Prose
+            match = re.match("( \s*)Usage: docker ([a-z]+)", line)
+            if match:
+                # the beginning of a Docker command usage block
+                space = match.group(1)
+                command = match.group(2)
+                mode = 'c'
+            else:
+                match = re.match("( \s*)Usage of .*docker.*:", line)
+                if match:
+                    # the beginning of the Docker --help usage block
+                    space = match.group(1)
+                    command = ""
+                    mode = 'c'
+                else:
+                    outtext.write(line)
+        else:
+            # command usage block
+            match = re.match("("+space+")(.*)|^$", line)
+            if not match:
+                # The end of the current usage block
+                # Shell out to run docker to see the new output
+                print_usage(outtext, docker_cmd, command)
+                outtext.write(line)
+                mode = 'p'
+    if mode == 'c':
+        print_usage(outtext, docker_cmd, command)
+
+
+def update_man_pages():
+    cmds = []
+    try:
+        help_string = subprocess.check_output(
+            "".join((docker_cmd)),
+            stderr=subprocess.STDOUT,
+            shell=True
+        )
+    except subprocess.CalledProcessError, e:
+        help_string = e.output
+    for l in str(help_string).strip().split("\n"):
+        l = l.rstrip()
+        if l != "":
+            match = re.match(" (.*?) .*", l)
+            if match:
+                cmds.append(match.group(1))
+
+    desc_re = re.compile(
+        r".*# DESCRIPTION(.*?)# (OPTIONS|EXAMPLES?).*",
+        re.MULTILINE | re.DOTALL
+    )
+
+    example_re = re.compile(
+        r".*# EXAMPLES?(.*)# HISTORY.*",
+        re.MULTILINE | re.DOTALL
+    )
+
+    history_re = re.compile(
+        r".*# HISTORY(.*)",
+        re.MULTILINE | re.DOTALL
+    )
+
+    for command in cmds:
+        print "COMMAND: "+command
+        history = ""
+        description = ""
+        examples = ""
+        if os.path.isfile("docs/man/docker-"+command+".1.md"):
+            intext = open("docs/man/docker-"+command+".1.md", "r")
+            txt = intext.read()
+            intext.close()
+            match = desc_re.match(txt)
+            if match:
+                description = match.group(1)
+            match = example_re.match(txt)
+            if match:
+                examples = match.group(1)
+            match = history_re.match(txt)
+            if match:
+                history = match.group(1).strip()
+
+        usage = ""
+        usage_description = ""
+        params = {}
+        key_params = {}
+
+        try:
+            help_string = subprocess.check_output(
+                "".join((docker_cmd, " ", command, " --help")),
+                stderr=subprocess.STDOUT,
+                shell=True
+            )
+        except subprocess.CalledProcessError, e:
+            help_string = e.output
+
+        last_key = ""
+        # parse help_string (not the `help` builtin) line by line
+        for l in str(help_string).split("\n"):
+            l = l.rstrip()
+            if l != "":
+                match = re.match("Usage: docker {}(.*)".format(command), l)
+                if match:
+                    usage = match.group(1).strip()
+                else:
+                    match = re.match(" (-+)(.*) \s+(.*)", l)
+                    if match:
+                        last_key = match.group(2).rstrip()
+                        key_params[last_key] = match.group(1)+last_key
+                        params[last_key] = match.group(3)
+                    else:
+                        if last_key != "":
+                            params[last_key] = "{}\n{}".format(params[last_key], l)
+                        else:
+                            if usage_description != "":
+                                usage_description = usage_description + "\n"
+                            usage_description = usage_description + l
+
+        # replace [OPTIONS] with the list of params
+        options = ""
+        match = re.match("\[OPTIONS\](.*)", usage)
+        if match:
+            usage = match.group(1)
+
+        new_usage = ""
+        # TODO: sort without the `-`'s
+        for key in sorted(params.keys(), key=lambda s: s.lower()):
+            # split each flag spec on commas and render the flag aliases in bold
+            ps = []
+            opts = []
+            for k in key_params[key].split(","):
+                match = re.match("(-+)([A-Za-z-0-9]*)(?:=(.*))?", k.lstrip())
+                if match:
+                    p = "**{}{}**".format(match.group(1), match.group(2))
+                    o = "**{}{}**".format(match.group(1), match.group(2))
+                    if match.group(3):
+                        val = match.group(3)
+                        if val == "\"\"":
+                            val = match.group(2).upper()
+                        p = "{}[=*{}*]".format(p, val)
+                        val = match.group(3)
+                        if val in ("true", "false"):
+                            params[key] = params[key].rstrip()
+                            if not params[key].endswith('.'):
+                                params[key] = params[key] + "."
+ params[key] = "{} The default is *{}*.".format(params[key], val) + val = "*true*|*false*" + o = "{}={}".format(o, val) + ps.append(p) + opts.append(o) + else: + print "nomatch:{}".format(k) + new_usage = "{}\n[{}]".format(new_usage, "|".join(ps)) + options = "{}{}\n {}\n\n".format(options, ", ".join(opts), params[key]) + if new_usage != "": + new_usage = "{}\n".format(new_usage.strip()) + usage = new_usage + usage + + outtext = open("docs/man/docker-{}.1.md".format(command), "w") + outtext.write("""% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +""") + outtext.write("docker-{} - {}\n\n".format(command, usage_description)) + outtext.write("# SYNOPSIS\n**docker {}**\n{}\n\n".format(command, usage)) + if description != "": + outtext.write("# DESCRIPTION{}".format(description)) + if options == "": + options = "There are no available options.\n\n" + outtext.write("# OPTIONS\n{}".format(options)) + if examples != "": + outtext.write("# EXAMPLES{}".format(examples)) + outtext.write("# HISTORY\n") + if history != "": + outtext.write("{}\n".format(history)) + recent_history_re = re.compile( + ".*{}.*".format(date_string), + re.MULTILINE | re.DOTALL + ) + if not recent_history_re.match(history): + outtext.write("{}, updated by Sven Dowideit \n".format(date_string)) + outtext.close() + +# main +update_cli_reference() +update_man_pages() diff -Nru docker.io-0.9.1~dfsg1/docs/.gitignore docker.io-1.3.2~dfsg1/docs/.gitignore --- docker.io-0.9.1~dfsg1/docs/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/.gitignore 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,5 @@ +# generated by man/man/md2man-all.sh +man1/ +man5/ +# avoid commiting the awsconfig file used for releases +awsconfig diff -Nru docker.io-0.9.1~dfsg1/docs/MAINTAINERS docker.io-1.3.2~dfsg1/docs/MAINTAINERS --- docker.io-0.9.1~dfsg1/docs/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,4 @@ +Fred Lifton (@fredlf) +James Turnbull (@jamtur01) +Sven Dowideit (@SvenDowideit) +O.S. Tezer (@OSTezer) diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker.1.md docker.io-1.3.2~dfsg1/docs/man/docker.1.md --- docker.io-0.9.1~dfsg1/docs/man/docker.1.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/docker.1.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,208 @@ +% DOCKER(1) Docker User Manuals +% William Henry +% APRIL 2014 +# NAME +docker \- Docker image and container command line interface + +# SYNOPSIS +**docker** [OPTIONS] COMMAND [arg...] + +# DESCRIPTION +**docker** has two distinct functions. It is used for starting the Docker +daemon and to run the CLI (i.e., to command the daemon to manage images, +containers etc.) So **docker** is both a server, as a daemon, and a client +to the daemon, through the CLI. + +To run the Docker daemon you do not specify any of the commands listed below but +must specify the **-d** option. The other options listed below are for the +daemon only. + +The Docker CLI has over 30 commands. The commands are listed below and each has +its own man page which explain usage and arguments. + +To see the man page for a command run **man docker **. + +# OPTIONS +**-D**=*true*|*false* + Enable debug mode. Default is false. + +**-H**, **--host**=[unix:///var/run/docker.sock]: tcp://[host:port] to bind or +unix://[/path/to/socket] to use. 
+  The socket(s) to bind to in daemon mode, specified using one or more of
+  tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.
+
+**--api-enable-cors**=*true*|*false*
+  Enable CORS headers in the remote API. Default is false.
+
+**-b**=""
+  Attach containers to a pre\-existing network bridge; use 'none' to disable container networking
+
+**--bip**=""
+  Use the provided CIDR notation address for the dynamically created bridge (docker0); mutually exclusive of \-b
+
+**-d**=*true*|*false*
+  Enable daemon mode. Default is false.
+
+**--dns**=""
+  Force Docker to use specific DNS servers
+
+**-g**=""
+  Path to use as the root of the Docker runtime. Default is `/var/lib/docker`.
+
+**--fixed-cidr**=""
+  IPv4 subnet for fixed IPs (ex: 10.20.0.0/16); this subnet must be nested in the bridge subnet (which is defined by \-b or \-\-bip)
+
+**--icc**=*true*|*false*
+  Enable inter\-container communication. Default is true.
+
+**--ip**=""
+  Default IP address to use when binding container ports. Default is `0.0.0.0`.
+
+**--ip-masq**=*true*|*false*
+  Enable IP masquerading for the bridge's IP range. Default is true.
+
+**--iptables**=*true*|*false*
+  Enable Docker's addition of iptables rules. Default is true.
+
+**--mtu**=VALUE
+  Set the container network MTU. Default is `1500`.
+
+**-p**=""
+  Path to use for the daemon PID file. Default is `/var/run/docker.pid`.
+
+**--registry-mirror**=<scheme>://<host>
+  Prepend a registry mirror to be used for image pulls. May be specified multiple times.
+
+**-s**=""
+  Force the Docker runtime to use a specific storage driver.
+
+**-v**=*true*|*false*
+  Print version information and quit. Default is false.
+
+**--selinux-enabled**=*true*|*false*
+  Enable SELinux support. Default is false. SELinux does not presently support the BTRFS storage driver.
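+
+For example, a typical daemon invocation enables debug output and binds the
+daemon to both the default Unix socket and a TCP port (the address and port
+shown are illustrative):
+
+    # docker -d -D -H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375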
+ +# COMMANDS +**docker-attach(1)** + Attach to a running container + +**docker-build(1)** + Build an image from a Dockerfile + +**docker-commit(1)** + Create a new image from a container's changes + +**docker-cp(1)** + Copy files/folders from a container's filesystem to the host at path + +**docker-create(1)** + Create a new container + +**docker-diff(1)** + Inspect changes on a container's filesystem + +**docker-events(1)** + Get real time events from the server + +**docker-exec(1)** + Run a command in a running container + +**docker-export(1)** + Stream the contents of a container as a tar archive + +**docker-history(1)** + Show the history of an image + +**docker-images(1)** + List images + +**docker-import(1)** + Create a new filesystem image from the contents of a tarball + +**docker-info(1)** + Display system-wide information + +**docker-inspect(1)** + Return low-level information on a container + +**docker-kill(1)** + Kill a running container (which includes the wrapper process and everything +inside it) + +**docker-load(1)** + Load an image from a tar archive + +**docker-login(1)** + Register or Login to a Docker registry server + +**docker-logout(1)** + Log the user out of a Docker registry server + +**docker-logs(1)** + Fetch the logs of a container + +**docker-pause(1)** + Pause all processes within a container + +**docker-port(1)** + Lookup the public-facing port which is NAT-ed to PRIVATE_PORT + +**docker-ps(1)** + List containers + +**docker-pull(1)** + Pull an image or a repository from a Docker registry server + +**docker-push(1)** + Push an image or a repository to a Docker registry server + +**docker-restart(1)** + Restart a running container + +**docker-rm(1)** + Remove one or more containers + +**docker-rmi(1)** + Remove one or more images + +**docker-run(1)** + Run a command in a new container + +**docker-save(1)** + Save an image to a tar archive + +**docker-search(1)** + Search for an image in the Docker index + +**docker-start(1)** + Start a stopped container + +**docker-stop(1)** + Stop a running container + +**docker-tag(1)** + Tag an image into a repository + +**docker-top(1)** + Lookup the running processes of a container + +**docker-unpause(1)** + Unpause all processes within a container + +**docker-version(1)** + Show the Docker version information + +**docker-wait(1)** + Block until a container stops, then print its exit code + +# EXAMPLES + +For specific examples please see the man page for the specific Docker command. +For example: + + man docker run + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) based + on docker.com source material and internal work. diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-attach.1.md docker.io-1.3.2~dfsg1/docs/man/docker-attach.1.md --- docker.io-0.9.1~dfsg1/docs/man/docker-attach.1.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/docker-attach.1.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,61 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-attach - Attach to a running container + +# SYNOPSIS +**docker attach** +[**--no-stdin**[=*false*]] +[**--sig-proxy**[=*true*]] + CONTAINER + +# DESCRIPTION +If you **docker run** a container in detached mode (**-d**), you can reattach to +the detached container with **docker attach** using the container's ID or name. 
+ +You can detach from the container again (and leave it running) with `CTRL-p +CTRL-q` (for a quiet exit), or `CTRL-c` which will send a SIGKILL to the +container, or `CTRL-\` to get a stacktrace of the Docker client when it quits. +When you detach from a container the exit code will be returned to +the client. + +# OPTIONS +**--no-stdin**=*true*|*false* + Do not attach STDIN. The default is *false*. + +**--sig-proxy**=*true*|*false* + Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The default is *true*. + +# EXAMPLES + +## Attaching to a container + +In this example the top command is run inside a container, from an image called +fedora, in detached mode. The ID from the container is passed into the **docker +attach** command: + + # ID=$(sudo docker run -d fedora /usr/bin/top -b) + # sudo docker attach $ID + top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355560k used, 18012k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221740k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top + + top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355244k used, 18328k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221776k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-build.1.md docker.io-1.3.2~dfsg1/docs/man/docker-build.1.md --- docker.io-0.9.1~dfsg1/docs/man/docker-build.1.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/docker-build.1.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,121 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-build - Build a new image from the source code at PATH + +# SYNOPSIS +**docker build** +[**--force-rm**[=*false*]] +[**--no-cache**[=*false*]] +[**-q**|**--quiet**[=*false*]] +[**--rm**[=*true*]] +[**-t**|**--tag**[=*TAG*]] + PATH | URL | - + +# DESCRIPTION +This will read the Dockerfile from the directory specified in **PATH**. +It also sends any other files and directories found in the current +directory to the Docker daemon. The contents of this directory would +be used by **ADD** commands found within the Dockerfile. + +Warning, this will send a lot of data to the Docker daemon depending +on the contents of the current directory. The build is run by the Docker +daemon, not by the CLI, so the whole context must be transferred to the daemon. +The Docker CLI reports "Sending build context to Docker daemon" when the context is sent to +the daemon. + +When a single Dockerfile is given as the URL, then no context is set. +When a Git repository is set as the **URL**, the repository is used +as context. + +# OPTIONS +**--force-rm**=*true*|*false* + Always remove intermediate containers, even after unsuccessful builds. The default is *false*. 
+
+**--no-cache**=*true*|*false*
+  Do not use cache when building the image. The default is *false*.
+
+**-q**, **--quiet**=*true*|*false*
+  Suppress the verbose output generated by the containers. The default is *false*.
+
+**--rm**=*true*|*false*
+  Remove intermediate containers after a successful build. The default is *true*.
+
+**-t**, **--tag**=""
+  Repository name (and optionally a tag) to be applied to the resulting image in case of success
+
+# EXAMPLES
+
+## Building an image using a Dockerfile located inside the current directory
+
+Docker images can be built using the build command and a Dockerfile:
+
+    docker build .
+
+During the build process Docker creates intermediate images. In order to
+keep them, you must explicitly set `--rm=false`.
+
+    docker build --rm=false .
+
+A good practice is to make a sub-directory with a related name and create
+the Dockerfile in that directory. For example, a directory called mongo may
+contain a Dockerfile to create a Docker MongoDB image. Likewise, another
+directory called httpd may be used to store Dockerfiles for Apache web
+server images.
+
+It is also a good practice to add the files required for the image to the
+sub-directory. These files will then be specified with the `ADD` instruction
+in the Dockerfile. Note: If you include a tar file (a good practice!), then
+Docker will automatically extract the contents of the tar file
+specified within the `ADD` instruction into the specified target.
+
+## Building an image and naming that image
+
+A good practice is to give a name to the image you are building. There are
+no hard rules here, but it is best to give the names consideration.
+
+The **-t**/**--tag** flag is used to name (and optionally tag) an image.
+Here are some examples:
+
+Though it is not a good practice, image names can be arbitrary:
+
+    docker build -t myimage .
+
+A better approach is to provide a fully qualified and meaningful repository,
+name, and tag (where the tag in this context means the qualifier after
+the ":"). In this example we build a JBoss image for the Fedora repository
+and give it the version 1.0:
+
+    docker build -t fedora/jboss:1.0 .
+
+The next example is for the "whenry" user repository and uses Fedora and
+JBoss and gives it the version 2.1:
+
+    docker build -t whenry/fedora-jboss:V2.1 .
+
+If you do not provide a version tag then Docker will assign `latest`:
+
+    docker build -t whenry/fedora-jboss .
+
+When you list the images, the image above will have the tag `latest`.
+
+So naming an image is arbitrary, but consideration should be given to
+a useful convention that makes sense for consumers and should also take
+into account Docker community conventions.
+
+## Building an image using a URL
+
+This will clone the specified GitHub repository from the URL and use it
+as context. The Dockerfile at the root of the repository is used as the
+Dockerfile. This only works if the GitHub repository is a dedicated
+repository.
+
+    docker build github.com/scollier/Fedora-Dockerfiles/tree/master/apache
+
+Note: You can set an arbitrary Git repository via the `git://` scheme.
+
+# HISTORY
+March 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-commit.1.md docker.io-1.3.2~dfsg1/docs/man/docker-commit.1.md --- docker.io-0.9.1~dfsg1/docs/man/docker-commit.1.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/docker-commit.1.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,41 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-commit - Create a new image from a container's changes + +# SYNOPSIS +**docker commit** +[**-a**|**--author**[=*AUTHOR*]] +[**-m**|**--message**[=*MESSAGE*]] +[**-p**|**--pause**[=*true*]] + CONTAINER [REPOSITORY[:TAG]] + +# DESCRIPTION +Using an existing container's name or ID you can create a new image. + +# OPTIONS +**-a**, **--author**="" + Author (e.g., "John Hannibal Smith ") + +**-m**, **--message**="" + Commit message + +**-p**, **--pause**=*true*|*false* + Pause container during commit. The default is *true*. + +# EXAMPLES + +## Creating a new image from an existing container +An existing Fedora based container has had Apache installed while running +in interactive mode with the bash shell. Apache is also running. To +create a new image run docker ps to find the container's ID and then run: + + # docker commit -m="Added Apache to Fedora base image" \ + -a="A D Ministrator" 98bd7fc99854 fedora/fedora_httpd:20 + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and in +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-cp.1.md docker.io-1.3.2~dfsg1/docs/man/docker-cp.1.md --- docker.io-0.9.1~dfsg1/docs/man/docker-cp.1.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/docker-cp.1.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,28 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-cp - Copy files/folders from the PATH to the HOSTPATH + +# SYNOPSIS +**docker cp** +CONTAINER:PATH HOSTPATH + +# DESCRIPTION +Copy files/folders from a container's filesystem to the host +path. Paths are relative to the root of the filesystem. Files +can be copied from a running or stopped container. + +# OPTIONS +There are no available options. + +# EXAMPLES +An important shell script file, created in a bash shell, is copied from +the exited container to the current dir on the host: + + # docker cp c071f3c3ee81:setup.sh . + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. 
+June 2014, updated by Sven Dowideit diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-create.1.md docker.io-1.3.2~dfsg1/docs/man/docker-create.1.md --- docker.io-0.9.1~dfsg1/docs/man/docker-create.1.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/docker-create.1.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,140 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-create - Create a new container + +# SYNOPSIS +**docker create** +[**-a**|**--attach**[=*[]*]] +[**--add-host**[=*[]*]] +[**-c**|**--cpu-shares**[=*0*]] +[**--cap-add**[=*[]*]] +[**--cap-drop**[=*[]*]] +[**--cidfile**[=*CIDFILE*]] +[**--cpuset**[=*CPUSET*]] +[**--device**[=*[]*]] +[**--dns-search**[=*[]*]] +[**--dns**[=*[]*]] +[**-e**|**--env**[=*[]*]] +[**--entrypoint**[=*ENTRYPOINT*]] +[**--env-file**[=*[]*]] +[**--expose**[=*[]*]] +[**-h**|**--hostname**[=*HOSTNAME*]] +[**-i**|**--interactive**[=*false*]] +[**--link**[=*[]*]] +[**--lxc-conf**[=*[]*]] +[**-m**|**--memory**[=*MEMORY*]] +[**--name**[=*NAME*]] +[**--net**[=*"bridge"*]] +[**-P**|**--publish-all**[=*false*]] +[**-p**|**--publish**[=*[]*]] +[**--privileged**[=*false*]] +[**--restart**[=*RESTART*]] +[**-t**|**--tty**[=*false*]] +[**-u**|**--user**[=*USER*]] +[**-v**|**--volume**[=*[]*]] +[**--volumes-from**[=*[]*]] +[**-w**|**--workdir**[=*WORKDIR*]] + IMAGE [COMMAND] [ARG...] + +# OPTIONS +**-a**, **--attach**=[] + Attach to STDIN, STDOUT or STDERR. + +**--add-host**=[] + Add a custom host-to-IP mapping (host:ip) + +**-c**, **--cpu-shares**=0 + CPU shares (relative weight) + +**--cap-add**=[] + Add Linux capabilities + +**--cap-drop**=[] + Drop Linux capabilities + +**--cidfile**="" + Write the container ID to the file + +**--cpuset**="" + CPUs in which to allow execution (0-3, 0,1) + +**--device**=[] + Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc) + +**--dns-search**=[] + Set custom DNS search domains + +**--dns**=[] + Set custom DNS servers + +**-e**, **--env**=[] + Set environment variables + +**--entrypoint**="" + Overwrite the default ENTRYPOINT of the image + +**--env-file**=[] + Read in a line delimited file of environment variables + +**--expose**=[] + Expose a port from the container without publishing it to your host + +**-h**, **--hostname**="" + Container host name + +**-i**, **--interactive**=*true*|*false* + Keep STDIN open even if not attached. The default is *false*. + +**--link**=[] + Add link to another container in the form of name:alias + +**--lxc-conf**=[] + (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" + +**-m**, **--memory**="" + Memory limit (format: , where unit = b, k, m or g) + +**--name**="" + Assign a name to the container + +**--net**="bridge" + Set the Network mode for the container + 'bridge': creates a new network stack for the container on the docker bridge + 'none': no networking for this container + 'container:': reuses another container network stack + 'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. + +**-P**, **--publish-all**=*true*|*false* + Publish all exposed ports to the host interfaces. The default is *false*. 
+ +**-p**, **--publish**=[] + Publish a container's port to the host + format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort + (use 'docker port' to see the actual mapping) + +**--privileged**=*true*|*false* + Give extended privileges to this container. The default is *false*. + +**--restart**="" + Restart policy to apply when a container exits (no, on-failure[:max-retry], always) + +**-t**, **--tty**=*true*|*false* + Allocate a pseudo-TTY. The default is *false*. + +**-u**, **--user**="" + Username or UID + +**-v**, **--volume**=[] + Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container) + +**--volumes-from**=[] + Mount volumes from the specified container(s) + +**-w**, **--workdir**="" + Working directory inside the container + +# HISTORY +August 2014, updated by Sven Dowideit +September 2014, updated by Sven Dowideit diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-diff.1.md docker.io-1.3.2~dfsg1/docs/man/docker-diff.1.md --- docker.io-0.9.1~dfsg1/docs/man/docker-diff.1.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/docker-diff.1.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,47 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-diff - Inspect changes on a container's filesystem + +# SYNOPSIS +**docker diff** +CONTAINER + +# DESCRIPTION +Inspect changes on a container's filesystem. You can use the full or +shortened container ID or the container name set using +**docker run --name** option. + +# OPTIONS +There are no available options. + +# EXAMPLES +Inspect the changes to on a nginx container: + + # docker diff 1fdfd1f54c1b + C /dev + C /dev/console + C /dev/core + C /dev/stdout + C /dev/fd + C /dev/ptmx + C /dev/stderr + C /dev/stdin + C /run + A /run/nginx.pid + C /var/lib/nginx/tmp + A /var/lib/nginx/tmp/client_body + A /var/lib/nginx/tmp/fastcgi + A /var/lib/nginx/tmp/proxy + A /var/lib/nginx/tmp/scgi + A /var/lib/nginx/tmp/uwsgi + C /var/log/nginx + A /var/log/nginx/access.log + A /var/log/nginx/error.log + + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-events.1.md docker.io-1.3.2~dfsg1/docs/man/docker-events.1.md --- docker.io-0.9.1~dfsg1/docs/man/docker-events.1.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/docker-events.1.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,61 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-events - Get real time events from the server + +# SYNOPSIS +**docker events** +[**--since**[=*SINCE*]] +[**--until**[=*UNTIL*]] + + +# DESCRIPTION +Get event information from the Docker daemon. Information can include historical +information and real-time information. 
+
+Docker containers will report the following events:
+
+    create, destroy, die, export, kill, pause, restart, start, stop, unpause
+
+and Docker images will report:
+
+    untag, delete
+
+# OPTIONS
+**--since**=""
+  Show all events created since timestamp
+
+**--until**=""
+  Stream events until this timestamp
+
+# EXAMPLES
+
+## Listening for Docker events
+
+After running docker events, a container 786d69800457 is started and stopped
+(the container ID has been shortened in the output below):
+
+    # docker events
+    [2014-04-12 18:23:04 -0400 EDT] 786d69800457: (from whenry/testimage:latest) start
+    [2014-04-12 18:23:13 -0400 EDT] 786d69800457: (from whenry/testimage:latest) die
+    [2014-04-12 18:23:13 -0400 EDT] 786d69800457: (from whenry/testimage:latest) stop
+
+## Listening for events since a given date
+Again, the output container IDs have been shortened for the purposes of this document:
+
+    # docker events --since '2014-04-12'
+    [2014-04-12 18:11:28 -0400 EDT] c655dbf640dc: (from whenry/testimage:latest) create
+    [2014-04-12 18:11:28 -0400 EDT] c655dbf640dc: (from whenry/testimage:latest) start
+    [2014-04-12 18:14:13 -0400 EDT] 786d69800457: (from whenry/testimage:latest) create
+    [2014-04-12 18:14:13 -0400 EDT] 786d69800457: (from whenry/testimage:latest) start
+    [2014-04-12 18:22:44 -0400 EDT] 786d69800457: (from whenry/testimage:latest) die
+    [2014-04-12 18:22:44 -0400 EDT] 786d69800457: (from whenry/testimage:latest) stop
+    [2014-04-12 18:23:04 -0400 EDT] 786d69800457: (from whenry/testimage:latest) start
+    [2014-04-12 18:23:13 -0400 EDT] 786d69800457: (from whenry/testimage:latest) die
+    [2014-04-12 18:23:13 -0400 EDT] 786d69800457: (from whenry/testimage:latest) stop
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-exec.1.md docker.io-1.3.2~dfsg1/docs/man/docker-exec.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-exec.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-exec.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,29 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% SEPT 2014
+# NAME
+docker-exec - Run a command in a running container
+
+# SYNOPSIS
+**docker exec**
+[**-d**|**--detach**[=*false*]]
+[**-i**|**--interactive**[=*false*]]
+[**-t**|**--tty**[=*false*]]
+ CONTAINER COMMAND [ARG...]
+
+# DESCRIPTION
+
+Run a process in a running container.
+
+# OPTIONS
+
+**-d**, **--detach**=*true*|*false*
+  Detached mode. This runs the new process in the background.
+
+**-i**, **--interactive**=*true*|*false*
+  When set to true, keep STDIN open even if not attached. The default is false.
+
+**-t**, **--tty**=*true*|*false*
+  When set to true, Docker allocates a pseudo-TTY and attaches to the standard
+input of the process. This can be used, for example, to run a throwaway
+interactive shell. The default value is false.
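+
+# EXAMPLES
+
+A typical use is to start an interactive shell inside an already running
+container (the container name below is illustrative):
+
+    # docker exec -i -t my_container /bin/bash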
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-export.1.md docker.io-1.3.2~dfsg1/docs/man/docker-export.1.md --- docker.io-0.9.1~dfsg1/docs/man/docker-export.1.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/docker-export.1.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,30 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-export - Export the contents of a filesystem as a tar archive to STDOUT + +# SYNOPSIS +**docker export** +CONTAINER + +# DESCRIPTION +Export the contents of a container's filesystem using the full or shortened +container ID or container name. The output is exported to STDOUT and can be +redirected to a tar file. + +# OPTIONS +There are no available options. + +# EXAMPLES +Export the contents of the container called angry_bell to a tar file +called test.tar: + + # docker export angry_bell > test.tar + # ls *.tar + test.tar + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff -Nru docker.io-0.9.1~dfsg1/docs/man/Dockerfile docker.io-1.3.2~dfsg1/docs/man/Dockerfile --- docker.io-0.9.1~dfsg1/docs/man/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,7 @@ +FROM golang:1.3 +RUN mkdir -p /go/src/github.com/cpuguy83 +RUN mkdir -p /go/src/github.com/cpuguy83 \ + && git clone -b v1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \ + && cd /go/src/github.com/cpuguy83/go-md2man \ + && go get -v ./... +CMD ["/go/bin/go-md2man", "--help"] diff -Nru docker.io-0.9.1~dfsg1/docs/man/Dockerfile.5.md docker.io-1.3.2~dfsg1/docs/man/Dockerfile.5.md --- docker.io-0.9.1~dfsg1/docs/man/Dockerfile.5.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/Dockerfile.5.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,207 @@ +% DOCKERFILE(5) Docker User Manuals +% Zac Dover +% May 2014 +# NAME + +Dockerfile - automate the steps of creating a Docker image + +# INTRODUCTION +The **Dockerfile** is a configuration file that automates the steps of creating +a Docker image. It is similar to a Makefile. Docker reads instructions from the +**Dockerfile** to automate the steps otherwise performed manually to create an +image. To build an image, create a file called **Dockerfile**. The +**Dockerfile** describes the steps taken to assemble the image. When the +**Dockerfile** has been created, call the **docker build** command, using the +path of directory that contains **Dockerfile** as the argument. + +# SYNOPSIS + +INSTRUCTION arguments + +For example: + +FROM image + +# DESCRIPTION + +A Dockerfile is a file that automates the steps of creating a Docker image. +A Dockerfile is similar to a Makefile. + +# USAGE + +**sudo docker build .** + -- runs the steps and commits them, building a final image + The path to the source repository defines where to find the context of the + build. The build is run by the docker daemon, not the CLI. The whole + context must be transferred to the daemon. The Docker CLI reports + "Sending build context to Docker daemon" when the context is sent to the daemon. + +**sudo docker build -t repository/tag .** + -- specifies a repository and tag at which to save the new image if the build + succeeds. The Docker daemon runs the steps one-by-one, committing the result + to a new image if necessary before finally outputting the ID of the new + image. 
The Docker daemon automatically cleans up the context it is given. + +Docker re-uses intermediate images whenever possible. This significantly +accelerates the *docker build* process. + +# FORMAT + +**FROM image** +or +**FROM image:tag** + -- The FROM instruction sets the base image for subsequent instructions. A + valid Dockerfile must have FROM as its first instruction. The image can be any + valid image. It is easy to start by pulling an image from the public + repositories. + -- FROM must be he first non-comment instruction in Dockerfile. + -- FROM may appear multiple times within a single Dockerfile in order to create + multiple images. Make a note of the last image id output by the commit before + each new FROM command. + -- If no tag is given to the FROM instruction, latest is assumed. If the used + tag does not exist, an error is returned. + +**MAINTAINER** + --The MAINTAINER instruction sets the Author field for the generated images. + +**RUN** + --RUN has two forms: + **RUN ** + -- (the command is run in a shell - /bin/sh -c) + **RUN ["executable", "param1", "param2"]** + --The above is executable form. + --The RUN instruction executes any commands in a new layer on top of the + current image and commits the results. The committed image is used for the next + step in Dockerfile. + --Layering RUN instructions and generating commits conforms to the core + concepts of Docker where commits are cheap and containers can be created from + any point in the history of an image. This is similar to source control. The + exec form makes it possible to avoid shell string munging. The exec form makes + it possible to RUN commands using a base image that does not contain /bin/sh. + +**CMD** + --CMD has three forms: + **CMD ["executable", "param1", "param2"]** This is the preferred form, the + exec form. + **CMD ["param1", "param2"]** This command provides default parameters to + ENTRYPOINT) + **CMD command param1 param2** This command is run as a shell. + --There can be only one CMD in a Dockerfile. If more than one CMD is listed, only + the last CMD takes effect. + The main purpose of a CMD is to provide defaults for an executing container. + These defaults may include an executable, or they can omit the executable. If + they omit the executable, an ENTRYPOINT must be specified. + When used in the shell or exec formats, the CMD instruction sets the command to + be executed when running the image. + If you use the shell form of the CMD, the executes in /bin/sh -c: + **FROM ubuntu** + **CMD echo "This is a test." | wc -** + If you run without a shell, then you must express the command as a + JSON array and give the full path to the executable. This array form is the + preferred form of CMD. All additional parameters must be individually expressed + as strings in the array: + **FROM ubuntu** + **CMD ["/usr/bin/wc","--help"]** + To make the container run the same executable every time, use ENTRYPOINT in + combination with CMD. + If the user specifies arguments to docker run, the specified commands override + the default in CMD. + Do not confuse **RUN** with **CMD**. RUN runs a command and commits the result. CMD + executes nothing at build time, but specifies the intended command for the + image. + +**EXPOSE** + --**EXPOSE [...]** + The **EXPOSE** instruction informs Docker that the container listens on the + specified network ports at runtime. Docker uses this information to + interconnect containers using links, and to set up port redirection on the host + system. 
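+  For example, to document that the container listens on port 8080 (an
+  illustrative port number):
+  **EXPOSE 8080**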
+ +**ENV** + --**ENV ** + The ENV instruction sets the environment variable to + the value . This value is passed to all future RUN instructions. This is + functionally equivalent to prefixing the command with **=**. The + environment variables that are set with ENV persist when a container is run + from the resulting image. Use docker inspect to inspect these values, and + change them using docker run **--env =.** + + Note that setting Setting **ENV DEBIAN_FRONTEND noninteractive** may cause + unintended consequences, because it will persist when the container is run + interactively, as with the following command: **docker run -t -i image bash** + +**ADD** + --**ADD ... ** The ADD instruction copies new files, directories + or remote file URLs to the filesystem of the container at path . + Mutliple resources may be specified but if they are files or directories + then they must be relative to the source directory that is being built + (the context of the build). is the absolute path to + which the source is copied inside the target container. All new files and + directories are created with mode 0755, with uid and gid 0. + +**ENTRYPOINT** + --**ENTRYPOINT** has two forms: ENTRYPOINT ["executable", "param1", "param2"] + (This is like an exec, and is the preferred form.) ENTRYPOINT command param1 + param2 (This is running as a shell.) An ENTRYPOINT helps you configure a + container that can be run as an executable. When you specify an ENTRYPOINT, + the whole container runs as if it was only that executable. The ENTRYPOINT + instruction adds an entry command that is not overwritten when arguments are + passed to docker run. This is different from the behavior of CMD. This allows + arguments to be passed to the entrypoint, for instance docker run -d + passes the -d argument to the ENTRYPOINT. Specify parameters either in the + ENTRYPOINT JSON array (as in the preferred exec form above), or by using a CMD + statement. Parameters in the ENTRYPOINT are not overwritten by the docker run + arguments. Parameters specifies via CMD are overwritten by docker run + arguments. Specify a plain string for the ENTRYPOINT, and it will execute in + /bin/sh -c, like a CMD instruction: + FROM ubuntu + ENTRYPOINT wc -l - + This means that the Dockerfile's image always takes stdin as input (that's + what "-" means), and prints the number of lines (that's what "-l" means). To + make this optional but default, use a CMD: + FROM ubuntu + CMD ["-l", "-"] + ENTRYPOINT ["/usr/bin/wc"] + +**VOLUME** + --**VOLUME ["/data"]** + The VOLUME instruction creates a mount point with the specified name and marks + it as holding externally-mounted volumes from the native host or from other + containers. + +**USER** + -- **USER daemon** + The USER instruction sets the username or UID that is used when running the + image. + +**WORKDIR** + -- **WORKDIR /path/to/workdir** + The WORKDIR instruction sets the working directory for the **RUN**, **CMD**, and **ENTRYPOINT** Dockerfile commands that follow it. + It can be used multiple times in a single Dockerfile. Relative paths are defined relative to the path of the previous **WORKDIR** instruction. For example: + **WORKDIR /a WORKDIR b WORKDIR c RUN pwd** + In the above example, the output of the **pwd** command is **a/b/c**. + +**ONBUILD** + -- **ONBUILD [INSTRUCTION]** + The ONBUILD instruction adds a trigger instruction to the image, which is + executed at a later time, when the image is used as the base for another + build. 
The trigger is executed in the context of the downstream build, as + if it had been inserted immediately after the FROM instruction in the + downstream Dockerfile. Any build instruction can be registered as a + trigger. This is useful if you are building an image to be + used as a base for building other images, for example an application build + environment or a daemon to be customized with a user-specific + configuration. For example, if your image is a reusable python + application builder, it requires application source code to be + added in a particular directory, and might require a build script + to be called after that. You can't just call ADD and RUN now, because + you don't yet have access to the application source code, and it + is different for each application build. Providing + application developers with a boilerplate Dockerfile to copy-paste + into their application is inefficient, error-prone, and + difficult to update because it mixes with application-specific code. + The solution is to use **ONBUILD** to register instructions in advance, to + run later, during the next build stage. + +# HISTORY +*May 2014, Compiled by Zac Dover (zdover at redhat dot com) based on docker.com Dockerfile documentation. diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-history.1.md docker.io-1.3.2~dfsg1/docs/man/docker-history.1.md --- docker.io-0.9.1~dfsg1/docs/man/docker-history.1.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/docker-history.1.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,34 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-history - Show the history of an image + +# SYNOPSIS +**docker history** +[**--no-trunc**[=*false*]] +[**-q**|**--quiet**[=*false*]] + IMAGE + +# DESCRIPTION + +Show the history of when and how an image was created. + +# OPTIONS +**--no-trunc**=*true*|*false* + Don't truncate output. The default is *false*. + +**-q**, **--quiet**=*true*|*false* + Only show numeric IDs. The default is *false*. + +# EXAMPLES + $ sudo docker history fedora + IMAGE CREATED CREATED BY SIZE + 105182bb5e8b 5 days ago /bin/sh -c #(nop) ADD file:71356d2ad59aa3119d 372.7 MB + 73bd853d2ea5 13 days ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B + 511136ea3c5a 10 months ago 0 B + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-images.1.md docker.io-1.3.2~dfsg1/docs/man/docker-images.1.md --- docker.io-0.9.1~dfsg1/docs/man/docker-images.1.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/docker-images.1.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,90 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-images - List images + +# SYNOPSIS +**docker images** +[**-a**|**--all**[=*false*]] +[**-f**|**--filter**[=*[]*]] +[**--no-trunc**[=*false*]] +[**-q**|**--quiet**[=*false*]] + [NAME] + +# DESCRIPTION +This command lists the images stored in the local Docker repository. + +By default, intermediate images, used during builds, are not listed. Some of the +output, e.g., image ID, is truncated, for space reasons. However the truncated +image ID, and often the first few characters, are enough to be used in other +Docker commands that use the image ID. The output includes repository, tag, image +ID, date created and the virtual size. 
+ +The title REPOSITORY for the first title may seem confusing. It is essentially +the image name. However, because you can tag a specific image, and multiple tags +(image instances) can be associated with a single name, the name is really a +repository for all tagged images of the same name. For example consider an image +called fedora. It may be tagged with 18, 19, or 20, etc. to manage different +versions. + +# OPTIONS +**-a**, **--all**=*true*|*false* + Show all images (by default filter out the intermediate image layers). The default is *false*. + +**-f**, **--filter**=[] + Provide filter values (i.e. 'dangling=true') + +**--no-trunc**=*true*|*false* + Don't truncate output. The default is *false*. + +**-q**, **--quiet**=*true*|*false* + Only show numeric IDs. The default is *false*. + +# EXAMPLES + +## Listing the images + +To list the images in a local repository (not the registry) run: + + docker images + +The list will contain the image repository name, a tag for the image, and an +image ID, when it was created and its virtual size. Columns: REPOSITORY, TAG, +IMAGE ID, CREATED, and VIRTUAL SIZE. + +To get a verbose list of images which contains all the intermediate images +used in builds use **-a**: + + docker images -a + +## List images dependency tree hierarchy + +To list the images in the local repository (not the registry) in a dependency +tree format, use the **-t** option. + + docker images -t + +This displays a staggered hierarchy tree where the less indented image is +the oldest with dependent image layers branching inward (to the right) on +subsequent lines. The newest or top level image layer is listed last in +any tree branch. + +## List images in GraphViz format + +To display the list in a format consumable by a GraphViz tools run with +**-v**. For example to produce a .png graph file of the hierarchy use: + + docker images --viz | dot -Tpng -o docker.png + +## Listing only the shortened image IDs + +Listing just the shortened image IDs. This can be useful for some automated +tools. + + docker images -q + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-import.1.md docker.io-1.3.2~dfsg1/docs/man/docker-import.1.md --- docker.io-0.9.1~dfsg1/docs/man/docker-import.1.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/docker-import.1.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,43 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-import - Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it. + +# SYNOPSIS +**docker import** +URL|- [REPOSITORY[:TAG]] + +# DESCRIPTION +Create a new filesystem image from the contents of a tarball (`.tar`, +`.tar.gz`, `.tgz`, `.bzip`, `.tar.xz`, `.txz`) into it, then optionally tag it. + +# OPTIONS +There are no available options. + +# EXAMPLES + +## Import from a remote location + + # docker import http://example.com/exampleimage.tgz example/imagerepo + +## Import from a local file + +Import to docker via pipe and stdin: + + # cat exampleimage.tgz | docker import - example/imagelocal + +## Import from a local file and tag + +Import to docker via pipe and stdin: + + # cat exampleimageV2.tgz | docker import - example/imagelocal:V-2.0 + +## Import from a local directory + + # tar -c . 
| docker import - exampleimagedir + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-info.1.md docker.io-1.3.2~dfsg1/docs/man/docker-info.1.md --- docker.io-0.9.1~dfsg1/docs/man/docker-info.1.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/docker-info.1.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,44 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-info - Display system-wide information + +# SYNOPSIS +**docker info** + + +# DESCRIPTION +This command displays system wide information regarding the Docker installation. +Information displayed includes the number of containers and images, pool name, +data file, metadata file, data space used, total data space, metadata space used +, total metadata space, execution driver, and the kernel version. + +The data file is where the images are stored and the metadata file is where the +meta data regarding those images are stored. When run for the first time Docker +allocates a certain amount of data space and meta data space from the space +available on the volume where `/var/lib/docker` is mounted. + +# OPTIONS +There are no available options. + +# EXAMPLES + +## Display Docker system information + +Here is a sample output: + + # docker info + Containers: 14 + Images: 52 + Storage Driver: aufs + Root Dir: /var/lib/docker/aufs + Dirs: 80 + Execution Driver: native-0.2 + Kernel Version: 3.13.0-24-generic + Operating System: Ubuntu 14.04 LTS + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-inspect.1.md docker.io-1.3.2~dfsg1/docs/man/docker-inspect.1.md --- docker.io-0.9.1~dfsg1/docs/man/docker-inspect.1.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/docker-inspect.1.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,229 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-inspect - Return low-level information on a container or image + +# SYNOPSIS +**docker inspect** +[**-f**|**--format**[=*FORMAT*]] +CONTAINER|IMAGE [CONTAINER|IMAGE...] + +# DESCRIPTION + +This displays all the information available in Docker for a given +container or image. By default, this will render all results in a JSON +array. If a format is specified, the given template will be executed for +each result. + +# OPTIONS +**-f**, **--format**="" + Format the output using the given go template. 
+ +# EXAMPLES + +## Getting information on a container + +To get information on a container use it's ID or instance name: + + #docker inspect 1eb5fabf5a03 + [{ + "ID": "1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b", + "Created": "2014-04-04T21:33:52.02361335Z", + "Path": "/usr/sbin/nginx", + "Args": [], + "Config": { + "Hostname": "1eb5fabf5a03", + "Domainname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 0, + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "PortSpecs": null, + "ExposedPorts": { + "80/tcp": {} + }, + "Tty": true, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": [ + "/usr/sbin/nginx" + ], + "Dns": null, + "DnsSearch": null, + "Image": "summit/nginx", + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "OnBuild": null, + "Context": { + "mount_label": "system_u:object_r:svirt_sandbox_file_t:s0:c0,c650", + "process_label": "system_u:system_r:svirt_lxc_net_t:s0:c0,c650" + } + }, + "State": { + "Running": true, + "Pid": 858, + "ExitCode": 0, + "StartedAt": "2014-04-04T21:33:54.16259207Z", + "FinishedAt": "0001-01-01T00:00:00Z", + "Ghost": false + }, + "Image": "df53773a4390e25936f9fd3739e0c0e60a62d024ea7b669282b27e65ae8458e6", + "NetworkSettings": { + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "Gateway": "172.17.42.1", + "Bridge": "docker0", + "PortMapping": null, + "Ports": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "80" + } + ] + } + }, + "ResolvConfPath": "/etc/resolv.conf", + "HostnamePath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hostname", + "HostsPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hosts", + "Name": "/ecstatic_ptolemy", + "Driver": "devicemapper", + "ExecDriver": "native-0.1", + "Volumes": {}, + "VolumesRW": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "80" + } + ] + }, + "Links": null, + "PublishAllPorts": false, + "DriverOptions": { + "lxc": null + }, + "CliAddress": "" + } + +## Getting the IP address of a container instance + +To get the IP address of a container use: + + # docker inspect --format='{{.NetworkSettings.IPAddress}}' 1eb5fabf5a03 + 172.17.0.2 + +## Listing all port bindings + +One can loop over arrays and maps in the results to produce simple text +output: + + # docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} \ + {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' 1eb5fabf5a03 + + 80/tcp -> 80 + +## Getting information on an image + +Use an image's ID or name (e.g., repository/name[:tag]) to get information + on it. 
+ + # docker inspect 58394af37342 + [{ + "id": "58394af373423902a1b97f209a31e3777932d9321ef10e64feaaa7b4df609cf9", + "parent": "8abc22bad04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db", + "created": "2014-02-03T16:10:40.500814677Z", + "container": "f718f19a28a5147da49313c54620306243734bafa63c76942ef6f8c4b4113bc5", + "container_config": { + "Hostname": "88807319f25e", + "Domainname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 0, + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "PortSpecs": null, + "ExposedPorts": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) ADD fedora-20-dummy.tar.xz in /" + ], + "Dns": null, + "DnsSearch": null, + "Image": "8abc22bad04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db", + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "OnBuild": null, + "Context": null + }, + "docker_version": "0.6.3", + "author": "I P Babble \u003clsm5@ipbabble.com\u003e - ./buildcontainers.sh", + "config": { + "Hostname": "88807319f25e", + "Domainname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 0, + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "PortSpecs": null, + "ExposedPorts": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": null, + "Dns": null, + "DnsSearch": null, + "Image": "8abc22bad04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db", + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "OnBuild": null, + "Context": null + }, + "architecture": "x86_64", + "Size": 385520098 + }] + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-kill.1.md docker.io-1.3.2~dfsg1/docs/man/docker-kill.1.md --- docker.io-0.9.1~dfsg1/docs/man/docker-kill.1.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/docker-kill.1.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,24 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-kill - Kill a running container using SIGKILL or a specified signal + +# SYNOPSIS +**docker kill** +[**-s**|**--signal**[=*"KILL"*]] + CONTAINER [CONTAINER...] + +# DESCRIPTION + +The main process inside each container specified will be sent SIGKILL, + or any signal specified with option --signal. + +# OPTIONS +**-s**, **--signal**="KILL" + Signal to send to the container + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) + based on docker.com source material and internal work. 
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-load.1.md docker.io-1.3.2~dfsg1/docs/man/docker-load.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-load.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-load.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,38 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-load - Load an image from a tar archive on STDIN
+
+# SYNOPSIS
+**docker load**
+[**-i**|**--input**[=*INPUT*]]
+
+# DESCRIPTION
+
+Loads a tarred repository from a file or the standard input stream.
+Restores both images and tags.
+
+# OPTIONS
+**-i**, **--input**=""
+   Read from a tar archive file, instead of STDIN
+
+# EXAMPLES
+
+    $ sudo docker images
+    REPOSITORY          TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
+    busybox             latest              769b9341d937        7 weeks ago         2.489 MB
+    $ sudo docker load --input fedora.tar
+    $ sudo docker images
+    REPOSITORY          TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
+    busybox             latest              769b9341d937        7 weeks ago         2.489 MB
+    fedora              rawhide             0d20aec6529d        7 weeks ago         387 MB
+    fedora              20                  58394af37342        7 weeks ago         385.5 MB
+    fedora              heisenbug           58394af37342        7 weeks ago         385.5 MB
+    fedora              latest              58394af37342        7 weeks ago         385.5 MB
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-login.1.md docker.io-1.3.2~dfsg1/docs/man/docker-login.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-login.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-login.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,38 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-login - Register or log in to a Docker registry server; if no server is specified, "https://index.docker.io/v1/" is the default.
+
+# SYNOPSIS
+**docker login**
+[**-e**|**--email**[=*EMAIL*]]
+[**-p**|**--password**[=*PASSWORD*]]
+[**-u**|**--username**[=*USERNAME*]]
+[SERVER]
+
+# DESCRIPTION
+Register or log in to a Docker registry server. If no server is
+specified, "https://index.docker.io/v1/" is the default. If you want to
+log in to a private registry you can specify it by adding the server name.
+
+# OPTIONS
+**-e**, **--email**=""
+   Email
+
+**-p**, **--password**=""
+   Password
+
+**-u**, **--username**=""
+   Username
+
+# EXAMPLES
+
+## Log in to a local registry
+
+    # docker login localhost:8080
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-logout.1.md docker.io-1.3.2~dfsg1/docs/man/docker-logout.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-logout.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-logout.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,27 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-logout - Log out from a Docker registry; if no server is specified, "https://index.docker.io/v1/" is the default.
+
+# SYNOPSIS
+**docker logout**
+[SERVER]
+
+# DESCRIPTION
+Log the user out from a Docker registry. If no server is
+specified, "https://index.docker.io/v1/" is the default. If you want to
+log out from a private registry you can specify it by adding the server name.
+
+# OPTIONS
+There are no available options.
+
+# EXAMPLES
+
+## Log out from a local registry
+
+    # docker logout localhost:8080
+
+# HISTORY
+June 2014, Originally compiled by Daniel, Dao Quang Minh (daniel at nitrous dot io)
+July 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-logs.1.md docker.io-1.3.2~dfsg1/docs/man/docker-logs.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-logs.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-logs.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,38 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-logs - Fetch the logs of a container
+
+# SYNOPSIS
+**docker logs**
+[**-f**|**--follow**[=*false*]]
+[**-t**|**--timestamps**[=*false*]]
+[**--tail**[=*"all"*]]
+CONTAINER
+
+# DESCRIPTION
+The **docker logs** command batch-retrieves whatever logs are present for
+a container at the time of execution. This does not guarantee execution
+order when combined with **docker run** (i.e., your run may not have generated
+any logs at the time you execute **docker logs**).
+
+The **docker logs --follow** command combines the commands **docker logs** and
+**docker attach**. It will first return all logs from the beginning and
+then continue streaming new output from the container’s stdout and stderr.
+
+# OPTIONS
+**-f**, **--follow**=*true*|*false*
+   Follow log output. The default is *false*.
+
+**-t**, **--timestamps**=*true*|*false*
+   Show timestamps. The default is *false*.
+
+**--tail**="all"
+   Output the specified number of lines at the end of logs (defaults to all logs)
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
+July 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-pause.1.md docker.io-1.3.2~dfsg1/docs/man/docker-pause.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-pause.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-pause.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,27 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-pause - Pause all processes within a container
+
+# SYNOPSIS
+**docker pause**
+CONTAINER
+
+# DESCRIPTION
+
+The `docker pause` command uses the cgroups freezer to suspend all processes in
+a container. Traditionally, when suspending a process the `SIGSTOP` signal is
+used, which is observable by the process being suspended. With the cgroups
+freezer the process is unaware of, and unable to capture, the fact that it is
+being suspended, and subsequently resumed.
+
+See the [cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt)
+for further details.
+
+# OPTIONS
+There are no available options.
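+
+# EXAMPLES
+
+For example, pause a running container (the container name *mycontainer*
+below is illustrative); the paused container can later be resumed with
+**docker unpause**:
+
+    # docker pause mycontainer
+    mycontainer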
+
+# HISTORY
+June 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-port.1.md docker.io-1.3.2~dfsg1/docs/man/docker-port.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-port.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-port.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,32 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-port - List port mappings for the CONTAINER, or look up the public-facing port that is NAT-ed to the PRIVATE_PORT
+
+# SYNOPSIS
+**docker port** CONTAINER [PRIVATE_PORT[/PROTO]]
+
+# DESCRIPTION
+List port mappings for the CONTAINER, or look up the public-facing port that is NAT-ed to the PRIVATE_PORT.
+
+# EXAMPLES
+You can find out all the ports mapped by not specifying a `PRIVATE_PORT`, or
+ask for just a specific mapping:
+
+    $ docker ps test
+    CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS                                            NAMES
+    b650456536c7        busybox:latest      top                 54 minutes ago      Up 54 minutes       0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp   test
+    $ docker port test
+    7890/tcp -> 0.0.0.0:4321
+    9876/tcp -> 0.0.0.0:1234
+    $ docker port test 7890/tcp
+    0.0.0.0:4321
+    $ docker port test 7890/udp
+    2014/06/24 11:53:36 Error: No public port '7890/udp' published for test
+    $ docker port test 7890
+    0.0.0.0:4321
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+June 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-ps.1.md docker.io-1.3.2~dfsg1/docs/man/docker-ps.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-ps.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-ps.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,76 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-ps - List containers
+
+# SYNOPSIS
+**docker ps**
+[**-a**|**--all**[=*false*]]
+[**--before**[=*BEFORE*]]
+[**-f**|**--filter**[=*[]*]]
+[**-l**|**--latest**[=*false*]]
+[**-n**[=*-1*]]
+[**--no-trunc**[=*false*]]
+[**-q**|**--quiet**[=*false*]]
+[**-s**|**--size**[=*false*]]
+[**--since**[=*SINCE*]]
+
+# DESCRIPTION
+
+List the containers in the local repository. By default this shows only
+the running containers.
+
+# OPTIONS
+**-a**, **--all**=*true*|*false*
+   Show all containers. Only running containers are shown by default. The default is *false*.
+
+**--before**=""
+   Show only containers created before the given ID or name, including non-running ones.
+
+**-f**, **--filter**=[]
+   Provide filter values. Valid filters:
+                          exited=<int> - containers with an exit code of <int>
+
+**-l**, **--latest**=*true*|*false*
+   Show only the latest created container, including non-running ones. The default is *false*.
+
+**-n**=-1
+   Show the n last created containers, including non-running ones.
+
+**--no-trunc**=*true*|*false*
+   Don't truncate output. The default is *false*.
+
+**-q**, **--quiet**=*true*|*false*
+   Only display numeric IDs. The default is *false*.
+
+**-s**, **--size**=*true*|*false*
+   Display sizes. The default is *false*.
+
+**--since**=""
+   Show only containers created since the given ID or name, including non-running ones.
+ +# EXAMPLES +# Display all containers, including non-running + + # docker ps -a + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + a87ecb4f327c fedora:20 /bin/sh -c #(nop) MA 20 minutes ago Exit 0 desperate_brattain + 01946d9d34d8 vpavlin/rhel7:latest /bin/sh -c #(nop) MA 33 minutes ago Exit 0 thirsty_bell + c1d3b0166030 acffc0358b9e /bin/sh -c yum -y up 2 weeks ago Exit 1 determined_torvalds + 41d50ecd2f57 fedora:20 /bin/sh -c #(nop) MA 2 weeks ago Exit 0 drunk_pike + +# Display only IDs of all containers, including non-running + + # docker ps -a -q + a87ecb4f327c + 01946d9d34d8 + c1d3b0166030 + 41d50ecd2f57 + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +August 2014, updated by Sven Dowideit diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-pull.1.md docker.io-1.3.2~dfsg1/docs/man/docker-pull.1.md --- docker.io-0.9.1~dfsg1/docs/man/docker-pull.1.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/docker-pull.1.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,66 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-pull - Pull an image or a repository from the registry + +# SYNOPSIS +**docker pull** +[**-a**|**--all-tags**[=*false*]] +NAME[:TAG] + +# DESCRIPTION + +This command pulls down an image or a repository from the registry. If +there is more than one image for a repository (e.g., fedora) then all +images for that repository name are pulled down including any tags. +It is also possible to specify a non-default registry to pull from. + +# OPTIONS +**-a**, **--all-tags**=*true*|*false* + Download all tagged images in the repository. The default is *false*. + +# EXAMPLES + +# Pull a repository with multiple images +# Note that if the image is previously downloaded then the status would be +# 'Status: Image is up to date for fedora' + + $ sudo docker pull fedora + Pulling repository fedora + ad57ef8d78d7: Download complete + 105182bb5e8b: Download complete + 511136ea3c5a: Download complete + 73bd853d2ea5: Download complete + + Status: Downloaded newer image for fedora + + $ sudo docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB + fedora 20 105182bb5e8b 5 days ago 372.7 MB + fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB + fedora latest 105182bb5e8b 5 days ago 372.7 MB + +# Pull an image, manually specifying path to the registry and tag +# Note that if the image is previously downloaded then the status would be +# 'Status: Image is up to date for registry.hub.docker.com/fedora:20' + + $ sudo docker pull registry.hub.docker.com/fedora:20 + Pulling repository fedora + 3f2fed40e4b0: Download complete + 511136ea3c5a: Download complete + fd241224e9cf: Download complete + + Status: Downloaded newer image for registry.hub.docker.com/fedora:20 + + $ sudo docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + fedora 20 3f2fed40e4b0 4 days ago 372.7 MB + + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. 
+June 2014, updated by Sven Dowideit
+August 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-push.1.md docker.io-1.3.2~dfsg1/docs/man/docker-push.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-push.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-push.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,49 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-push - Push an image or a repository to the registry
+
+# SYNOPSIS
+**docker push**
+NAME[:TAG]
+
+# DESCRIPTION
+Push an image or a repository to a registry. The default registry is the Docker
+Hub located at [hub.docker.com](https://hub.docker.com/). However, the
+image can be pushed to another, perhaps private, registry as demonstrated in
+the example below.
+
+# OPTIONS
+There are no available options.
+
+# EXAMPLES
+
+# Pushing a new image to a registry
+
+First save the new image by finding the container ID (using **docker ps**)
+and then committing it to a new image name:
+
+    # docker commit c16378f943fe rhel-httpd
+
+Now push the image to the registry. In this example the registry is on a host
+named registry-host, listening on port 5000. By default Docker would push to
+the `hub.docker.com` registry; to push to the local registry instead, tag the
+image with the registry's host name or IP address and port:
+
+    # docker tag rhel-httpd registry-host:5000/myadmin/rhel-httpd
+    # docker push registry-host:5000/myadmin/rhel-httpd
+
+Check that this worked by running:
+
+    # docker images
+
+You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd`
+listed.
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-restart.1.md docker.io-1.3.2~dfsg1/docs/man/docker-restart.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-restart.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-restart.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,22 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-restart - Restart a running container
+
+# SYNOPSIS
+**docker restart**
+[**-t**|**--time**[=*10*]]
+CONTAINER [CONTAINER...]
+
+# DESCRIPTION
+Restart each container listed.
+
+# OPTIONS
+**-t**, **--time**=10
+   Number of seconds to wait for the container to stop before killing it. Once
+killed, the container is then restarted. The default is 10 seconds.
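+
+# EXAMPLES
+
+For example, restart a container (the container name *web* below is
+illustrative), allowing it up to 30 seconds to stop before it is killed:
+
+    # docker restart --time=30 web
+    web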
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-rm.1.md docker.io-1.3.2~dfsg1/docs/man/docker-rm.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-rm.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-rm.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,53 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-rm - Remove one or more containers
+
+# SYNOPSIS
+**docker rm**
+[**-f**|**--force**[=*false*]]
+[**-l**|**--link**[=*false*]]
+[**-v**|**--volumes**[=*false*]]
+CONTAINER [CONTAINER...]
+
+# DESCRIPTION
+
+**docker rm** will remove one or more containers from the host node. The
+container name or ID can be used. This does not remove images. You cannot
+remove a running container unless you use the **-f** option. To see all
+containers on a host use the **docker ps -a** command.
+
+# OPTIONS
+**-f**, **--force**=*true*|*false*
+   Force the removal of a running container (uses SIGKILL). The default is *false*.
+
+**-l**, **--link**=*true*|*false*
+   Remove the specified link and not the underlying container. The default is *false*.
+
+**-v**, **--volumes**=*true*|*false*
+   Remove the volumes associated with the container. The default is *false*.
+
+# EXAMPLES
+
+## Removing a container using its ID
+
+To remove a container using its ID, find the ID either from the output of the
+**docker ps -a** command, from the value returned by the **docker run**
+command, or from the file it was stored in with **docker run --cidfile**:
+
+    docker rm abebf7571666
+
+## Removing a container using the container name
+
+The name of the container can be found using the **docker ps -a**
+command. Then use that name as follows:
+
+    docker rm hopeful_morse
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
+July 2014, updated by Sven Dowideit
+August 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-rmi.1.md docker.io-1.3.2~dfsg1/docs/man/docker-rmi.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-rmi.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-rmi.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,38 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-rmi - Remove one or more images
+
+# SYNOPSIS
+**docker rmi**
+[**-f**|**--force**[=*false*]]
+[**--no-prune**[=*false*]]
+IMAGE [IMAGE...]
+
+# DESCRIPTION
+
+This will remove one or more images from the host node. This does not
+remove images from a registry. You cannot remove an image of a running
+container unless you use the **-f** option. To see all images on a host
+use the **docker images** command.
+
+# OPTIONS
+**-f**, **--force**=*true*|*false*
+   Force removal of the image. The default is *false*.
+
+**--no-prune**=*true*|*false*
+   Do not delete untagged parents. The default is *false*.
+
+# EXAMPLES
+
+## Removing an image
+
+Here is an example of removing an image:
+
+    docker rmi fedora/httpd
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-run.1.md docker.io-1.3.2~dfsg1/docs/man/docker-run.1.md --- docker.io-0.9.1~dfsg1/docs/man/docker-run.1.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/man/docker-run.1.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,427 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-run - Run a command in a new container + +# SYNOPSIS +**docker run** +[**-a**|**--attach**[=*[]*]] +[**--add-host**[=*[]*]] +[**-c**|**--cpu-shares**[=*0*]] +[**--cap-add**[=*[]*]] +[**--cap-drop**[=*[]*]] +[**--cidfile**[=*CIDFILE*]] +[**--cpuset**[=*CPUSET*]] +[**-d**|**--detach**[=*false*]] +[**--device**[=*[]*]] +[**--dns-search**[=*[]*]] +[**--dns**[=*[]*]] +[**-e**|**--env**[=*[]*]] +[**--entrypoint**[=*ENTRYPOINT*]] +[**--env-file**[=*[]*]] +[**--expose**[=*[]*]] +[**-h**|**--hostname**[=*HOSTNAME*]] +[**-i**|**--interactive**[=*false*]] +[**--security-opt**[=*[]*]] +[**--link**[=*[]*]] +[**--lxc-conf**[=*[]*]] +[**-m**|**--memory**[=*MEMORY*]] +[**--name**[=*NAME*]] +[**--net**[=*"bridge"*]] +[**-P**|**--publish-all**[=*false*]] +[**-p**|**--publish**[=*[]*]] +[**--privileged**[=*false*]] +[**--restart**[=*POLICY*]] +[**--rm**[=*false*]] +[**--sig-proxy**[=*true*]] +[**-t**|**--tty**[=*false*]] +[**-u**|**--user**[=*USER*]] +[**-v**|**--volume**[=*[]*]] +[**--volumes-from**[=*[]*]] +[**-w**|**--workdir**[=*WORKDIR*]] + IMAGE [COMMAND] [ARG...] + +# DESCRIPTION + +Run a process in a new container. **docker run** starts a process with its own +file system, its own networking, and its own isolated process tree. The IMAGE +which starts the process may define defaults related to the process that will be +run in the container, the networking to expose, and more, but **docker run** +gives final control to the operator or administrator who starts the container +from the image. For that reason **docker run** has more options than any other +Docker command. + +If the IMAGE is not already loaded then **docker run** will pull the IMAGE, and +all image dependencies, from the repository in the same way running **docker +pull** IMAGE, before it starts the container from that image. + +# OPTIONS + +**-a**, **--attach**=*stdin*|*stdout*|*stderr* + Attach to stdin, stdout or stderr. In foreground mode (the default when +**-d** is not specified), **docker run** can start the process in the container +and attach the console to the process’s standard input, output, and standard +error. It can even pretend to be a TTY (this is what most commandline +executables expect) and pass along signals. The **-a** option can be set for +each of stdin, stdout, and stderr. + +**--add-host**=*hostname*:*ip* + Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** +option can be set multiple times. + +**-c**, **--cpu-shares**=0 + CPU shares in relative weight. You can increase the priority of a container +with the -c option. By default, all containers run at the same priority and get +the same proportion of CPU cycles, but you can tell the kernel to give more +shares of CPU time to one or more containers when you start them via **docker +run**. + +**--cap-add**=[] + Add Linux capabilities + +**--cap-drop**=[] + Drop Linux capabilities + +**--cidfile**="" + Write the container ID to the file + +**--cpuset**="" + CPUs in which to allow execution (0-3, 0,1) + +**-d**, **--detach**=*true*|*false* + Detached mode. This runs the container in the background. 
It outputs the new
+container's ID and any error messages. At any time you can run **docker ps** in
+the other shell to view a list of the running containers. You can reattach to a
+detached container with **docker attach**. If you choose to run a container in
+detached mode, then you cannot use the **--rm** option.
+
+   When attached in tty mode, you can detach from a running container without
+stopping the process by pressing the keys CTRL-P CTRL-Q.
+
+**--device**=[]
+   Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc)
+
+**--dns-search**=[]
+   Set custom DNS search domains
+
+**--dns**=*IP-address*
+   Set custom DNS servers. This option can be used to override the DNS
+configuration passed to the container. Typically this is necessary when the
+host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this
+is the case the **--dns** flag is necessary for every run.
+
+**-e**, **--env**=*environment*
+   Set environment variables. This option allows you to specify arbitrary
+environment variables that are available for the process that will be launched
+inside of the container.
+
+**--entrypoint**=*command*
+   This option allows you to overwrite the default entrypoint of the image that
+is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND
+because it specifies what executable to run when the container starts, but it is
+(purposely) more difficult to override. The ENTRYPOINT gives a container its
+default nature or behavior, so that when you set an ENTRYPOINT you can run the
+container as if it were that binary, complete with default options, and you can
+pass in more options via the COMMAND. But, sometimes an operator may want to run
+something else inside the container, so you can override the default ENTRYPOINT
+at runtime by using a **--entrypoint** and a string to specify the new
+ENTRYPOINT.
+
+**--env-file**=[]
+   Read in a line-delimited file of environment variables
+
+**--expose**=*port*
+   Expose a port from the container without publishing it to your host. A
+container's port can be exposed to other containers in three ways: 1) The
+developer can expose the port using the EXPOSE parameter of the Dockerfile, 2)
+the operator can use the **--expose** option with **docker run**, or 3) the
+container can be started with the **--link** option.
+
+**-h**, **--hostname**=*hostname*
+   Sets the container host name that is available inside the container.
+
+**-i**, **--interactive**=*true*|*false*
+   When set to true, keep stdin open even if not attached. The default is false.
+
+**--security-opt**=*secdriver*:*name*:*value*
+   "label:user:USER"   : Set the label user for the container
+   "label:role:ROLE"   : Set the label role for the container
+   "label:type:TYPE"   : Set the label type for the container
+   "label:level:LEVEL" : Set the label level for the container
+   "label:disable"     : Turn off label confinement for the container
+
+**--link**=*name*:*alias*
+   Add link to another container. The format is name:alias. If the operator
+uses **--link** when starting the new client container, then the client
+container can access the exposed port via a private networking interface. Docker
+will set some environment variables in the client container to help indicate
+which interface and port to use.
+
+**--lxc-conf**=[]
+   (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
+
+**-m**, **--memory**=*memory-limit*
+   Allows you to constrain the memory available to a container.
If the host
+supports swap memory, then the -m memory setting can be larger than physical
+RAM. If a limit of 0 is specified, the container's memory is not limited. The
+actual limit may be rounded up to a multiple of the operating system's page
+size, if it is not already. The memory limit should be formatted as follows:
+`<number><optional unit>`, where unit = b, k, m or g.
+
+**--name**=*name*
+   Assign a name to the container. The operator can identify a container in
+three ways:
+
+    UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”)
+    UUID short identifier (“f78375b1c487”)
+    Name (“jonah”)
+
+The UUID identifiers come from the Docker daemon, and if a name is not assigned
+to the container with **--name** then the daemon will also generate a random
+string name. The name is useful when defining links (see **--link**) (or any
+other place you need to identify a container). This works for both background
+and foreground Docker containers.
+
+**--net**="bridge"
+   Set the Network mode for the container
+     'bridge': creates a new network stack for the container on the docker bridge
+     'none': no networking for this container
+     'container:<name|id>': reuses another container network stack
+     'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.
+
+**-P**, **--publish-all**=*true*|*false*
+   When set to true, publish all exposed ports to the host interfaces. The
+default is false. If the operator uses -P (or -p) then Docker will make the
+exposed port accessible on the host and the ports will be available to any
+client that can reach the host. When using -P, Docker will bind the exposed
+ports to a random port on the host between 49153 and 65535. To find the
+mapping between the host ports and the exposed ports, use **docker port**.
+
+**-p**, **--publish**=[]
+   Publish a container's port to the host (format: ip:hostPort:containerPort |
+ip::containerPort | hostPort:containerPort | containerPort) (use **docker port** to see the
+actual mapping)
+
+**--privileged**=*true*|*false*
+   Give extended privileges to this container. By default, Docker containers are
+“unprivileged” (=false) and cannot, for example, run a Docker daemon inside the
+Docker container. This is because by default a container is not allowed to
+access any devices. A “privileged” container is given access to all devices.
+
+When the operator executes **docker run --privileged**, Docker will enable access
+to all devices on the host as well as set some configuration in AppArmor to
+allow the container nearly all the same access to the host as processes running
+outside of a container on the host.
+
+**--rm**=*true*|*false*
+   Automatically remove the container when it exits (incompatible with -d). The default is *false*.
+
+**--sig-proxy**=*true*|*false*
+   Proxy received signals to the process (even in non-TTY mode). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true*.
+
+**-t**, **--tty**=*true*|*false*
+   When set to true Docker can allocate a pseudo-tty and attach to the standard
+input of any container. This can be used, for example, to run a throwaway
+interactive shell. The default value is false.
+
+**-u**, **--user**=""
+   Username or UID
+
+**-v**, **--volume**=*volume*[:ro|:rw]
+   Bind mount a volume to the container.
+
+The **-v** option can be used one or
+more times to add one or more mounts to a container.
These mounts can then be
+used in other containers using the **--volumes-from** option.
+
+The volume may be optionally suffixed with :ro or :rw to mount the volumes in
+read-only or read-write mode, respectively. By default, the volumes are mounted
+read-write. See examples.
+
+**--volumes-from**=*container-id*[:ro|:rw]
+   Will mount volumes from the specified container identified by container-id.
+Once a volume is mounted in one container it can be shared with other
+containers using the **--volumes-from** option when running those other
+containers. The volumes can be shared even if the original container with the
+mount is not running.
+
+The container ID may be optionally suffixed with :ro or
+:rw to mount the volumes in read-only or read-write mode, respectively. By
+default, the volumes are mounted in the same mode (read write or read only) as
+the reference container.
+
+**-w**, **--workdir**=*directory*
+   Working directory inside the container. The default working directory for
+running binaries within a container is the root directory (/). The developer can
+set a different default with the Dockerfile WORKDIR instruction. The operator
+can override the working directory by using the **-w** option.
+
+**IMAGE**
+   The image name or ID. You can specify a version of an image you'd like to run
+the container with by adding image:tag to the command. For example,
+`docker run ubuntu:14.04`.
+
+**COMMAND**
+   The command or program to run inside the image.
+
+**ARG**
+   The arguments for the command to be run in the container.
+
+# EXAMPLES
+
+## Exposing log messages from the container to the host's log
+
+If you want messages that are logged in your container to show up in the host's
+syslog/journal then you should bind mount /dev/log as follows:
+
+    # docker run -v /dev/log:/dev/log -i -t fedora /bin/bash
+
+From inside the container you can test this by sending a message to the log.
+
+    (bash)# logger "Hello from my container"
+
+Then exit and check the journal.
+
+    # exit
+
+    # journalctl -b | grep Hello
+
+This should list the message sent to logger.
+
+## Attaching to one or more from STDIN, STDOUT, STDERR
+
+If you do not specify -a then Docker will attach everything (stdin, stdout,
+stderr). You can specify to which of the three standard streams (stdin, stdout,
+stderr) you’d like to connect instead, as in:
+
+    # docker run -a stdin -a stdout -i -t fedora /bin/bash
+
+## Linking Containers
+
+The link feature allows multiple containers to communicate with each other. For
+example, a container whose Dockerfile has exposed port 80 can be run and named
+as follows:
+
+    # docker run --name=link-test -d -i -t fedora/httpd
+
+A second container, in this case called linker, can communicate with the httpd
+container, named link-test, by running with the **--link**=*name*:*alias* option:
+
+    # docker run -t -i --link=link-test:lt --name=linker fedora /bin/bash
+
+Now the container linker is linked to container link-test with the alias lt.
+
+Running the **env** command in the linker container shows environment variables
+with the LT (alias) context (**LT_**):
+
+    # env
+    HOSTNAME=668231cb0978
+    TERM=xterm
+    LT_PORT_80_TCP=tcp://172.17.0.3:80
+    LT_PORT_80_TCP_PORT=80
+    LT_PORT_80_TCP_PROTO=tcp
+    LT_PORT=tcp://172.17.0.3:80
+    PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+    PWD=/
+    LT_NAME=/linker/lt
+    SHLVL=1
+    HOME=/
+    LT_PORT_80_TCP_ADDR=172.17.0.3
+    _=/usr/bin/env
+
+When linking two containers Docker will use the exposed ports of the container
+to create a secure tunnel for the parent to access.
+
+## Mapping Ports for External Usage
+
+The exposed port of an application can be mapped to a host port using the **-p**
+flag. For example, an httpd port 80 can be mapped to the host port 8080 using the
+following:
+
+    # docker run -p 8080:80 -d -i -t fedora/httpd
+
+## Creating and Mounting a Data Volume Container
+
+Many applications require the sharing of persistent data across several
+containers. Docker allows you to create a Data Volume Container that other
+containers can mount from. For example, create a named container that contains
+directories /var/volume1 and /tmp/volume2. The image will need to contain these
+directories, so a couple of RUN mkdir instructions might be required for your
+fedora-data image:
+
+    # docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true
+    # docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash
+
+Multiple --volumes-from parameters will bring together multiple data volumes from
+multiple containers. It is also possible to mount the volumes that came from the
+DATA container in yet another container via the fedora-container1 intermediary
+container, allowing you to abstract the actual data source from users of that data:
+
+    # docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash
+
+## Mounting External Volumes
+
+To mount a host directory as a container volume, specify the absolute path to
+the directory and the absolute path for the container directory separated by a
+colon:
+
+    # docker run -v /var/db:/data1 -i -t fedora bash
+
+When using SELinux, be aware that the host has no knowledge of container SELinux
+policy. Therefore, in the above example, if SELinux policy is enforced, the
+`/var/db` directory is not writable to the container. A "Permission Denied"
+message will occur, along with an avc: message in the host's syslog.
+
+To work around this, at the time of writing this man page, the following command
+needs to be run in order for the proper SELinux policy type label to be attached
+to the host directory:
+
+    # chcon -Rt svirt_sandbox_file_t /var/db
+
+Now, writing to the /data1 volume in the container will be allowed and the
+changes will also be reflected on the host in /var/db.
+
+## Using alternative security labeling
+
+You can override the default labeling scheme for each container by specifying
+the `--security-opt` flag. For example, you can specify the MCS/MLS level, a
+requirement for MLS systems. Specifying the level in the following command
+allows you to share the same content between containers.
+
+    # docker run --security-opt label:level:s0:c100,c200 -i -t fedora bash
+
+An MLS example might be:
+
+    # docker run --security-opt label:level:TopSecret -i -t rhel7 bash
+
+To disable the security labeling for this container, rather than running with
+the `--permissive` flag, use the following command:
+
+    # docker run --security-opt label:disable -i -t fedora bash
+
+If you want a tighter security policy on the processes within a container,
+you can specify an alternate type for the container. You could run a container
+that is only allowed to listen on Apache ports by executing the following
+command:
+
+    # docker run --security-opt label:type:svirt_apache_t -i -t centos bash
+
+Note:
+
+You would have to write policy defining a `svirt_apache_t` type.
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
+July 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-save.1.md docker.io-1.3.2~dfsg1/docs/man/docker-save.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-save.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-save.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,37 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-save - Save an image to a tar archive (streamed to STDOUT by default)
+
+# SYNOPSIS
+**docker save**
+[**-o**|**--output**[=*OUTPUT*]]
+IMAGE
+
+# DESCRIPTION
+Produces a tarred repository to the standard output stream. Contains all
+parent layers, and all tags + versions, or the specified repo:tag.
+
+Stream to a file instead of STDOUT by using **-o**.
+
+# OPTIONS
+**-o**, **--output**=""
+   Write to a file, instead of STDOUT
+
+# EXAMPLES
+
+Save all fedora repository images to a fedora-all.tar and save the latest
+fedora image to a fedora-latest.tar:
+
+    $ sudo docker save fedora > fedora-all.tar
+    $ sudo docker save --output=fedora-latest.tar fedora:latest
+    $ ls -sh fedora-all.tar
+    721M fedora-all.tar
+    $ ls -sh fedora-latest.tar
+    367M fedora-latest.tar
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-search.1.md docker.io-1.3.2~dfsg1/docs/man/docker-search.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-search.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-search.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,58 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-search - Search the Docker Hub for images
+
+# SYNOPSIS
+**docker search**
+[**--automated**[=*false*]]
+[**--no-trunc**[=*false*]]
+[**-s**|**--stars**[=*0*]]
+TERM
+
+# DESCRIPTION
+
+Search an index for images that match the term TERM. The table
+of images returned displays the name, description (truncated by default),
+number of stars awarded, whether the image is official, and whether it
+is automated.
+
+# OPTIONS
+**--automated**=*true*|*false*
+   Only show automated builds. The default is *false*.
+
+**--no-trunc**=*true*|*false*
+   Don't truncate output. The default is *false*.
+
+**-s**, **--stars**=0
+   Only display images with at least x stars
+
+# EXAMPLES
+
+## Search the registry for ranked images
+
+Search the registry for the term 'fedora' and only display those images
+ranked 3 or higher:
+
+    $ sudo docker search -s 3 fedora
+    NAME                  DESCRIPTION                                     STARS OFFICIAL  AUTOMATED
+    mattdm/fedora         A basic Fedora image corresponding roughly...   50
+    fedora                (Semi) Official Fedora base image.              38
+    mattdm/fedora-small   A small Fedora image on which to build. Co...   8
+    goldmann/wildfly      A WildFly application server running on a ...   3               [OK]
+
+## Search the registry for automated images
+
+Search the registry for the term 'fedora' and only display automated images
+ranked 1 or higher:
+
+    $ sudo docker search --automated -s 1 fedora
+    NAME               DESCRIPTION                                     STARS OFFICIAL  AUTOMATED
+    goldmann/wildfly   A WildFly application server running on a ...   3               [OK]
+    tutum/fedora-20    Fedora 20 image with SSH access. For the r...   1               [OK]
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-start.1.md docker.io-1.3.2~dfsg1/docs/man/docker-start.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-start.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-start.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,27 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-start - Restart a stopped container
+
+# SYNOPSIS
+**docker start**
+[**-a**|**--attach**[=*false*]]
+[**-i**|**--interactive**[=*false*]]
+CONTAINER [CONTAINER...]
+
+# DESCRIPTION
+
+Start a stopped container.
+
+# OPTIONS
+**-a**, **--attach**=*true*|*false*
+   Attach container's STDOUT and STDERR and forward all signals to the process. The default is *false*.
+
+**-i**, **--interactive**=*true*|*false*
+   Attach container's STDIN. The default is *false*.
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-stop.1.md docker.io-1.3.2~dfsg1/docs/man/docker-stop.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-stop.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-stop.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,23 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-stop - Stop a running container by sending SIGTERM and then SIGKILL after a grace period
+
+# SYNOPSIS
+**docker stop**
+[**-t**|**--time**[=*10*]]
+CONTAINER [CONTAINER...]
+
+# DESCRIPTION
+Stop a running container (send SIGTERM, and then SIGKILL after a
+grace period).
+
+# OPTIONS
+**-t**, **--time**=10
+   Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.
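+
+# EXAMPLES
+
+For example, stop a container (the container name *web* below is
+illustrative), giving it 5 seconds instead of the default 10 to shut down
+before it is killed:
+
+    # docker stop --time=5 web
+    web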
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-tag.1.md docker.io-1.3.2~dfsg1/docs/man/docker-tag.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-tag.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-tag.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,59 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-tag - Tag an image into a repository
+
+# SYNOPSIS
+**docker tag**
+[**-f**|**--force**[=*false*]]
+IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]
+
+# DESCRIPTION
+This will give a new alias to an image in the repository. This refers to the
+entire image name including the optional TAG after the ':'.
+
+# OPTIONS
+**-f**, **--force**=*true*|*false*
+   When set to true, force the alias. The default is *false*.
+
+**REGISTRYHOST**
+   The hostname of the registry if required. This may also include the port
+separated by a ':'.
+
+**USERNAME**
+   The username or other qualifying identifier for the image.
+
+**NAME**
+   The image name.
+
+**TAG**
+   The tag you are assigning to the image. Though this is arbitrary it is
+recommended to be used for a version to distinguish images with the same name.
+Note that here TAG is a part of the overall name or "tag".
+
+# EXAMPLES
+
+## Giving an image a new alias
+
+Here is an example of aliasing an image (e.g., 0e5574283393) as "httpd" and
+tagging it into the "fedora" repository with "version1.0":
+
+    docker tag 0e5574283393 fedora/httpd:version1.0
+
+## Tagging an image for a private repository
+
+To push an image to a private registry and not the central Docker
+registry, you must tag it with the registry hostname and port (if needed).
+
+    docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
+July 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-top.1.md docker.io-1.3.2~dfsg1/docs/man/docker-top.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-top.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-top.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,31 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-top - Display the running processes of a container
+
+# SYNOPSIS
+**docker top**
+CONTAINER [ps OPTIONS]
+
+# DESCRIPTION
+
+Look up the running processes of the container. The ps OPTIONS can be any of
+the options you would pass to a Linux **ps** command.
+
+# OPTIONS
+There are no available options.
+
+# EXAMPLES
+
+Run **docker top** with the ps option of -x:
+
+    $ sudo docker top 8601afda2b -x
+    PID      TTY       STAT       TIME         COMMAND
+    16623    ?         Ss         0:00         sleep 99999
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-unpause.1.md docker.io-1.3.2~dfsg1/docs/man/docker-unpause.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-unpause.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-unpause.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,24 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-unpause - Unpause all processes within a container
+
+# SYNOPSIS
+**docker unpause**
+CONTAINER
+
+# DESCRIPTION
+
+The `docker unpause` command uses the cgroups freezer to un-suspend all
+processes in a container.
+
+See the [cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt)
+for further details.
+
+# OPTIONS
+There are no available options.
+
+# HISTORY
+June 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-version.1.md docker.io-1.3.2~dfsg1/docs/man/docker-version.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-version.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-version.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,15 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-version - Show the Docker version information.
+
+# SYNOPSIS
+**docker version**
+
+# OPTIONS
+There are no available options.
+
+# HISTORY
+June 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/docker-wait.1.md docker.io-1.3.2~dfsg1/docs/man/docker-wait.1.md
--- docker.io-0.9.1~dfsg1/docs/man/docker-wait.1.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/docker-wait.1.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,28 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-wait - Block until a container stops, then print its exit code.
+
+# SYNOPSIS
+**docker wait**
+CONTAINER [CONTAINER...]
+
+# DESCRIPTION
+
+Block until a container stops, then print its exit code.
+
+# OPTIONS
+There are no available options.
+
+# EXAMPLES
+
+    $ sudo docker run -d fedora sleep 99
+    079b83f558a2bc52ecad6b2a5de13622d584e6bb1aea058c11b36511e85e7622
+    $ sudo docker wait 079b83f558a2bc
+    0
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff -Nru docker.io-0.9.1~dfsg1/docs/man/md2man-all.sh docker.io-1.3.2~dfsg1/docs/man/md2man-all.sh
--- docker.io-0.9.1~dfsg1/docs/man/md2man-all.sh 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/md2man-all.sh 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,22 @@
+#!/bin/bash
+set -e
+
+# get into this script's directory
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+[ "$1" = '-q' ] || {
+	set -x
+	pwd
+}
+
+for FILE in *.md; do
+	base="$(basename "$FILE")"
+	name="${base%.md}"
+	num="${name##*.}"
+	if [ -z "$num" -o "$name" = "$num" ]; then
+		# skip files that aren't of the format xxxx.N.md (like README.md)
+		continue
+	fi
+	mkdir -p "./man${num}"
+	go-md2man -in "$FILE" -out "./man${num}/${name}"
+done
diff -Nru docker.io-0.9.1~dfsg1/docs/man/README.md docker.io-1.3.2~dfsg1/docs/man/README.md
--- docker.io-0.9.1~dfsg1/docs/man/README.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/man/README.md 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,70 @@
+Docker Documentation
+====================
+
+This directory contains the Docker user manual in the Markdown format.
+Do *not* edit the man pages in the man1 directory. Instead, amend the
+Markdown (*.md) files.
+
+# File List
+
+    docker.md
+    docker-attach.md
+    docker-build.md
+    docker-commit.md
+    docker-cp.md
+    docker-diff.md
+    docker-events.md
+    docker-export.md
+    docker-history.md
+    docker-images.md
+    docker-import.md
+    docker-info.md
+    docker-inspect.md
+    docker-kill.md
+    docker-load.md
+    docker-login.md
+    docker-logs.md
+    docker-port.md
+    docker-ps.md
+    docker-pull.md
+    docker-push.md
+    docker-restart.md
+    docker-rmi.md
+    docker-rm.md
+    docker-run.md
+    docker-save.md
+    docker-search.md
+    docker-start.md
+    docker-stop.md
+    docker-tag.md
+    docker-top.md
+    docker-wait.md
+    Dockerfile
+    md2man-all.sh
+
+# Generating man pages from the Markdown files
+
+The recommended approach for generating the man pages is via a Docker
+container using the supplied `Dockerfile` to create an image with the correct
+environment. This uses `go-md2man`, a pure Go Markdown to man page generator.
+
+## Building the md2man image
+
+There is a `Dockerfile` provided in the `docker/docs/man` directory.
+
+Using this `Dockerfile`, create a Docker image tagged `docker/md2man`:
+
+    docker build -t docker/md2man .
+
+## Utilizing the image
+
+Once the image is built, run a container using the image with *volumes*:
+
+    docker run -v /<path-to-git-dir>/docker/docs/man:/docs:rw \
+    -w /docs -i docker/md2man /docs/md2man-all.sh
+
+The `md2man` Docker container will process the Markdown files and generate
+the man pages inside the `docker/docs/man/man1` directory using
+Docker volumes. For more information on Docker volumes see the man page for
+`docker run` and also look at the article [Sharing Directories via
+Volumes](http://docs.docker.com/use/working_with_volumes/).
diff -Nru docker.io-0.9.1~dfsg1/docs/mkdocs.yml docker.io-1.3.2~dfsg1/docs/mkdocs.yml
--- docker.io-0.9.1~dfsg1/docs/mkdocs.yml 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/docs/mkdocs.yml 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,150 @@
+site_name: Docker Documentation
+#site_url: http://docs.docker.com/
+site_url: /
+site_description: Documentation for fast and lightweight Docker container based virtualization framework.
+site_favicon: img/favicon.png
+
+dev_addr: '0.0.0.0:8000'
+
+repo_url: https://github.com/docker/docker/
+
+docs_dir: sources
+
+include_search: true
+
+use_absolute_urls: true
+
+# theme: docker
+theme_dir: ./theme/mkdocs/
+theme_center_lead: false
+
+copyright: Copyright © 2014, Docker, Inc.
+google_analytics: ['UA-6096819-11', 'docker.io']
+
+pages:
+
+# Introduction:
+- ['index.md', 'About', 'Docker']
+- ['release-notes.md', 'About', 'Release Notes']
+- ['introduction/index.md', '**HIDDEN**']
+- ['introduction/understanding-docker.md', 'About', 'Understanding Docker']
+
+# Installation:
+- ['installation/index.md', '**HIDDEN**']
+- ['installation/mac.md', 'Installation', 'Mac OS X']
+- ['installation/ubuntulinux.md', 'Installation', 'Ubuntu']
+- ['installation/rhel.md', 'Installation', 'Red Hat Enterprise Linux']
+- ['installation/oracle.md', 'Installation', 'Oracle Linux']
+- ['installation/centos.md', 'Installation', 'CentOS']
+- ['installation/debian.md', 'Installation', 'Debian']
+- ['installation/gentoolinux.md', 'Installation', 'Gentoo']
+- ['installation/google.md', 'Installation', 'Google Cloud Platform']
+- ['installation/rackspace.md', 'Installation', 'Rackspace Cloud']
+- ['installation/amazon.md', 'Installation', 'Amazon EC2']
+- ['installation/softlayer.md', 'Installation', 'IBM Softlayer']
+- ['installation/archlinux.md', 'Installation', 'Arch Linux']
+- ['installation/frugalware.md', 'Installation', 'FrugalWare']
+- ['installation/fedora.md', 'Installation', 'Fedora']
+- ['installation/openSUSE.md', 'Installation', 'openSUSE']
+- ['installation/cruxlinux.md', 'Installation', 'CRUX Linux']
+- ['installation/windows.md', 'Installation', 'Microsoft Windows']
+- ['installation/binaries.md', 'Installation', 'Binaries']
+
+# User Guide:
+- ['userguide/index.md', 'User Guide', 'The Docker User Guide' ]
+- ['userguide/dockerhub.md', 'User Guide', 'Getting Started with Docker Hub' ]
+- ['userguide/dockerizing.md', 'User Guide', 'Dockerizing Applications' ]
+- ['userguide/usingdocker.md', 'User Guide', 'Working with Containers' ]
+- ['userguide/dockerimages.md', 'User Guide', 'Working with Docker Images' ]
+- ['userguide/dockerlinks.md', 'User Guide', 'Linking containers together' ]
+- ['userguide/dockervolumes.md', 'User Guide', 'Managing data in containers' ]
+- ['userguide/dockerrepos.md', 'User Guide', 'Working with Docker Hub' ]
+- ['userguide/level1.md', '**HIDDEN**' ]
+- ['userguide/level2.md', '**HIDDEN**' ]
+
+# Docker Hub docs:
+- ['docker-hub/index.md', 'Docker Hub', 'Docker Hub' ]
+- ['docker-hub/accounts.md', 'Docker Hub', 'Accounts']
+- ['docker-hub/repos.md', 'Docker Hub', 'Repositories']
+- ['docker-hub/builds.md', 'Docker Hub', 'Automated Builds']
+- ['docker-hub/official_repos.md', 'Docker Hub', 'Official Repo Guidelines']
+
+# Examples:
+- ['examples/index.md', '**HIDDEN**']
+- ['examples/nodejs_web_app.md', 'Examples', 'Dockerizing a Node.js web application']
+- ['examples/mongodb.md', 'Examples', 'Dockerizing MongoDB']
+- ['examples/running_redis_service.md', 'Examples', 'Dockerizing a Redis service']
+- ['examples/postgresql_service.md', 'Examples', 'Dockerizing a PostgreSQL service']
+- ['examples/running_riak_service.md', 'Examples', 'Dockerizing a Riak service']
+- ['examples/running_ssh_service.md', 'Examples', 'Dockerizing an SSH service']
+- ['examples/couchdb_data_volumes.md', 'Examples', 'Dockerizing a CouchDB service']
+- ['examples/apt-cacher-ng.md', 'Examples', 'Dockerizing an Apt-Cacher-ng service']
+
+# Articles
+- ['articles/index.md', '**HIDDEN**']
+- ['articles/basics.md', 'Articles', 'Docker basics']
+- ['articles/networking.md', 'Articles', 'Advanced networking']
+- ['articles/security.md', 'Articles', 'Security']
+- ['articles/https.md', 'Articles', 'Running Docker with HTTPS']
+- ['articles/host_integration.md', 'Articles', 'Automatically starting containers']
+- ['articles/baseimages.md', 'Articles', 'Creating a base image']
+- ['articles/dockerfile_best-practices.md', 'Articles', 'Best practices for writing Dockerfiles']
+- ['articles/certificates.md', 'Articles', 'Using certificates for repository client verification']
+- ['articles/using_supervisord.md', 'Articles', 'Using Supervisor']
+- ['articles/cfengine_process_management.md', 'Articles', 'Process management with CFEngine']
+- ['articles/puppet.md', 'Articles', 'Using Puppet']
+- ['articles/chef.md', 'Articles', 'Using Chef']
+- ['articles/dsc.md', 'Articles', 'Using PowerShell DSC']
+- ['articles/ambassador_pattern_linking.md', 'Articles', 'Cross-Host linking using ambassador containers']
+- ['articles/runmetrics.md', 'Articles', 'Runtime metrics']
+- ['articles/b2d_volume_resize.md', 'Articles', 'Increasing a Boot2Docker volume']
+
+# Reference
+- ['reference/index.md', '**HIDDEN**']
+- ['reference/commandline/index.md', '**HIDDEN**']
+- ['reference/commandline/cli.md', 'Reference', 'Command line']
+- ['reference/builder.md', 'Reference', 'Dockerfile']
+- ['faq.md', 'Reference', 'FAQ']
+- ['reference/run.md', 'Reference', 'Run Reference']
+- ['reference/api/index.md', '**HIDDEN**']
+- ['reference/api/docker-io_api.md', 'Reference', 'Docker Hub API']
+- ['reference/api/registry_api.md', 'Reference', 'Docker Registry API']
+- ['reference/api/registry_api_client_libraries.md', 'Reference', 'Docker Registry API Client Libraries']
+- ['reference/api/hub_registry_spec.md', 'Reference', 'Docker Hub and Registry Spec']
+- ['reference/api/docker_remote_api.md', 'Reference', 'Docker Remote API']
+- ['reference/api/docker_remote_api_v1.15.md', 'Reference', 'Docker Remote API v1.15']
+- ['reference/api/docker_remote_api_v1.14.md', 'Reference', 'Docker Remote API v1.14']
+- ['reference/api/docker_remote_api_v1.13.md', 'Reference', 'Docker Remote API v1.13']
+- ['reference/api/docker_remote_api_v1.12.md', 'Reference', 'Docker Remote API v1.12']
+- ['reference/api/docker_remote_api_v1.11.md', 'Reference', 'Docker Remote API v1.11']
+- ['reference/api/docker_remote_api_v1.10.md', '**HIDDEN**']
+- ['reference/api/docker_remote_api_v1.9.md', '**HIDDEN**']
+- ['reference/api/docker_remote_api_v1.8.md', '**HIDDEN**']
+- ['reference/api/docker_remote_api_v1.7.md', '**HIDDEN**']
+- ['reference/api/docker_remote_api_v1.6.md', '**HIDDEN**']
+- ['reference/api/docker_remote_api_v1.5.md', '**HIDDEN**']
+- ['reference/api/docker_remote_api_v1.4.md', '**HIDDEN**']
+- ['reference/api/docker_remote_api_v1.3.md', '**HIDDEN**']
+- ['reference/api/docker_remote_api_v1.2.md', '**HIDDEN**']
+- ['reference/api/docker_remote_api_v1.1.md', '**HIDDEN**']
+- ['reference/api/docker_remote_api_v1.0.md', '**HIDDEN**']
+- ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API Client Libraries']
+- ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker Hub Accounts API']
+
+- ['jsearch.md', '**HIDDEN**']
+
+# - ['static_files/README.md', 'static_files', 'README']
+- ['terms/index.md', '**HIDDEN**']
+- ['terms/layer.md', '**HIDDEN**']
+- ['terms/registry.md', '**HIDDEN**']
+- ['terms/container.md', '**HIDDEN**']
+- ['terms/repository.md', '**HIDDEN**']
+- ['terms/filesystem.md', '**HIDDEN**']
+- ['terms/image.md', '**HIDDEN**']
+
+# Contribute:
+- ['contributing/index.md', '**HIDDEN**']
+- ['contributing/contributing.md', 'Contribute', 'Contributing']
+- ['contributing/devenvironment.md', 'Contribute', 'Development environment']
environment'] diff -Nru docker.io-0.9.1~dfsg1/docs/README.md docker.io-1.3.2~dfsg1/docs/README.md --- docker.io-0.9.1~dfsg1/docs/README.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/README.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,162 @@ +# Docker Documentation + +The source for Docker documentation is here under `sources/` and uses extended +Markdown, as implemented by [MkDocs](http://mkdocs.org). + +The HTML files are built and hosted on `https://docs.docker.com`, and update +automatically after each change to the master or release branch of [Docker on +GitHub](https://github.com/docker/docker) thanks to post-commit hooks. The +`docs` branch maps to the "latest" documentation and the `master` (unreleased +development) branch maps to the "master" documentation. + +## Contributing + +- Follow the contribution guidelines ([see + `../CONTRIBUTING.md`](../CONTRIBUTING.md)). +- [Remember to sign your work!](../CONTRIBUTING.md#sign-your-work) + +## Getting Started + +Docker documentation builds are done in a Docker container, which installs all +the required tools, adds the local `docs/` directory, and builds the HTML docs. +It then starts an HTTP server on port 8000 so that you can connect and see your +changes. + +In the root of the `docker` source directory: + + $ make docs + .... (lots of output) .... + $ docker run --rm -it -e AWS_S3_BUCKET -p 8000:8000 "docker-docs:master" mkdocs serve + Running at: http://0.0.0.0:8000/ + Live reload enabled. + Hold ctrl+c to quit. + +If you have any issues you need to debug, you can use `make docs-shell` and then +run `mkdocs serve`. + +## Adding a new document + +New document (`.md`) files are added to the documentation builds by adding them +to the menu definition in the `docs/mkdocs.yml` file. + +## Style guide + +The documentation is written with paragraphs wrapped at 80 columns to make +them easier to read in a terminal. + +### Examples + +When writing examples, give the user hints by making them resemble what they see +in their shell: + +- Indent shell examples by 4 spaces so they get rendered as code. +- Start typed commands with `$ ` (dollar space), so that they are easily + differentiated from program output. +- Program output has no prefix. +- Comments begin with `# ` (hash space). +- In-container shell commands begin with `$$ ` (dollar dollar space). + +### Images + +When you need to add images, try to make them as small as possible (e.g., as +gifs). Usually images should go in the same directory as the `.md` file which +references them, or in a subdirectory if one already exists. + +## Working with GitHub's file editor + +Alternatively, for small changes and typos you might want to use GitHub's built-in +file editor. It allows you to preview your changes right online (though +there can be some differences between GitHub Markdown and [MkDocs +Markdown](http://www.mkdocs.org/user-guide/writing-your-docs/)). Just be +careful not to create many commits. And you must still [sign your +work!](../CONTRIBUTING.md#sign-your-work) + +## Branches + +**There are two branches related to editing docs**: `master` and a `docs` +branch. You should always edit the documentation on a local branch of the `master` +branch, and send a PR against `master`. + +That way your edits will automatically get included in later releases, and docs +maintainers can easily cherry-pick your changes into the `docs` release branch. +In the rare case where your change is not forward-compatible, you may need to +base your changes on the `docs` branch.
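[Editor's note: a sketch of the workflow just described, following the README's own example conventions; the branch and file names here are purely illustrative.]

    $ git checkout -b networking-typo-fix upstream/master
    # edit sources/articles/networking.md
    $ git commit -s -a -m "Fix a typo in the advanced networking article"
    $ git push origin networking-typo-fix
    # then open a pull request against master on GitHub

The `-s` flag adds the Signed-off-by line; as noted above, every docs commit must be signed.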
+ +Also, now that we have a `docs` branch, we can keep the +[http://docs.docker.com](http://docs.docker.com) docs up to date with any bugs +found between Docker code releases. + +> **Warning**: When *reading* the docs, the +> [http://docs-stage.docker.com](http://docs-stage.docker.com) documentation may +> include features not yet part of any official Docker release. The `beta-docs` +> site should be used only for understanding bleeding-edge development and +> `docs.docker.com` (which points to the `docs` branch) should be used for the +> latest official release. + +## Publishing Documentation + +To publish a copy of the documentation you need to have Docker up and running on your +machine. You'll also need a `docs/awsconfig` file containing the AWS settings for the bucket you deploy to. +The release script will create an S3 bucket if needed, and will then push the files to it. + [profile dowideit-docs] aws_access_key_id = IHOIUAHSIDH234rwf.... aws_secret_access_key = OIUYSADJHLKUHQWIUHE...... region = ap-southeast-2 + +The `profile` name must be the same as the name of the bucket you are deploying +to. Run the release from the `docker` directory: + make AWS_S3_BUCKET=dowideit-docs docs-release + +This will publish _only_ to the `http://bucket-url/v1.2/` version of the +documentation. + +If you're publishing the current release's documentation, you also need +to update the root docs pages by running + make AWS_S3_BUCKET=dowideit-docs BUILD_ROOT=yes docs-release + +> **Note:** if you are using Boot2Docker on OS X and the above command returns an error, +> `Post http:///var/run/docker.sock/build?rm=1&t=docker-docs%3Apost-1.2.0-docs_update-2: +> dial unix /var/run/docker.sock: no such file or directory`, you need to set the Docker +> host. Run `$(boot2docker shellinit)` to see the correct variable to set. The command +> will return the full `export` command, so you can just cut and paste. + +## Cherry-picking documentation changes to update an existing release + +Whenever the core team makes a release, they publish the documentation based +on the `release` branch (which is copied into the `docs` branch). The +documentation team can make updates in the meantime, by cherry-picking changes +from `master` into any of the docs branches. + +For example, to update the current release's docs: + + git fetch upstream + git checkout -b post-1.2.0-docs-update-1 upstream/docs + # Then go through the merge commits linked to PRs (making sure they apply + to that release) + # see https://github.com/docker/docker/commits/master + git cherry-pick -x fe845c4 + # Repeat until you have cherry-picked everything you will propose to be merged + git push upstream post-1.2.0-docs-update-1 + +Then make a pull request to merge into the `docs` branch, __NOT__ into master. + +Once the PR has the needed `LGTM`s, merge it, then publish to our beta server +to test: + + git fetch upstream + git checkout post-1.2.0-docs-update-1 + git reset --hard upstream/post-1.2.0-docs-update-1 + make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release + +Then go to http://beta-docs.docker.io.s3-website-us-west-2.amazonaws.com/ +to view your results and make sure what you published is what you wanted. + +When you're happy with it, publish the docs to our live site: + + make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes docs-release + +Note that the new docs will not appear live on the site until the cache (a complex, +distributed CDN system) is flushed. This requires someone with S3 keys. Contact Docker +(Sven Dowideit or John Costa) for assistance.
+ diff -Nru docker.io-0.9.1~dfsg1/docs/release.sh docker.io-1.3.2~dfsg1/docs/release.sh --- docker.io-0.9.1~dfsg1/docs/release.sh 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/release.sh 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,134 @@ +#!/usr/bin/env bash +set -e + +set -o pipefail + +usage() { + cat >&2 <<'EOF' +To publish the Docker documentation you need to set your access_key and secret_key in the docs/awsconfig file +(with the keys in a [profile $AWS_S3_BUCKET] section - so you can have more than one set of keys in your file) +and set the AWS_S3_BUCKET env var to the name of your bucket. + +If you're publishing the current release's documentation, also set `BUILD_ROOT=yes` + +make AWS_S3_BUCKET=docs-stage.docker.com docs-release + +will then push the documentation site to your s3 bucket. EOF + exit 1 +} + +[ "$AWS_S3_BUCKET" ] || usage + +VERSION=$(cat VERSION) + +if [ "$AWS_S3_BUCKET" == "docs.docker.com" ]; then + if [ "${VERSION%-dev}" != "$VERSION" ]; then + echo "Please do not push '-dev' documentation to docs.docker.com ($VERSION)" + exit 1 + fi + cat > ./sources/robots.txt <<'EOF' +User-agent: * +Allow: / EOF + +else + cat > ./sources/robots.txt <<'EOF' +User-agent: * +Disallow: / EOF +fi + +# Strip the patch level and add a 'v' prefix - 1.0.2-dev -> v1.0 +MAJOR_MINOR="v${VERSION%.*}" +export MAJOR_MINOR + +export BUCKET=$AWS_S3_BUCKET + +export AWS_CONFIG_FILE=$(pwd)/awsconfig +[ -e "$AWS_CONFIG_FILE" ] || usage +export AWS_DEFAULT_PROFILE=$BUCKET + +echo "cfg file: $AWS_CONFIG_FILE ; profile: $AWS_DEFAULT_PROFILE" + +setup_s3() { + echo "Create $BUCKET" + # Try creating the bucket. Ignore errors (it might already exist). + aws s3 mb --profile $BUCKET s3://$BUCKET 2>/dev/null || true + # Check access to the bucket. + echo "test $BUCKET exists" + aws s3 --profile $BUCKET ls s3://$BUCKET + # Make the bucket accessible through website endpoints.
+ echo "make $BUCKET accessible as a website" + #aws s3 website s3://$BUCKET --index-document index.html --error-document jsearch/index.html + s3conf=$(cat s3_website.json | envsubst) + echo + echo $s3conf + echo + aws s3api --profile $BUCKET put-bucket-website --bucket $BUCKET --website-configuration "$s3conf" +} + +build_current_documentation() { + mkdocs build +} + +upload_current_documentation() { + src=site/ + dst=s3://$BUCKET$1 + + echo + echo "Uploading $src" + echo " to $dst" + echo + #s3cmd --recursive --follow-symlinks --preserve --acl-public sync "$src" "$dst" + #aws s3 cp --profile $BUCKET --cache-control "max-age=3600" --acl public-read "site/search_content.json" "$dst" + + # a really complicated way to send only the files we want + # if there are too many in any one set, aws s3 sync seems to fall over with 2 files to go + # versions.html_fragment + endings=( json txt html xml css js gif png JPG ttf svg woff html_fragment ) + for i in ${endings[@]}; do + include="" + for j in ${endings[@]}; do + if [ "$i" != "$j" ];then + include="$include --exclude *.$j" + fi + done + include="--include *.$i $include" + echo "uploading *.$i" + run="aws s3 sync --profile $BUCKET --cache-control \"max-age=3600\" --acl public-read \ + $include \ + --exclude *.text* \ + --exclude *.*~ \ + --exclude *Dockerfile \ + --exclude *.DS_Store \ + --exclude *.psd \ + --exclude *.ai \ + --exclude *.eot \ + --exclude *.otf \ + --exclude *.rej \ + --exclude *.rst \ + --exclude *.orig \ + --exclude *.py \ + $src $dst" + echo "=======================" + #echo "$run" + #echo "=======================" + $run + done +} + +setup_s3 + +# Default to only building the version specific docs so we don't clober the latest by accident with old versions +if [ "$BUILD_ROOT" == "yes" ]; then + echo "Building root documentation" + build_current_documentation + upload_current_documentation +fi + +#build again with /v1.0/ prefix +sed -i "s/^site_url:.*/site_url: \/$MAJOR_MINOR\//" mkdocs.yml +echo "Building the /$MAJOR_MINOR/ documentation" +build_current_documentation +upload_current_documentation "/$MAJOR_MINOR/" diff -Nru docker.io-0.9.1~dfsg1/docs/s3_website.json docker.io-1.3.2~dfsg1/docs/s3_website.json --- docker.io-0.9.1~dfsg1/docs/s3_website.json 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/docs/s3_website.json 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,36 @@ +{ + "ErrorDocument": { + "Key": "jsearch/index.html" + }, + "IndexDocument": { + "Suffix": "index.html" + }, + "RoutingRules": [ + { "Condition": { "KeyPrefixEquals": "en/latest/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "" } }, + { "Condition": { "KeyPrefixEquals": "en/master/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "" } }, + { "Condition": { "KeyPrefixEquals": "en/v0.6.3/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "" } }, + { "Condition": { "KeyPrefixEquals": "jsearch/index.html" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "jsearch/" } }, + { "Condition": { "KeyPrefixEquals": "index/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-io/" } }, + { "Condition": { "KeyPrefixEquals": "reference/api/index_api/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "reference/api/docker-io_api/" } }, + { "Condition": { "KeyPrefixEquals": "examples/hello_world/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerizing/" } }, + { "Condition": { "KeyPrefixEquals": "examples/python_web_app/" }, 
"Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerizing/" } }, + { "Condition": { "KeyPrefixEquals": "use/working_with_volumes/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockervolumes/" } }, + { "Condition": { "KeyPrefixEquals": "use/working_with_links_names/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerlinks/" } }, + { "Condition": { "KeyPrefixEquals": "use/workingwithrepository/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerrepos/" } }, + { "Condition": { "KeyPrefixEquals": "use/port_redirection" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerlinks/" } }, + { "Condition": { "KeyPrefixEquals": "use/networking/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/networking/" } }, + { "Condition": { "KeyPrefixEquals": "use/puppet/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/puppet/" } }, + { "Condition": { "KeyPrefixEquals": "use/ambassador_pattern_linking/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/ambassador_pattern_linking/" } }, + { "Condition": { "KeyPrefixEquals": "use/basics/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/basics/" } }, + { "Condition": { "KeyPrefixEquals": "use/chef/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/chef/" } }, + { "Condition": { "KeyPrefixEquals": "use/host_integration/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/host_integration/" } }, + { "Condition": { "KeyPrefixEquals": "docker-io/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-hub/" } }, + { "Condition": { "KeyPrefixEquals": "examples/cfengine_process_management/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/cfengine_process_management/" } }, + { "Condition": { "KeyPrefixEquals": "examples/https/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/https/" } }, + { "Condition": { "KeyPrefixEquals": "examples/ambassador_pattern_linking/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/ambassador_pattern_linking/" } }, + { "Condition": { "KeyPrefixEquals": "examples/using_supervisord/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/using_supervisord/" } }, + { "Condition": { "KeyPrefixEquals": "reference/api/registry_index_spec/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "reference/api/hub_registry_spec/" } }, + { "Condition": { "KeyPrefixEquals": "use/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "examples/" } } + ] +} + diff -Nru docker.io-0.9.1~dfsg1/.drone.yml docker.io-1.3.2~dfsg1/.drone.yml --- docker.io-0.9.1~dfsg1/.drone.yml 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/.drone.yml 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,14 @@ +image: dockercore/docker +env: + - AUTO_GOPATH=1 + - DOCKER_GRAPHDRIVER=vfs + - DOCKER_EXECDRIVER=native +script: +# Setup the DockerInDocker environment. + - hack/dind +# Tests relying on StartWithBusybox make Drone time out. + - rm integration-cli/docker_cli_daemon_test.go + - rm integration-cli/docker_cli_exec_test.go +# Validate and test. 
+ - hack/make.sh validate-dco validate-gofmt + - hack/make.sh binary cross test-unit test-integration-cli test-integration diff -Nru docker.io-0.9.1~dfsg1/engine/engine.go docker.io-1.3.2~dfsg1/engine/engine.go --- docker.io-0.9.1~dfsg1/engine/engine.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/engine/engine.go 2014-11-24 17:38:01.000000000 +0000 @@ -3,15 +3,24 @@ import ( "bufio" "fmt" - "github.com/dotcloud/docker/utils" "io" - "log" "os" - "runtime" "sort" "strings" + "sync" + "time" + + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/utils" ) +// Installer is a standard interface for objects which can "install" themselves +// on an engine by registering handlers. +// This can be used as an entrypoint for external plugins etc. +type Installer interface { + Install(*Engine) error +} + type Handler func(*Job) Status var globalHandlers map[string]Handler @@ -37,17 +46,18 @@ // It acts as a store for *containers*, and allows manipulation of these // containers by executing *jobs*. type Engine struct { - root string - handlers map[string]Handler - hack Hack // data for temporary hackery (see hack.go) - id string - Stdout io.Writer - Stderr io.Writer - Stdin io.Reader -} - -func (eng *Engine) Root() string { - return eng.root + handlers map[string]Handler + catchall Handler + hack Hack // data for temporary hackery (see hack.go) + id string + Stdout io.Writer + Stderr io.Writer + Stdin io.Reader + Logging bool + tasks sync.WaitGroup + l sync.RWMutex // lock for shutdown + shutdown bool + onShutdown []func() // shutdown handlers } func (eng *Engine) Register(name string, handler Handler) error { @@ -59,43 +69,19 @@ return nil } -// New initializes a new engine managing the directory specified at `root`. -// `root` is used to store containers and any other state private to the engine. -// Changing the contents of the root without executing a job will cause unspecified -// behavior. -func New(root string) (*Engine, error) { - // Check for unsupported architectures - if runtime.GOARCH != "amd64" { - return nil, fmt.Errorf("The docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH) - } - // Check for unsupported kernel versions - // FIXME: it would be cleaner to not test for specific versions, but rather - // test for specific functionalities. - // Unfortunately we can't test for the feature "does not cause a kernel panic" - // without actually causing a kernel panic, so we need this workaround until - // the circumstances of pre-3.8 crashes are clearer. - // For details see http://github.com/dotcloud/docker/issues/407 - if k, err := utils.GetKernelVersion(); err != nil { - log.Printf("WARNING: %s\n", err) - } else { - if utils.CompareKernelVersion(k, &utils.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 { - if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { - log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String()) - } - } - } - - if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) { - return nil, err - } +func (eng *Engine) RegisterCatchall(catchall Handler) { + eng.catchall = catchall +} +// New initializes a new engine. 
+func New() *Engine { eng := &Engine{ - root: root, handlers: make(map[string]Handler), id: utils.RandomString(), Stdout: os.Stdout, Stderr: os.Stderr, Stdin: os.Stdin, + Logging: true, } eng.Register("commands", func(job *Job) Status { for _, name := range eng.commands() { @@ -107,11 +93,11 @@ for k, v := range globalHandlers { eng.handlers[k] = v } - return eng, nil + return eng } func (eng *Engine) String() string { - return fmt.Sprintf("%s|%s", eng.Root(), eng.id[:8]) + return fmt.Sprintf("%s", eng.id[:8]) } // Commands returns a list of all currently registered commands, @@ -129,22 +115,100 @@ // This function mimics `Command` from the standard os/exec package. func (eng *Engine) Job(name string, args ...string) *Job { job := &Job{ - Eng: eng, - Name: name, - Args: args, - Stdin: NewInput(), - Stdout: NewOutput(), - Stderr: NewOutput(), - env: &Env{}, + Eng: eng, + Name: name, + Args: args, + Stdin: NewInput(), + Stdout: NewOutput(), + Stderr: NewOutput(), + env: &Env{}, + closeIO: true, } - job.Stderr.Add(utils.NopWriteCloser(eng.Stderr)) - handler, exists := eng.handlers[name] - if exists { + if eng.Logging { + job.Stderr.Add(ioutils.NopWriteCloser(eng.Stderr)) + } + + // A handler registered with Register shadows the catchall. + if handler, exists := eng.handlers[name]; exists { job.handler = handler + } else if eng.catchall != nil && name != "" { + // Empty job names are illegal, catchall or not. + job.handler = eng.catchall } return job } +// OnShutdown registers a new callback to be called by Shutdown. +// This is typically used by services to perform cleanup. +func (eng *Engine) OnShutdown(h func()) { + eng.l.Lock() + eng.onShutdown = append(eng.onShutdown, h) + eng.l.Unlock() +} + +// Shutdown permanently shuts down eng as follows: +// - It refuses all new jobs, permanently. +// - It waits for all active jobs to complete, with a 5 second timeout. +// - It calls all shutdown handlers concurrently (if any) +// - It returns when all handlers complete, or after a further 10 seconds, +// whichever happens first. +func (eng *Engine) Shutdown() { + eng.l.Lock() + if eng.shutdown { + eng.l.Unlock() + return + } + eng.shutdown = true + eng.l.Unlock() + // We don't need to protect the rest with a lock, to allow + // for other calls to immediately fail with "shutdown" instead + // of hanging for 15 seconds. + // This requires all concurrent calls to check for shutdown, otherwise + // it might cause a race. + + // Wait for all jobs to complete. + // Timeout after 5 seconds. + tasksDone := make(chan struct{}) + go func() { + eng.tasks.Wait() + close(tasksDone) + }() + select { + case <-time.After(time.Second * 5): + case <-tasksDone: + } + + // Call shutdown handlers, if any. + // Timeout after 10 seconds. + var wg sync.WaitGroup + for _, h := range eng.onShutdown { + wg.Add(1) + go func(h func()) { + defer wg.Done() + h() + }(h) + } + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-time.After(time.Second * 10): + case <-done: + } + return +} + +// IsShutdown returns true if the engine is in the process +// of shutting down, or already shut down. +// Otherwise it returns false. +func (eng *Engine) IsShutdown() bool { + eng.l.RLock() + defer eng.l.RUnlock() + return eng.shutdown +} + // ParseJob creates a new job from a text description using a shell-like syntax.
// // The following syntax is used to parse `input`: @@ -188,9 +252,9 @@ } func (eng *Engine) Logf(format string, args ...interface{}) (n int, err error) { - if os.Getenv("TEST") == "" { - prefixedFormat := fmt.Sprintf("[%s] %s\n", eng, strings.TrimRight(format, "\n")) - return fmt.Fprintf(eng.Stderr, prefixedFormat, args...) + if !eng.Logging { + return 0, nil } - return 0, nil + prefixedFormat := fmt.Sprintf("[%s] %s\n", eng, strings.TrimRight(format, "\n")) + return fmt.Fprintf(eng.Stderr, prefixedFormat, args...) } diff -Nru docker.io-0.9.1~dfsg1/engine/engine_test.go docker.io-1.3.2~dfsg1/engine/engine_test.go --- docker.io-0.9.1~dfsg1/engine/engine_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/engine/engine_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -2,10 +2,6 @@ import ( "bytes" - "io/ioutil" - "os" - "path" - "path/filepath" "strings" "testing" ) @@ -21,7 +17,7 @@ // Register is global so let's cleanup to avoid conflicts defer unregister("dummy1") - eng := newTestEngine(t) + eng := New() //Should fail because global handlers are copied //at the engine creation @@ -40,7 +36,7 @@ } func TestJob(t *testing.T) { - eng := newTestEngine(t) + eng := New() job1 := eng.Job("dummy1", "--level=awesome") if job1.handler != nil { @@ -65,9 +61,19 @@ } } +func TestEngineShutdown(t *testing.T) { + eng := New() + if eng.IsShutdown() { + t.Fatalf("Engine should not show as shutdown") + } + eng.Shutdown() + if !eng.IsShutdown() { + t.Fatalf("Engine should show as shutdown") + } +} + func TestEngineCommands(t *testing.T) { - eng := newTestEngine(t) - defer os.RemoveAll(eng.Root()) + eng := New() handler := func(job *Job) Status { return StatusOK } eng.Register("foo", handler) eng.Register("bar", handler) @@ -83,44 +89,9 @@ } } -func TestEngineRoot(t *testing.T) { - tmp, err := ioutil.TempDir("", "docker-test-TestEngineCreateDir") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - // We expect Root to resolve to an absolute path. - // FIXME: this should not be necessary. - // Until the above FIXME is implemented, let's check for the - // current behavior. 
- tmp, err = filepath.EvalSymlinks(tmp) - if err != nil { - t.Fatal(err) - } - tmp, err = filepath.Abs(tmp) - if err != nil { - t.Fatal(err) - } - dir := path.Join(tmp, "dir") - eng, err := New(dir) - if err != nil { - t.Fatal(err) - } - if st, err := os.Stat(dir); err != nil { - t.Fatal(err) - } else if !st.IsDir() { - t.Fatalf("engine.New() created something other than a directory at %s", dir) - } - if r := eng.Root(); r != dir { - t.Fatalf("Expected: %v\nReceived: %v", dir, r) - } -} - func TestEngineString(t *testing.T) { - eng1 := newTestEngine(t) - defer os.RemoveAll(eng1.Root()) - eng2 := newTestEngine(t) - defer os.RemoveAll(eng2.Root()) + eng1 := New() + eng2 := New() s1 := eng1.String() s2 := eng2.String() if eng1 == eng2 { @@ -129,8 +100,7 @@ } func TestEngineLogf(t *testing.T) { - eng := newTestEngine(t) - defer os.RemoveAll(eng.Root()) + eng := New() input := "Test log line" if n, err := eng.Logf("%s\n", input); err != nil { t.Fatal(err) @@ -140,8 +110,7 @@ } func TestParseJob(t *testing.T) { - eng := newTestEngine(t) - defer os.RemoveAll(eng.Root()) + eng := New() // Verify that the resulting job calls to the right place var called bool eng.Register("echo", func(job *Job) Status { @@ -175,3 +144,19 @@ t.Fatalf("Job was not called") } } + +func TestCatchallEmptyName(t *testing.T) { + eng := New() + var called bool + eng.RegisterCatchall(func(job *Job) Status { + called = true + return StatusOK + }) + err := eng.Job("").Run() + if err == nil { + t.Fatalf("Engine.Job(\"\").Run() should return an error") + } + if called { + t.Fatalf("Catchall handler should not be called for an empty job name") + } +} diff -Nru docker.io-0.9.1~dfsg1/engine/env.go docker.io-1.3.2~dfsg1/engine/env.go --- docker.io-0.9.1~dfsg1/engine/env.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/engine/env.go 2014-11-24 17:38:01.000000000 +0000 @@ -5,15 +5,16 @@ "encoding/json" "fmt" "io" - "sort" "strconv" "strings" ) type Env []string +// Get returns the last value associated with the given key. If there are no +// values associated with the key, Get returns the empty string. func (env *Env) Get(key string) (value string) { - // FIXME: use Map() + // not using Map() because of the extra allocations https://github.com/docker/docker/pull/7488#issuecomment-51638315 for _, kv := range *env { if strings.Index(kv, "=") == -1 { continue @@ -36,6 +37,13 @@ return exists } +// Len returns the number of keys in the environment. +// Note that len(env) might be different from env.Len(), +// because the same key might be set multiple times. +func (env *Env) Len() int { + return len(env.Map()) +} + func (env *Env) Init(src *Env) { (*env) = make([]string, 0, len(*src)) for _, val := range *src { @@ -179,6 +187,12 @@ } func (env *Env) SetAuto(k string, v interface{}) { + // Issue 7941 - if the value in the incoming JSON is null then treat it + // as if they never specified the property at all. + if v == nil { + return + } + // FIXME: we fix-convert float values to int, because // encoding/json decodes integers to float64, but cannot encode them back.
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46) @@ -193,6 +207,22 @@ } } +func changeFloats(v interface{}) interface{} { + switch v := v.(type) { + case float64: + return int(v) + case map[string]interface{}: + for key, val := range v { + v[key] = changeFloats(val) + } + case []interface{}: + for idx, val := range v { + v[idx] = changeFloats(val) + } + } + return v +} + func (env *Env) Encode(dst io.Writer) error { m := make(map[string]interface{}) for k, v := range env.Map() { @@ -201,10 +231,7 @@ // FIXME: we fix-convert float values to int, because // encoding/json decodes integers to float64, but cannot encode them back. // (See http://golang.org/src/pkg/encoding/json/decode.go#L46) - if fval, isFloat := val.(float64); isFloat { - val = int(fval) - } - m[k] = val + m[k] = changeFloats(val) } else { m[k] = v } @@ -245,134 +272,26 @@ return m } -type Table struct { - Data []*Env - sortKey string - Chan chan *Env -} - -func NewTable(sortKey string, sizeHint int) *Table { - return &Table{ - make([]*Env, 0, sizeHint), - sortKey, - make(chan *Env), - } -} - -func (t *Table) SetKey(sortKey string) { - t.sortKey = sortKey -} - -func (t *Table) Add(env *Env) { - t.Data = append(t.Data, env) -} - -func (t *Table) Len() int { - return len(t.Data) -} - -func (t *Table) Less(a, b int) bool { - return t.lessBy(a, b, t.sortKey) -} - -func (t *Table) lessBy(a, b int, by string) bool { - keyA := t.Data[a].Get(by) - keyB := t.Data[b].Get(by) - intA, errA := strconv.ParseInt(keyA, 10, 64) - intB, errB := strconv.ParseInt(keyB, 10, 64) - if errA == nil && errB == nil { - return intA < intB - } - return keyA < keyB -} - -func (t *Table) Swap(a, b int) { - tmp := t.Data[a] - t.Data[a] = t.Data[b] - t.Data[b] = tmp -} - -func (t *Table) Sort() { - sort.Sort(t) -} - -func (t *Table) ReverseSort() { - sort.Sort(sort.Reverse(t)) -} - -func (t *Table) WriteListTo(dst io.Writer) (n int64, err error) { - if _, err := dst.Write([]byte{'['}); err != nil { - return -1, err - } - n = 1 - for i, env := range t.Data { - bytes, err := env.WriteTo(dst) - if err != nil { - return -1, err - } - n += bytes - if i != len(t.Data)-1 { - if _, err := dst.Write([]byte{','}); err != nil { - return -1, err - } - n += 1 - } - } - if _, err := dst.Write([]byte{']'}); err != nil { - return -1, err - } - return n + 1, nil -} - -func (t *Table) ToListString() (string, error) { - buffer := bytes.NewBuffer(nil) - if _, err := t.WriteListTo(buffer); err != nil { - return "", err - } - return buffer.String(), nil -} - -func (t *Table) WriteTo(dst io.Writer) (n int64, err error) { - for _, env := range t.Data { - bytes, err := env.WriteTo(dst) - if err != nil { - return -1, err - } - n += bytes - } - return n, nil -} - -func (t *Table) ReadListFrom(src []byte) (n int64, err error) { - var array []interface{} - - if err := json.Unmarshal(src, &array); err != nil { - return -1, err - } - - for _, item := range array { - if m, ok := item.(map[string]interface{}); ok { - env := &Env{} - for key, value := range m { - env.SetAuto(key, value) - } - t.Add(env) - } +// MultiMap returns a representation of env as a +// map of string arrays, keyed by string. +// This is the same structure as http headers for example, +// which allow each key to have multiple values. 
+func (env *Env) MultiMap() map[string][]string { + m := make(map[string][]string) + for _, kv := range *env { + parts := strings.SplitN(kv, "=", 2) + m[parts[0]] = append(m[parts[0]], parts[1]) } - - return int64(len(src)), nil + return m } -func (t *Table) ReadFrom(src io.Reader) (n int64, err error) { - decoder := NewDecoder(src) - for { - env, err := decoder.Decode() - if err == io.EOF { - return 0, nil - } else if err != nil { - return -1, err +// InitMultiMap removes all values in env, then initializes +// new values from the contents of m. +func (env *Env) InitMultiMap(m map[string][]string) { + (*env) = make([]string, 0, len(m)) + for k, vals := range m { + for _, v := range vals { + env.Set(k, v) } - t.Add(env) } - return 0, nil } diff -Nru docker.io-0.9.1~dfsg1/engine/env_test.go docker.io-1.3.2~dfsg1/engine/env_test.go --- docker.io-0.9.1~dfsg1/engine/env_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/engine/env_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,9 +1,53 @@ package engine import ( + "bytes" + "encoding/json" "testing" + + "github.com/docker/docker/pkg/testutils" ) +func TestEnvLenZero(t *testing.T) { + env := &Env{} + if env.Len() != 0 { + t.Fatalf("%d", env.Len()) + } +} + +func TestEnvLenNotZero(t *testing.T) { + env := &Env{} + env.Set("foo", "bar") + env.Set("ga", "bu") + if env.Len() != 2 { + t.Fatalf("%d", env.Len()) + } +} + +func TestEnvLenDup(t *testing.T) { + env := &Env{ + "foo=bar", + "foo=baz", + "a=b", + } + // len(env) != env.Len() + if env.Len() != 2 { + t.Fatalf("%d", env.Len()) + } +} + +func TestEnvGetDup(t *testing.T) { + env := &Env{ + "foo=bar", + "foo=baz", + "foo=bif", + } + expected := "bif" + if v := env.Get("foo"); v != expected { + t.Fatalf("expect %q, got %q", expected, v) + } +} + func TestNewJob(t *testing.T) { job := mkJob(t, "dummy", "--level=awesome") if job.Name != "dummy" { @@ -95,3 +139,186 @@ t.Fatalf("bar not found in the environ") } } + +func TestMultiMap(t *testing.T) { + e := &Env{} + e.Set("foo", "bar") + e.Set("bar", "baz") + e.Set("hello", "world") + m := e.MultiMap() + e2 := &Env{} + e2.Set("old_key", "something something something") + e2.InitMultiMap(m) + if v := e2.Get("old_key"); v != "" { + t.Fatalf("%#v", v) + } + if v := e2.Get("bar"); v != "baz" { + t.Fatalf("%#v", v) + } + if v := e2.Get("hello"); v != "world" { + t.Fatalf("%#v", v) + } +} + +func testMap(l int) [][2]string { + res := make([][2]string, l) + for i := 0; i < l; i++ { + t := [2]string{testutils.RandomString(5), testutils.RandomString(20)} + res[i] = t + } + return res +} + +func BenchmarkSet(b *testing.B) { + fix := testMap(100) + b.ResetTimer() + for i := 0; i < b.N; i++ { + env := &Env{} + for _, kv := range fix { + env.Set(kv[0], kv[1]) + } + } +} + +func BenchmarkSetJson(b *testing.B) { + fix := testMap(100) + type X struct { + f string + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + env := &Env{} + for _, kv := range fix { + if err := env.SetJson(kv[0], X{kv[1]}); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkGet(b *testing.B) { + fix := testMap(100) + env := &Env{} + for _, kv := range fix { + env.Set(kv[0], kv[1]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, kv := range fix { + env.Get(kv[0]) + } + } +} + +func BenchmarkGetJson(b *testing.B) { + fix := testMap(100) + env := &Env{} + type X struct { + f string + } + for _, kv := range fix { + env.SetJson(kv[0], X{kv[1]}) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, kv := range fix { + if err := env.GetJson(kv[0], &X{}); err 
!= nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkEncode(b *testing.B) { + fix := testMap(100) + env := &Env{} + type X struct { + f string + } + // half a json + for i, kv := range fix { + if i%2 != 0 { + if err := env.SetJson(kv[0], X{kv[1]}); err != nil { + b.Fatal(err) + } + continue + } + env.Set(kv[0], kv[1]) + } + var writer bytes.Buffer + b.ResetTimer() + for i := 0; i < b.N; i++ { + env.Encode(&writer) + writer.Reset() + } +} + +func BenchmarkDecode(b *testing.B) { + fix := testMap(100) + env := &Env{} + type X struct { + f string + } + // half a json + for i, kv := range fix { + if i%2 != 0 { + if err := env.SetJson(kv[0], X{kv[1]}); err != nil { + b.Fatal(err) + } + continue + } + env.Set(kv[0], kv[1]) + } + var writer bytes.Buffer + env.Encode(&writer) + denv := &Env{} + reader := bytes.NewReader(writer.Bytes()) + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := denv.Decode(reader) + if err != nil { + b.Fatal(err) + } + reader.Seek(0, 0) + } +} + +func TestLongNumbers(t *testing.T) { + type T struct { + TestNum int64 + } + v := T{67108864} + var buf bytes.Buffer + e := &Env{} + e.SetJson("Test", v) + if err := e.Encode(&buf); err != nil { + t.Fatal(err) + } + res := make(map[string]T) + if err := json.Unmarshal(buf.Bytes(), &res); err != nil { + t.Fatal(err) + } + if res["Test"].TestNum != v.TestNum { + t.Fatalf("TestNum %d, expected %d", res["Test"].TestNum, v.TestNum) + } +} + +func TestLongNumbersArray(t *testing.T) { + type T struct { + TestNum []int64 + } + v := T{[]int64{67108864}} + var buf bytes.Buffer + e := &Env{} + e.SetJson("Test", v) + if err := e.Encode(&buf); err != nil { + t.Fatal(err) + } + res := make(map[string]T) + if err := json.Unmarshal(buf.Bytes(), &res); err != nil { + t.Fatal(err) + } + if res["Test"].TestNum[0] != v.TestNum[0] { + t.Fatalf("TestNum %d, expected %d", res["Test"].TestNum, v.TestNum) + } +} diff -Nru docker.io-0.9.1~dfsg1/engine/helpers_test.go docker.io-1.3.2~dfsg1/engine/helpers_test.go --- docker.io-0.9.1~dfsg1/engine/helpers_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/engine/helpers_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,24 +1,11 @@ package engine import ( - "github.com/dotcloud/docker/utils" "testing" ) var globalTestID string -func newTestEngine(t *testing.T) *Engine { - tmp, err := utils.TestDirectory("") - if err != nil { - t.Fatal(err) - } - eng, err := New(tmp) - if err != nil { - t.Fatal(err) - } - return eng -} - func mkJob(t *testing.T, name string, args ...string) *Job { - return newTestEngine(t).Job(name, args...) + return New().Job(name, args...) } diff -Nru docker.io-0.9.1~dfsg1/engine/http.go docker.io-1.3.2~dfsg1/engine/http.go --- docker.io-0.9.1~dfsg1/engine/http.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/engine/http.go 2014-11-24 17:38:01.000000000 +0000 @@ -9,7 +9,7 @@ // result as an http response. // This method allows an Engine instance to be passed as a standard http.Handler interface. // -// Note that the protocol used in this methid is a convenience wrapper and is not the canonical +// Note that the protocol used in this method is a convenience wrapper and is not the canonical // implementation of remote job execution. This is because HTTP/1 does not handle stream multiplexing, // and so cannot differentiate stdout from stderr. 
Additionally, headers cannot be added to a response once data has been written to the body, which makes it inconvenient to return metadata such diff -Nru docker.io-0.9.1~dfsg1/engine/job.go docker.io-1.3.2~dfsg1/engine/job.go --- docker.io-0.9.1~dfsg1/engine/job.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/engine/job.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,9 +1,9 @@ package engine import ( + "bytes" "fmt" "io" - "os" "strings" "time" ) @@ -17,8 +17,7 @@ // environment variables, standard streams for input, output and error, and // an exit status which can indicate success (0) or error (anything else). // -// One slight variation is that jobs report their status as a string. The -// string "0" indicates success, and any other strings indicates an error. +// For status, 0 indicates success, and any other integer indicates an error. // This allows for richer error reporting. // type Job struct { @@ -32,7 +31,7 @@ handler Handler status Status end time.Time - onExit []func() + closeIO bool } type Status int @@ -47,6 +46,20 @@ // If the job returns a failure status, an error is returned // which includes the status. func (job *Job) Run() error { + if job.Eng.IsShutdown() { + return fmt.Errorf("engine is shutdown") + } + // FIXME: this is a temporary workaround to avoid Engine.Shutdown + // waiting 5 seconds for server/api.ServeApi to complete (which it never will) + // every time the daemon is cleanly restarted. + // The permanent fix is to implement Job.Stop and Job.OnStop so that + // ServeApi can cooperate and terminate cleanly. + if job.Name != "serveapi" { + job.Eng.l.Lock() + job.Eng.tasks.Add(1) + job.Eng.l.Unlock() + defer job.Eng.tasks.Done() + } // FIXME: make this thread-safe // FIXME: implement wait if !job.end.IsZero() { @@ -57,8 +70,8 @@ defer func() { job.Eng.Logf("-job %s%s", job.CallString(), job.StatusString()) }() - var errorMessage string - job.Stderr.AddString(&errorMessage) + var errorMessage = bytes.NewBuffer(nil) + job.Stderr.Add(errorMessage) if job.handler == nil { job.Errorf("%s: command not found", job.Name) job.status = 127 @@ -66,16 +79,22 @@ job.status = job.handler(job) job.end = time.Now() } - // Wait for all background tasks to complete - if err := job.Stdout.Close(); err != nil { - return err - } - if err := job.Stderr.Close(); err != nil { - return err + if job.closeIO { + // Wait for all background tasks to complete + if err := job.Stdout.Close(); err != nil { + return err + } + if err := job.Stderr.Close(); err != nil { + return err + } + if err := job.Stdin.Close(); err != nil { + return err + } } if job.status != 0 { - return fmt.Errorf("%s", errorMessage) + return fmt.Errorf("%s", Tail(errorMessage, 1)) } + return nil } @@ -189,11 +208,8 @@ } func (job *Job) Logf(format string, args ...interface{}) (n int, err error) { - if os.Getenv("TEST") == "" { - prefixedFormat := fmt.Sprintf("[%s] %s\n", job, strings.TrimRight(format, "\n")) - return fmt.Fprintf(job.Stderr, prefixedFormat, args...) - } - return 0, nil + prefixedFormat := fmt.Sprintf("[%s] %s\n", job, strings.TrimRight(format, "\n")) + return fmt.Fprintf(job.Stderr, prefixedFormat, args...)
} func (job *Job) Printf(format string, args ...interface{}) (n int, err error) { @@ -212,3 +228,11 @@ fmt.Fprintf(job.Stderr, "%s\n", err) return StatusErr } + +func (job *Job) StatusCode() int { + return int(job.status) +} + +func (job *Job) SetCloseIO(val bool) { + job.closeIO = val +} diff -Nru docker.io-0.9.1~dfsg1/engine/job_test.go docker.io-1.3.2~dfsg1/engine/job_test.go --- docker.io-0.9.1~dfsg1/engine/job_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/engine/job_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,13 +1,13 @@ package engine import ( - "os" + "bytes" + "fmt" "testing" ) func TestJobStatusOK(t *testing.T) { - eng := newTestEngine(t) - defer os.RemoveAll(eng.Root()) + eng := New() eng.Register("return_ok", func(job *Job) Status { return StatusOK }) err := eng.Job("return_ok").Run() if err != nil { @@ -16,8 +16,7 @@ } func TestJobStatusErr(t *testing.T) { - eng := newTestEngine(t) - defer os.RemoveAll(eng.Root()) + eng := New() eng.Register("return_err", func(job *Job) Status { return StatusErr }) err := eng.Job("return_err").Run() if err == nil { @@ -26,8 +25,7 @@ } func TestJobStatusNotFound(t *testing.T) { - eng := newTestEngine(t) - defer os.RemoveAll(eng.Root()) + eng := New() eng.Register("return_not_found", func(job *Job) Status { return StatusNotFound }) err := eng.Job("return_not_found").Run() if err == nil { @@ -36,8 +34,7 @@ } func TestJobStdoutString(t *testing.T) { - eng := newTestEngine(t) - defer os.RemoveAll(eng.Root()) + eng := New() // FIXME: test multiple combinations of output and status eng.Register("say_something_in_stdout", func(job *Job) Status { job.Printf("Hello world\n") @@ -45,21 +42,20 @@ }) job := eng.Job("say_something_in_stdout") - var output string - if err := job.Stdout.AddString(&output); err != nil { - t.Fatal(err) - } + var outputBuffer = bytes.NewBuffer(nil) + job.Stdout.Add(outputBuffer) if err := job.Run(); err != nil { t.Fatal(err) } + fmt.Println(outputBuffer) + var output = Tail(outputBuffer, 1) if expectedOutput := "Hello world"; output != expectedOutput { t.Fatalf("Stdout last line:\nExpected: %v\nReceived: %v", expectedOutput, output) } } func TestJobStderrString(t *testing.T) { - eng := newTestEngine(t) - defer os.RemoveAll(eng.Root()) + eng := New() // FIXME: test multiple combinations of output and status eng.Register("say_something_in_stderr", func(job *Job) Status { job.Errorf("Warning, something might happen\nHere it comes!\nOh no...\nSomething happened\n") @@ -67,13 +63,12 @@ }) job := eng.Job("say_something_in_stderr") - var output string - if err := job.Stderr.AddString(&output); err != nil { - t.Fatal(err) - } + var outputBuffer = bytes.NewBuffer(nil) + job.Stderr.Add(outputBuffer) if err := job.Run(); err != nil { t.Fatal(err) } + var output = Tail(outputBuffer, 1) if expectedOutput := "Something happened"; output != expectedOutput { t.Fatalf("Stderr last line:\nExpected: %v\nReceived: %v", expectedOutput, output) } diff -Nru docker.io-0.9.1~dfsg1/engine/MAINTAINERS docker.io-1.3.2~dfsg1/engine/MAINTAINERS --- docker.io-0.9.1~dfsg1/engine/MAINTAINERS 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/engine/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -1 +1 @@ -#Solomon Hykes Temporarily unavailable +Solomon Hykes (@shykes) diff -Nru docker.io-0.9.1~dfsg1/engine/shutdown_test.go docker.io-1.3.2~dfsg1/engine/shutdown_test.go --- docker.io-0.9.1~dfsg1/engine/shutdown_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/engine/shutdown_test.go 2014-11-24 
17:38:01.000000000 +0000 @@ -0,0 +1,80 @@ +package engine + +import ( + "testing" + "time" +) + +func TestShutdownEmpty(t *testing.T) { + eng := New() + if eng.IsShutdown() { + t.Fatalf("IsShutdown should be false") + } + eng.Shutdown() + if !eng.IsShutdown() { + t.Fatalf("IsShutdown should be true") + } +} + +func TestShutdownAfterRun(t *testing.T) { + eng := New() + var called bool + eng.Register("foo", func(job *Job) Status { + called = true + return StatusOK + }) + if err := eng.Job("foo").Run(); err != nil { + t.Fatal(err) + } + eng.Shutdown() + if err := eng.Job("foo").Run(); err == nil { + t.Fatalf("%#v", *eng) + } +} + +// An approximate and racy, but better-than-nothing test that verifies +// that Shutdown blocks until running jobs complete. +func TestShutdownDuringRun(t *testing.T) { + var ( + jobDelay time.Duration = 500 * time.Millisecond + jobDelayLow time.Duration = 100 * time.Millisecond + jobDelayHigh time.Duration = 700 * time.Millisecond + ) + eng := New() + var completed bool + eng.Register("foo", func(job *Job) Status { + time.Sleep(jobDelay) + completed = true + return StatusOK + }) + go eng.Job("foo").Run() + time.Sleep(50 * time.Millisecond) + done := make(chan struct{}) + var startShutdown time.Time + go func() { + startShutdown = time.Now() + eng.Shutdown() + close(done) + }() + time.Sleep(50 * time.Millisecond) + if err := eng.Job("foo").Run(); err == nil { + t.Fatalf("run on shutdown should fail: %#v", *eng) + } + <-done + // Verify that Shutdown() blocks for roughly 500ms, instead + // of returning almost instantly. + // + // We use >100ms to leave ample margin for race conditions between + // goroutines. It's possible (but unlikely in reasonable testing + // conditions), that this test will cause a false positive or false + // negative. But it's probably better than not having any test + // for the 99.999% of time where testing conditions are reasonable. + if d := time.Since(startShutdown); d.Nanoseconds() < jobDelayLow.Nanoseconds() { + t.Fatalf("shutdown did not block long enough: %v", d) + } else if d.Nanoseconds() > jobDelayHigh.Nanoseconds() { + t.Fatalf("shutdown blocked too long: %v", d) + } + if !completed { + t.Fatalf("job did not complete") + } +} diff -Nru docker.io-0.9.1~dfsg1/engine/streams.go docker.io-1.3.2~dfsg1/engine/streams.go --- docker.io-0.9.1~dfsg1/engine/streams.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/engine/streams.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,8 +1,7 @@ package engine import ( - "bufio" - "container/ring" + "bytes" "fmt" "io" "io/ioutil" @@ -16,6 +15,28 @@ used bool } +// Tail returns the last n lines of a buffer, with any trailing \n stripped. +// If n <= 0, it returns an empty string. +func Tail(buffer *bytes.Buffer, n int) string { + if n <= 0 { + return "" + } + bytes := buffer.Bytes() + if len(bytes) > 0 && bytes[len(bytes)-1] == '\n' { + bytes = bytes[:len(bytes)-1] + } + for i := buffer.Len() - 2; i >= 0; i-- { + if bytes[i] == '\n' { + n-- + if n == 0 { + return string(bytes[i+1:]) + } + } + } + return string(bytes) +} + // NewOutput returns a new Output object with no destinations attached. // Writing to an empty Output will cause the written data to be discarded. func NewOutput() *Output { @@ -58,42 +79,6 @@ return r, nil } -// AddTail starts a new goroutine which will read all subsequent data written to the output, -// line by line, and append the last `n` lines to `dst`.
-func (o *Output) AddTail(dst *[]string, n int) error { - src, err := o.AddPipe() - if err != nil { - return err - } - o.tasks.Add(1) - go func() { - defer o.tasks.Done() - Tail(src, n, dst) - }() - return nil -} - -// AddString starts a new goroutine which will read all subsequent data written to the output, -// line by line, and store the last line into `dst`. -func (o *Output) AddString(dst *string) error { - src, err := o.AddPipe() - if err != nil { - return err - } - o.tasks.Add(1) - go func() { - defer o.tasks.Done() - lines := make([]string, 0, 1) - Tail(src, 1, &lines) - if len(lines) == 0 { - *dst = "" - } else { - *dst = lines[0] - } - }() - return nil -} - // Write writes the same data to all registered destinations. // This method is thread-safe. func (o *Output) Write(p []byte) (n int, err error) { @@ -118,7 +103,7 @@ defer o.Unlock() var firstErr error for _, dst := range o.dests { - if closer, ok := dst.(io.WriteCloser); ok { + if closer, ok := dst.(io.Closer); ok { err := closer.Close() if err != nil && firstErr == nil { firstErr = err @@ -154,7 +139,7 @@ // Not thread safe on purpose func (i *Input) Close() error { if i.src != nil { - if closer, ok := i.src.(io.WriteCloser); ok { + if closer, ok := i.src.(io.Closer); ok { return closer.Close() } } @@ -174,26 +159,6 @@ return nil } -// Tail reads from `src` line per line, and returns the last `n` lines as an array. -// A ring buffer is used to only store `n` lines at any time. -func Tail(src io.Reader, n int, dst *[]string) { - scanner := bufio.NewScanner(src) - r := ring.New(n) - for scanner.Scan() { - if n == 0 { - continue - } - r.Value = scanner.Text() - r = r.Next() - } - r.Do(func(v interface{}) { - if v == nil { - return - } - *dst = append(*dst, v.(string)) - }) -} - // AddEnv starts a new goroutine which will decode all subsequent data // as a stream of json-encoded objects, and point `dst` to the last // decoded object. 
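[Editor's note: with `AddString` and `AddTail` gone, callers now attach a plain `bytes.Buffer` to an `Output` and post-process it with the new `Tail` helper, as `Job.Run` does for stderr above. A minimal sketch of the new pattern, not part of the patch; it assumes the `github.com/docker/docker/engine` import path.]

    package main

    import (
        "bytes"
        "fmt"

        "github.com/docker/docker/engine"
    )

    func main() {
        o := engine.NewOutput()

        // Instead of o.AddString(&s) / o.AddTail(&lines, n),
        // attach an ordinary buffer as a destination...
        buf := bytes.NewBuffer(nil)
        o.Add(buf)

        o.Write([]byte("One\nTwo\nThree\n"))
        o.Close()

        // ...and extract the trailing lines after the fact.
        fmt.Println(engine.Tail(buf, 1)) // prints "Three"
        fmt.Println(engine.Tail(buf, 2)) // prints "Two" and "Three" on two lines
    }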
diff -Nru docker.io-0.9.1~dfsg1/engine/streams_test.go docker.io-1.3.2~dfsg1/engine/streams_test.go --- docker.io-0.9.1~dfsg1/engine/streams_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/engine/streams_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -10,53 +10,6 @@ "testing" ) -func TestOutputAddString(t *testing.T) { - var testInputs = [][2]string{ - { - "hello, world!", - "hello, world!", - }, - - { - "One\nTwo\nThree", - "Three", - }, - - { - "", - "", - }, - - { - "A line\nThen another nl-terminated line\n", - "Then another nl-terminated line", - }, - - { - "A line followed by an empty line\n\n", - "", - }, - } - for _, testData := range testInputs { - input := testData[0] - expectedOutput := testData[1] - o := NewOutput() - var output string - if err := o.AddString(&output); err != nil { - t.Error(err) - } - if n, err := o.Write([]byte(input)); err != nil { - t.Error(err) - } else if n != len(input) { - t.Errorf("Expected %d, got %d", len(input), n) - } - o.Close() - if output != expectedOutput { - t.Errorf("Last line is not stored as return string.\nInput: '%s'\nExpected: '%s'\nGot: '%s'", input, expectedOutput, output) - } - } -} - type sentinelWriteCloser struct { calledWrite bool calledClose bool @@ -145,59 +98,24 @@ } func TestTail(t *testing.T) { - var tests = make(map[string][][]string) - tests["hello, world!"] = [][]string{ - {}, - {"hello, world!"}, - {"hello, world!"}, - {"hello, world!"}, - } - tests["One\nTwo\nThree"] = [][]string{ - {}, - {"Three"}, - {"Two", "Three"}, - {"One", "Two", "Three"}, - } - for input, outputs := range tests { - for n, expectedOutput := range outputs { - var output []string - Tail(strings.NewReader(input), n, &output) - if fmt.Sprintf("%v", output) != fmt.Sprintf("%v", expectedOutput) { - t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot : '%s'", expectedOutput, output) - } - } + var tests = make(map[string][]string) + tests["hello, world!"] = []string{ + "", + "hello, world!", + "hello, world!", + "hello, world!", } -} - -func TestOutputAddTail(t *testing.T) { - var tests = make(map[string][][]string) - tests["hello, world!"] = [][]string{ - {}, - {"hello, world!"}, - {"hello, world!"}, - {"hello, world!"}, - } - tests["One\nTwo\nThree"] = [][]string{ - {}, - {"Three"}, - {"Two", "Three"}, - {"One", "Two", "Three"}, + tests["One\nTwo\nThree"] = []string{ + "", + "Three", + "Two\nThree", + "One\nTwo\nThree", } for input, outputs := range tests { for n, expectedOutput := range outputs { - o := NewOutput() - var output []string - if err := o.AddTail(&output, n); err != nil { - t.Error(err) - } - if n, err := o.Write([]byte(input)); err != nil { - t.Error(err) - } else if n != len(input) { - t.Errorf("Expected %d, got %d", len(input), n) - } - o.Close() - if fmt.Sprintf("%v", output) != fmt.Sprintf("%v", expectedOutput) { - t.Errorf("Tail(%d) returned wrong result.\nExpected: %v\nGot: %v", n, expectedOutput, output) + output := Tail(bytes.NewBufferString(input), n) + if output != expectedOutput { + t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot : '%s'", n, expectedOutput, output) } } } @@ -223,7 +141,7 @@ t.Fatalf("Expected %d, got %d", len(input), n) } if output := b.String(); output != input { - t.Fatal("Received wrong data from Add.\nExpected: '%s'\nGot: '%s'", input, output) + t.Fatalf("Received wrong data from Add.\nExpected: '%s'\nGot: '%s'", input, output) } } diff -Nru docker.io-0.9.1~dfsg1/engine/table.go docker.io-1.3.2~dfsg1/engine/table.go --- docker.io-0.9.1~dfsg1/engine/table.go 
1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/engine/table.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,140 @@ +package engine + +import ( + "bytes" + "encoding/json" + "io" + "sort" + "strconv" +) + +type Table struct { + Data []*Env + sortKey string + Chan chan *Env +} + +func NewTable(sortKey string, sizeHint int) *Table { + return &Table{ + make([]*Env, 0, sizeHint), + sortKey, + make(chan *Env), + } +} + +func (t *Table) SetKey(sortKey string) { + t.sortKey = sortKey +} + +func (t *Table) Add(env *Env) { + t.Data = append(t.Data, env) +} + +func (t *Table) Len() int { + return len(t.Data) +} + +func (t *Table) Less(a, b int) bool { + return t.lessBy(a, b, t.sortKey) +} + +func (t *Table) lessBy(a, b int, by string) bool { + keyA := t.Data[a].Get(by) + keyB := t.Data[b].Get(by) + intA, errA := strconv.ParseInt(keyA, 10, 64) + intB, errB := strconv.ParseInt(keyB, 10, 64) + if errA == nil && errB == nil { + return intA < intB + } + return keyA < keyB +} + +func (t *Table) Swap(a, b int) { + tmp := t.Data[a] + t.Data[a] = t.Data[b] + t.Data[b] = tmp +} + +func (t *Table) Sort() { + sort.Sort(t) +} + +func (t *Table) ReverseSort() { + sort.Sort(sort.Reverse(t)) +} + +func (t *Table) WriteListTo(dst io.Writer) (n int64, err error) { + if _, err := dst.Write([]byte{'['}); err != nil { + return -1, err + } + n = 1 + for i, env := range t.Data { + bytes, err := env.WriteTo(dst) + if err != nil { + return -1, err + } + n += bytes + if i != len(t.Data)-1 { + if _, err := dst.Write([]byte{','}); err != nil { + return -1, err + } + n++ + } + } + if _, err := dst.Write([]byte{']'}); err != nil { + return -1, err + } + return n + 1, nil +} + +func (t *Table) ToListString() (string, error) { + buffer := bytes.NewBuffer(nil) + if _, err := t.WriteListTo(buffer); err != nil { + return "", err + } + return buffer.String(), nil +} + +func (t *Table) WriteTo(dst io.Writer) (n int64, err error) { + for _, env := range t.Data { + bytes, err := env.WriteTo(dst) + if err != nil { + return -1, err + } + n += bytes + } + return n, nil +} + +func (t *Table) ReadListFrom(src []byte) (n int64, err error) { + var array []interface{} + + if err := json.Unmarshal(src, &array); err != nil { + return -1, err + } + + for _, item := range array { + if m, ok := item.(map[string]interface{}); ok { + env := &Env{} + for key, value := range m { + env.SetAuto(key, value) + } + t.Add(env) + } + } + + return int64(len(src)), nil +} + +func (t *Table) ReadFrom(src io.Reader) (n int64, err error) { + decoder := NewDecoder(src) + for { + env, err := decoder.Decode() + if err == io.EOF { + return 0, nil + } else if err != nil { + return -1, err + } + t.Add(env) + } +} diff -Nru docker.io-0.9.1~dfsg1/engine/table_test.go docker.io-1.3.2~dfsg1/engine/table_test.go --- docker.io-0.9.1~dfsg1/engine/table_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/engine/table_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -26,3 +26,87 @@ t.Fatalf("Inccorect output: %v", output) } } + +func TestTableSortStringValue(t *testing.T) { + table := NewTable("Key", 0) + + e := &Env{} + e.Set("Key", "A") + table.Add(e) + + e = &Env{} + e.Set("Key", "D") + table.Add(e) + + e = &Env{} + e.Set("Key", "B") + table.Add(e) + + e = &Env{} + e.Set("Key", "C") + table.Add(e) + + table.Sort() + + if len := table.Len(); len != 4 { + t.Fatalf("Expected 4, got %d", len) + } + + if value := table.Data[0].Get("Key"); value != "A" { + t.Fatalf("Expected A, got %s", value) + } + + if value := table.Data[1].Get("Key"); value != "B" 
{
+		t.Fatalf("Expected B, got %s", value)
+	}
+
+	if value := table.Data[2].Get("Key"); value != "C" {
+		t.Fatalf("Expected C, got %s", value)
+	}
+
+	if value := table.Data[3].Get("Key"); value != "D" {
+		t.Fatalf("Expected D, got %s", value)
+	}
+}
+
+func TestTableReverseSortStringValue(t *testing.T) {
+	table := NewTable("Key", 0)
+
+	e := &Env{}
+	e.Set("Key", "A")
+	table.Add(e)
+
+	e = &Env{}
+	e.Set("Key", "D")
+	table.Add(e)
+
+	e = &Env{}
+	e.Set("Key", "B")
+	table.Add(e)
+
+	e = &Env{}
+	e.Set("Key", "C")
+	table.Add(e)
+
+	table.ReverseSort()
+
+	if len := table.Len(); len != 4 {
+		t.Fatalf("Expected 4, got %d", len)
+	}
+
+	if value := table.Data[0].Get("Key"); value != "D" {
+		t.Fatalf("Expected D, got %s", value)
+	}
+
+	if value := table.Data[1].Get("Key"); value != "C" {
+		t.Fatalf("Expected C, got %s", value)
+	}
+
+	if value := table.Data[2].Get("Key"); value != "B" {
+		t.Fatalf("Expected B, got %s", value)
+	}
+
+	if value := table.Data[3].Get("Key"); value != "A" {
+		t.Fatalf("Expected A, got %s", value)
+	}
+}
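The tests above only exercise lexicographic keys. lessBy in engine/table.go first tries strconv.ParseInt on both keys and falls back to string comparison only when parsing fails, so a hypothetical companion test (same package; a sketch, not part of the patch) would pin down the numeric ordering:

func TestTableSortNumericValue(t *testing.T) {
	table := NewTable("Key", 0)
	for _, v := range []string{"10", "9", "2"} {
		e := &Env{}
		e.Set("Key", v)
		table.Add(e)
	}
	table.Sort()
	// Numeric order is 2, 9, 10; a plain string sort would yield "10", "2", "9".
	for i, want := range []string{"2", "9", "10"} {
		if value := table.Data[i].Get("Key"); value != want {
			t.Fatalf("Expected %s at index %d, got %s", want, i, value)
		}
	}
}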
diff -Nru docker.io-0.9.1~dfsg1/events/events.go docker.io-1.3.2~dfsg1/events/events.go
--- docker.io-0.9.1~dfsg1/events/events.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/events/events.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,176 @@
+package events
+
+import (
+	"encoding/json"
+	"sync"
+	"time"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/utils"
+)
+
+const eventsLimit = 64
+
+type listener chan<- *utils.JSONMessage
+
+type Events struct {
+	mu          sync.RWMutex
+	events      []*utils.JSONMessage
+	subscribers []listener
+}
+
+func New() *Events {
+	return &Events{
+		events: make([]*utils.JSONMessage, 0, eventsLimit),
+	}
+}
+
+// Install installs the events public API in the docker engine
+func (e *Events) Install(eng *engine.Engine) error {
+	// These jobs make up the public interface of the package
+	jobs := map[string]engine.Handler{
+		"events":            e.Get,
+		"log":               e.Log,
+		"subscribers_count": e.SubscribersCount,
+	}
+	for name, job := range jobs {
+		if err := eng.Register(name, job); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e *Events) Get(job *engine.Job) engine.Status {
+	var (
+		since   = job.GetenvInt64("since")
+		until   = job.GetenvInt64("until")
+		timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now()))
+	)
+
+	// If there is no until, disable the timeout
+	if until == 0 {
+		timeout.Stop()
+	}
+
+	listener := make(chan *utils.JSONMessage)
+	e.subscribe(listener)
+	defer e.unsubscribe(listener)
+
+	job.Stdout.Write(nil)
+
+	// Resend every event in the [since, until] time interval.
+	if since != 0 {
+		if err := e.writeCurrent(job, since, until); err != nil {
+			return job.Error(err)
+		}
+	}
+
+	for {
+		select {
+		case event, ok := <-listener:
+			if !ok {
+				return engine.StatusOK
+			}
+			if err := writeEvent(job, event); err != nil {
+				return job.Error(err)
+			}
+		case <-timeout.C:
+			return engine.StatusOK
+		}
+	}
+}
+
+func (e *Events) Log(job *engine.Job) engine.Status {
+	if len(job.Args) != 3 {
+		return job.Errorf("usage: %s ACTION ID FROM", job.Name)
+	}
+	// log asynchronously, without waiting for receivers
+	go e.log(job.Args[0], job.Args[1], job.Args[2])
+	return engine.StatusOK
+}
+
+func (e *Events) SubscribersCount(job *engine.Job) engine.Status {
+	ret := &engine.Env{}
+	ret.SetInt("count", e.subscribersCount())
+	ret.WriteTo(job.Stdout)
+	return engine.StatusOK
+}
+
+func writeEvent(job *engine.Job, event *utils.JSONMessage) error {
+	// When sending an event, JSON serialization errors are ignored, but all
+	// other errors lead to the eviction of the listener.
+	if b, err := json.Marshal(event); err == nil {
+		if _, err = job.Stdout.Write(b); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e *Events) writeCurrent(job *engine.Job, since, until int64) error {
+	e.mu.RLock()
+	for _, event := range e.events {
+		if event.Time >= since && (event.Time <= until || until == 0) {
+			if err := writeEvent(job, event); err != nil {
+				e.mu.RUnlock()
+				return err
+			}
+		}
+	}
+	e.mu.RUnlock()
+	return nil
+}
+
+func (e *Events) subscribersCount() int {
+	e.mu.RLock()
+	c := len(e.subscribers)
+	e.mu.RUnlock()
+	return c
+}
+
+func (e *Events) log(action, id, from string) {
+	e.mu.Lock()
+	now := time.Now().UTC().Unix()
+	jm := &utils.JSONMessage{Status: action, ID: id, From: from, Time: now}
+	if len(e.events) == cap(e.events) {
+		// discard the oldest event
+		copy(e.events, e.events[1:])
+		e.events[len(e.events)-1] = jm
+	} else {
+		e.events = append(e.events, jm)
+	}
+	for _, s := range e.subscribers {
+		// We give each subscriber a 100ms time window to receive the event,
+		// after which we move to the next.
+		select {
+		case s <- jm:
+		case <-time.After(100 * time.Millisecond):
+		}
+	}
+	e.mu.Unlock()
+}
+
+func (e *Events) subscribe(l listener) {
+	e.mu.Lock()
+	e.subscribers = append(e.subscribers, l)
+	e.mu.Unlock()
+}
+
+// unsubscribe closes and removes the specified listener from the list of
+// previously registered ones.
+// It returns a boolean value indicating if the listener was successfully
+// found, closed and unregistered.
+func (e *Events) unsubscribe(l listener) bool {
+	e.mu.Lock()
+	for i, subscriber := range e.subscribers {
+		if subscriber == l {
+			close(l)
+			e.subscribers = append(e.subscribers[:i], e.subscribers[i+1:]...)
+			e.mu.Unlock()
+			return true
+		}
+	}
+	e.mu.Unlock()
+	return false
+}
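Events.log above never blocks on a slow consumer: each subscriber gets a bounded window, after which the event is dropped for that subscriber only. A minimal standalone sketch of the same pattern (not part of the patch; the channel element type is simplified to string):

package main

import (
	"fmt"
	"time"
)

// broadcast sends msg to every subscriber, giving each one a bounded time
// window so a stuck receiver cannot stall the publisher or its peers.
func broadcast(subscribers []chan<- string, msg string) {
	for _, s := range subscribers {
		select {
		case s <- msg:
		case <-time.After(100 * time.Millisecond):
			// drop the message for this subscriber and move on
		}
	}
}

func main() {
	fast := make(chan string, 1)
	stuck := make(chan string) // nobody ever reads from this one
	start := time.Now()
	broadcast([]chan<- string{fast, stuck}, "hello")
	fmt.Println(<-fast, time.Since(start)) // "hello" after ~100ms, not a deadlock
}

The trade-off is at-most-once delivery per subscriber: a receiver that falls behind simply misses events, which is acceptable for a monitoring stream.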
diff -Nru docker.io-0.9.1~dfsg1/events/events_test.go docker.io-1.3.2~dfsg1/events/events_test.go
--- docker.io-0.9.1~dfsg1/events/events_test.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/events/events_test.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,154 @@
+package events
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/utils"
+)
+
+func TestEventsPublish(t *testing.T) {
+	e := New()
+	l1 := make(chan *utils.JSONMessage)
+	l2 := make(chan *utils.JSONMessage)
+	e.subscribe(l1)
+	e.subscribe(l2)
+	count := e.subscribersCount()
+	if count != 2 {
+		t.Fatalf("Must be 2 subscribers, got %d", count)
+	}
+	go e.log("test", "cont", "image")
+	select {
+	case msg := <-l1:
+		if len(e.events) != 1 {
+			t.Fatalf("Must be only one event, got %d", len(e.events))
+		}
+		if msg.Status != "test" {
+			t.Fatalf("Status should be test, got %s", msg.Status)
+		}
+		if msg.ID != "cont" {
+			t.Fatalf("ID should be cont, got %s", msg.ID)
+		}
+		if msg.From != "image" {
+			t.Fatalf("From should be image, got %s", msg.From)
+		}
+	case <-time.After(1 * time.Second):
+		t.Fatal("Timeout waiting for broadcasted message")
+	}
+	select {
+	case msg := <-l2:
+		if len(e.events) != 1 {
+			t.Fatalf("Must be only one event, got %d", len(e.events))
+		}
+		if msg.Status != "test" {
+			t.Fatalf("Status should be test, got %s", msg.Status)
+		}
+		if msg.ID != "cont" {
+			t.Fatalf("ID should be cont, got %s", msg.ID)
+		}
+		if msg.From != "image" {
+			t.Fatalf("From should be image, got %s", msg.From)
+		}
+	case <-time.After(1 * time.Second):
+		t.Fatal("Timeout waiting for broadcasted message")
+	}
+}
+
+func TestEventsPublishTimeout(t *testing.T) {
+	e := New()
+	l := make(chan *utils.JSONMessage)
+	e.subscribe(l)
+
+	c := make(chan struct{})
+	go func() {
+		e.log("test", "cont", "image")
+		close(c)
+	}()
+
+	select {
+	case <-c:
+	case <-time.After(time.Second):
+		t.Fatal("Timeout publishing message")
+	}
+}
+
+func TestLogEvents(t *testing.T) {
+	e := New()
+	eng := engine.New()
+	if err := e.Install(eng); err != nil {
+		t.Fatal(err)
+	}
+
+	for i := 0; i < eventsLimit+16; i++ {
+		action := fmt.Sprintf("action_%d", i)
+		id := fmt.Sprintf("cont_%d", i)
+		from := fmt.Sprintf("image_%d", i)
+		job := eng.Job("log", action, id, from)
+		if err := job.Run(); err != nil {
+			t.Fatal(err)
+		}
+	}
+	time.Sleep(50 * time.Millisecond)
+	if len(e.events) != eventsLimit {
+		t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events))
+	}
+
+	job := eng.Job("events")
+	job.SetenvInt64("since", 1)
+	job.SetenvInt64("until", time.Now().Unix())
+	buf := bytes.NewBuffer(nil)
+	job.Stdout.Add(buf)
+	if err := job.Run(); err != nil {
+		t.Fatal(err)
+	}
+	buf = bytes.NewBuffer(buf.Bytes())
+	dec := json.NewDecoder(buf)
+	var msgs []utils.JSONMessage
+	for {
+		var jm utils.JSONMessage
+		if err := dec.Decode(&jm); err != nil {
+			if err == io.EOF {
+				break
+			}
+			t.Fatal(err)
+		}
+		msgs = append(msgs, jm)
+	}
+	if len(msgs) != eventsLimit {
+		t.Fatalf("Must be %d events, got %d", eventsLimit, len(msgs))
+	}
+	first := msgs[0]
+	if first.Status != "action_16" {
+		t.Fatalf("First action is %s, must be action_16", first.Status)
+	}
+	last := msgs[len(msgs)-1]
+	if last.Status != "action_79" {
+		t.Fatalf("Last action is %s, must be action_79", last.Status)
+	}
+}
+
+func TestEventsCountJob(t *testing.T) {
+	e := New()
+	eng := engine.New()
+	if err := e.Install(eng); err != nil 
{ + t.Fatal(err) + } + l1 := make(chan *utils.JSONMessage) + l2 := make(chan *utils.JSONMessage) + e.subscribe(l1) + e.subscribe(l2) + job := eng.Job("subscribers_count") + env, _ := job.Stdout.AddEnv() + if err := job.Run(); err != nil { + t.Fatal(err) + } + count := env.GetInt("count") + if count != 2 { + t.Fatalf("There must be 2 subscribers, got %d", count) + } +} diff -Nru docker.io-0.9.1~dfsg1/execdriver/driver.go docker.io-1.3.2~dfsg1/execdriver/driver.go --- docker.io-0.9.1~dfsg1/execdriver/driver.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/execdriver/driver.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,131 +0,0 @@ -package execdriver - -import ( - "errors" - "io" - "os" - "os/exec" -) - -var ( - ErrNotRunning = errors.New("Process could not be started") - ErrWaitTimeoutReached = errors.New("Wait timeout reached") - ErrDriverAlreadyRegistered = errors.New("A driver already registered this docker init function") - ErrDriverNotFound = errors.New("The requested docker init has not been found") -) - -var dockerInitFcts map[string]InitFunc - -type ( - StartCallback func(*Command) - InitFunc func(i *InitArgs) error -) - -func RegisterInitFunc(name string, fct InitFunc) error { - if dockerInitFcts == nil { - dockerInitFcts = make(map[string]InitFunc) - } - if _, ok := dockerInitFcts[name]; ok { - return ErrDriverAlreadyRegistered - } - dockerInitFcts[name] = fct - return nil -} - -func GetInitFunc(name string) (InitFunc, error) { - fct, ok := dockerInitFcts[name] - if !ok { - return nil, ErrDriverNotFound - } - return fct, nil -} - -// Args provided to the init function for a driver -type InitArgs struct { - User string - Gateway string - Ip string - WorkDir string - Privileged bool - Env []string - Args []string - Mtu int - Driver string - Console string - Pipe int - Root string -} - -// Driver specific information based on -// processes registered with the driver -type Info interface { - IsRunning() bool -} - -// Terminal in an interface for drivers to implement -// if they want to support Close and Resize calls from -// the core -type Terminal interface { - io.Closer - Resize(height, width int) error -} - -type TtyTerminal interface { - Master() *os.File -} - -type Driver interface { - Run(c *Command, pipes *Pipes, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code - Kill(c *Command, sig int) error - Name() string // Driver name - Info(id string) Info // "temporary" hack (until we move state from core to plugins) - GetPidsForContainer(id string) ([]int, error) // Returns a list of pids for the given container. 
-} - -// Network settings of the container -type Network struct { - Interface *NetworkInterface `json:"interface"` // if interface is nil then networking is disabled - Mtu int `json:"mtu"` -} - -type NetworkInterface struct { - Gateway string `json:"gateway"` - IPAddress string `json:"ip"` - Bridge string `json:"bridge"` - IPPrefixLen int `json:"ip_prefix_len"` -} - -type Resources struct { - Memory int64 `json:"memory"` - MemorySwap int64 `json:"memory_swap"` - CpuShares int64 `json:"cpu_shares"` -} - -// Process wrapps an os/exec.Cmd to add more metadata -type Command struct { - exec.Cmd `json:"-"` - - ID string `json:"id"` - Privileged bool `json:"privileged"` - User string `json:"user"` - Rootfs string `json:"rootfs"` // root fs of the container - InitPath string `json:"initpath"` // dockerinit - Entrypoint string `json:"entrypoint"` - Arguments []string `json:"arguments"` - WorkingDir string `json:"working_dir"` - ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver - Tty bool `json:"tty"` - Network *Network `json:"network"` - Config []string `json:"config"` // generic values that specific drivers can consume - Resources *Resources `json:"resources"` - - Terminal Terminal `json:"-"` // standard or tty terminal - Console string `json:"-"` // dev/console path - ContainerPid int `json:"container_pid"` // the pid for the process inside a container -} - -// Return the pid of the process -// If the process is nil -1 will be returned -func (c *Command) Pid() int { - return c.ContainerPid -} diff -Nru docker.io-0.9.1~dfsg1/execdriver/lxc/driver.go docker.io-1.3.2~dfsg1/execdriver/lxc/driver.go --- docker.io-0.9.1~dfsg1/execdriver/lxc/driver.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/execdriver/lxc/driver.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,394 +0,0 @@ -package lxc - -import ( - "fmt" - "github.com/dotcloud/docker/execdriver" - "github.com/dotcloud/docker/pkg/cgroups" - "github.com/dotcloud/docker/utils" - "io/ioutil" - "log" - "os" - "os/exec" - "path" - "path/filepath" - "strconv" - "strings" - "syscall" - "time" -) - -const DriverName = "lxc" - -func init() { - execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { - if err := setupHostname(args); err != nil { - return err - } - - if err := setupNetworking(args); err != nil { - return err - } - - if err := setupCapabilities(args); err != nil { - return err - } - - if err := setupWorkingDirectory(args); err != nil { - return err - } - - if err := changeUser(args); err != nil { - return err - } - - path, err := exec.LookPath(args.Args[0]) - if err != nil { - log.Printf("Unable to locate %v", args.Args[0]) - os.Exit(127) - } - if err := syscall.Exec(path, args.Args, os.Environ()); err != nil { - return fmt.Errorf("dockerinit unable to execute %s - %s", path, err) - } - panic("Unreachable") - }) -} - -type driver struct { - root string // root path for the driver to use - apparmor bool - sharedRoot bool -} - -func NewDriver(root string, apparmor bool) (*driver, error) { - // setup unconfined symlink - if err := linkLxcStart(root); err != nil { - return nil, err - } - return &driver{ - apparmor: apparmor, - root: root, - sharedRoot: rootIsShared(), - }, nil -} - -func (d *driver) Name() string { - version := d.version() - return fmt.Sprintf("%s-%s", DriverName, version) -} - -func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { - if err := 
execdriver.SetTerminal(c, pipes); err != nil { - return -1, err - } - configPath, err := d.generateLXCConfig(c) - if err != nil { - return -1, err - } - params := []string{ - "lxc-start", - "-n", c.ID, - "-f", configPath, - "--", - c.InitPath, - "-driver", - DriverName, - } - - if c.Network.Interface != nil { - params = append(params, - "-g", c.Network.Interface.Gateway, - "-i", fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen), - ) - } - params = append(params, - "-mtu", strconv.Itoa(c.Network.Mtu), - ) - - if c.User != "" { - params = append(params, "-u", c.User) - } - - if c.Privileged { - if d.apparmor { - params[0] = path.Join(d.root, "lxc-start-unconfined") - - } - params = append(params, "-privileged") - } - - if c.WorkingDir != "" { - params = append(params, "-w", c.WorkingDir) - } - - params = append(params, "--", c.Entrypoint) - params = append(params, c.Arguments...) - - if d.sharedRoot { - // lxc-start really needs / to be non-shared, or all kinds of stuff break - // when lxc-start unmount things and those unmounts propagate to the main - // mount namespace. - // What we really want is to clone into a new namespace and then - // mount / MS_REC|MS_SLAVE, but since we can't really clone or fork - // without exec in go we have to do this horrible shell hack... - shellString := - "mount --make-rslave /; exec " + - utils.ShellQuoteArguments(params) - - params = []string{ - "unshare", "-m", "--", "/bin/sh", "-c", shellString, - } - } - - var ( - name = params[0] - arg = params[1:] - ) - aname, err := exec.LookPath(name) - if err != nil { - aname = name - } - c.Path = aname - c.Args = append([]string{name}, arg...) - - if err := c.Start(); err != nil { - return -1, err - } - - var ( - waitErr error - waitLock = make(chan struct{}) - ) - go func() { - if err := c.Wait(); err != nil { - if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0 - waitErr = err - } - } - close(waitLock) - }() - - // Poll lxc for RUNNING status - pid, err := d.waitForStart(c, waitLock) - if err != nil { - if c.Process != nil { - c.Process.Kill() - } - return -1, err - } - c.ContainerPid = pid - - if startCallback != nil { - startCallback(c) - } - - <-waitLock - - return getExitCode(c), waitErr -} - -/// Return the exit code of the process -// if the process has not exited -1 will be returned -func getExitCode(c *execdriver.Command) int { - if c.ProcessState == nil { - return -1 - } - return c.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() -} - -func (d *driver) Kill(c *execdriver.Command, sig int) error { - return KillLxc(c.ID, sig) -} - -func (d *driver) version() string { - var ( - version string - output []byte - err error - ) - if _, errPath := exec.LookPath("lxc-version"); errPath == nil { - output, err = exec.Command("lxc-version").CombinedOutput() - } else { - output, err = exec.Command("lxc-start", "--version").CombinedOutput() - } - if err == nil { - version = strings.TrimSpace(string(output)) - if parts := strings.SplitN(version, ":", 2); len(parts) == 2 { - version = strings.TrimSpace(parts[1]) - } - } - return version -} - -func KillLxc(id string, sig int) error { - var ( - err error - output []byte - ) - _, err = exec.LookPath("lxc-kill") - if err == nil { - output, err = exec.Command("lxc-kill", "-n", id, strconv.Itoa(sig)).CombinedOutput() - } else { - output, err = exec.Command("lxc-stop", "-k", "-n", id, strconv.Itoa(sig)).CombinedOutput() - } - if err != nil { - return fmt.Errorf("Err: %s Output: %s", 
err, output) - } - return nil -} - -// wait for the process to start and return the pid for the process -func (d *driver) waitForStart(c *execdriver.Command, waitLock chan struct{}) (int, error) { - var ( - err error - output []byte - ) - // We wait for the container to be fully running. - // Timeout after 5 seconds. In case of broken pipe, just retry. - // Note: The container can run and finish correctly before - // the end of this loop - for now := time.Now(); time.Since(now) < 5*time.Second; { - select { - case <-waitLock: - // If the process dies while waiting for it, just return - return -1, nil - default: - } - - output, err = d.getInfo(c.ID) - if err != nil { - output, err = d.getInfo(c.ID) - if err != nil { - return -1, err - } - } - info, err := parseLxcInfo(string(output)) - if err != nil { - return -1, err - } - if info.Running { - return info.Pid, nil - } - time.Sleep(50 * time.Millisecond) - } - return -1, execdriver.ErrNotRunning -} - -func (d *driver) getInfo(id string) ([]byte, error) { - return exec.Command("lxc-info", "-n", id).CombinedOutput() -} - -type info struct { - ID string - driver *driver -} - -func (i *info) IsRunning() bool { - var running bool - - output, err := i.driver.getInfo(i.ID) - if err != nil { - utils.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output) - return false - } - if strings.Contains(string(output), "RUNNING") { - running = true - } - return running -} - -func (d *driver) Info(id string) execdriver.Info { - return &info{ - ID: id, - driver: d, - } -} - -func (d *driver) GetPidsForContainer(id string) ([]int, error) { - pids := []int{} - - // cpu is chosen because it is the only non optional subsystem in cgroups - subsystem := "cpu" - cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem) - if err != nil { - return pids, err - } - - cgroupDir, err := cgroups.GetThisCgroupDir(subsystem) - if err != nil { - return pids, err - } - - filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks") - if _, err := os.Stat(filename); os.IsNotExist(err) { - // With more recent lxc versions use, cgroup will be in lxc/ - filename = filepath.Join(cgroupRoot, cgroupDir, "lxc", id, "tasks") - } - - output, err := ioutil.ReadFile(filename) - if err != nil { - return pids, err - } - for _, p := range strings.Split(string(output), "\n") { - if len(p) == 0 { - continue - } - pid, err := strconv.Atoi(p) - if err != nil { - return pids, fmt.Errorf("Invalid pid '%s': %s", p, err) - } - pids = append(pids, pid) - } - return pids, nil -} - -func linkLxcStart(root string) error { - sourcePath, err := exec.LookPath("lxc-start") - if err != nil { - return err - } - targetPath := path.Join(root, "lxc-start-unconfined") - - if _, err := os.Lstat(targetPath); err != nil && !os.IsNotExist(err) { - return err - } else if err == nil { - if err := os.Remove(targetPath); err != nil { - return err - } - } - return os.Symlink(sourcePath, targetPath) -} - -// TODO: This can be moved to the mountinfo reader in the mount pkg -func rootIsShared() bool { - if data, err := ioutil.ReadFile("/proc/self/mountinfo"); err == nil { - for _, line := range strings.Split(string(data), "\n") { - cols := strings.Split(line, " ") - if len(cols) >= 6 && cols[4] == "/" { - return strings.HasPrefix(cols[6], "shared") - } - } - } - - // No idea, probably safe to assume so - return true -} - -func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) { - root := path.Join(d.root, "containers", c.ID, "config.lxc") - fo, err := os.Create(root) - if err != nil { 
- return "", err - } - defer fo.Close() - - if err := LxcTemplateCompiled.Execute(fo, struct { - *execdriver.Command - AppArmor bool - }{ - Command: c, - AppArmor: d.apparmor, - }); err != nil { - return "", err - } - return root, nil -} diff -Nru docker.io-0.9.1~dfsg1/execdriver/lxc/info.go docker.io-1.3.2~dfsg1/execdriver/lxc/info.go --- docker.io-0.9.1~dfsg1/execdriver/lxc/info.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/execdriver/lxc/info.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -package lxc - -import ( - "bufio" - "errors" - "strconv" - "strings" -) - -var ( - ErrCannotParse = errors.New("cannot parse raw input") -) - -type lxcInfo struct { - Running bool - Pid int -} - -func parseLxcInfo(raw string) (*lxcInfo, error) { - if raw == "" { - return nil, ErrCannotParse - } - var ( - err error - s = bufio.NewScanner(strings.NewReader(raw)) - info = &lxcInfo{} - ) - for s.Scan() { - text := s.Text() - - if s.Err() != nil { - return nil, s.Err() - } - - parts := strings.Split(text, ":") - if len(parts) < 2 { - continue - } - switch strings.ToLower(strings.TrimSpace(parts[0])) { - case "state": - info.Running = strings.TrimSpace(parts[1]) == "RUNNING" - case "pid": - info.Pid, err = strconv.Atoi(strings.TrimSpace(parts[1])) - if err != nil { - return nil, err - } - } - } - return info, nil -} diff -Nru docker.io-0.9.1~dfsg1/execdriver/lxc/info_test.go docker.io-1.3.2~dfsg1/execdriver/lxc/info_test.go --- docker.io-0.9.1~dfsg1/execdriver/lxc/info_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/execdriver/lxc/info_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,36 +0,0 @@ -package lxc - -import ( - "testing" -) - -func TestParseRunningInfo(t *testing.T) { - raw := ` - state: RUNNING - pid: 50` - - info, err := parseLxcInfo(raw) - if err != nil { - t.Fatal(err) - } - if !info.Running { - t.Fatal("info should return a running state") - } - if info.Pid != 50 { - t.Fatalf("info should have pid 50 got %d", info.Pid) - } -} - -func TestEmptyInfo(t *testing.T) { - _, err := parseLxcInfo("") - if err == nil { - t.Fatal("error should not be nil") - } -} - -func TestBadInfo(t *testing.T) { - _, err := parseLxcInfo("state") - if err != nil { - t.Fatal(err) - } -} diff -Nru docker.io-0.9.1~dfsg1/execdriver/lxc/init.go docker.io-1.3.2~dfsg1/execdriver/lxc/init.go --- docker.io-0.9.1~dfsg1/execdriver/lxc/init.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/execdriver/lxc/init.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,146 +0,0 @@ -package lxc - -import ( - "fmt" - "github.com/dotcloud/docker/execdriver" - "github.com/dotcloud/docker/pkg/netlink" - "github.com/dotcloud/docker/pkg/user" - "github.com/syndtr/gocapability/capability" - "net" - "os" - "strings" - "syscall" -) - -func setupHostname(args *execdriver.InitArgs) error { - hostname := getEnv(args, "HOSTNAME") - if hostname == "" { - return nil - } - return setHostname(hostname) -} - -// Setup networking -func setupNetworking(args *execdriver.InitArgs) error { - if args.Ip != "" { - // eth0 - iface, err := net.InterfaceByName("eth0") - if err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - ip, ipNet, err := net.ParseCIDR(args.Ip) - if err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - if err := netlink.NetworkLinkAddIp(iface, ip, ipNet); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - if err := netlink.NetworkSetMTU(iface, args.Mtu); err != nil { - return fmt.Errorf("Unable to set MTU: %v", 
err) - } - if err := netlink.NetworkLinkUp(iface); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - - // loopback - iface, err = net.InterfaceByName("lo") - if err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - if err := netlink.NetworkLinkUp(iface); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - } - if args.Gateway != "" { - gw := net.ParseIP(args.Gateway) - if gw == nil { - return fmt.Errorf("Unable to set up networking, %s is not a valid gateway IP", args.Gateway) - } - - if err := netlink.AddDefaultGw(gw); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - } - - return nil -} - -// Setup working directory -func setupWorkingDirectory(args *execdriver.InitArgs) error { - if args.WorkDir == "" { - return nil - } - if err := syscall.Chdir(args.WorkDir); err != nil { - return fmt.Errorf("Unable to change dir to %v: %v", args.WorkDir, err) - } - return nil -} - -// Takes care of dropping privileges to the desired user -func changeUser(args *execdriver.InitArgs) error { - uid, gid, suppGids, err := user.GetUserGroupSupplementary( - args.User, - syscall.Getuid(), syscall.Getgid(), - ) - if err != nil { - return err - } - - if err := syscall.Setgroups(suppGids); err != nil { - return fmt.Errorf("Setgroups failed: %v", err) - } - if err := syscall.Setgid(gid); err != nil { - return fmt.Errorf("Setgid failed: %v", err) - } - if err := syscall.Setuid(uid); err != nil { - return fmt.Errorf("Setuid failed: %v", err) - } - - return nil -} - -func setupCapabilities(args *execdriver.InitArgs) error { - if args.Privileged { - return nil - } - - drop := []capability.Cap{ - capability.CAP_SETPCAP, - capability.CAP_SYS_MODULE, - capability.CAP_SYS_RAWIO, - capability.CAP_SYS_PACCT, - capability.CAP_SYS_ADMIN, - capability.CAP_SYS_NICE, - capability.CAP_SYS_RESOURCE, - capability.CAP_SYS_TIME, - capability.CAP_SYS_TTY_CONFIG, - capability.CAP_MKNOD, - capability.CAP_AUDIT_WRITE, - capability.CAP_AUDIT_CONTROL, - capability.CAP_MAC_OVERRIDE, - capability.CAP_MAC_ADMIN, - capability.CAP_NET_ADMIN, - } - - c, err := capability.NewPid(os.Getpid()) - if err != nil { - return err - } - - c.Unset(capability.CAPS|capability.BOUNDS, drop...) 
- - if err := c.Apply(capability.CAPS | capability.BOUNDS); err != nil { - return err - } - return nil -} - -func getEnv(args *execdriver.InitArgs, key string) string { - for _, kv := range args.Env { - parts := strings.SplitN(kv, "=", 2) - if parts[0] == key && len(parts) == 2 { - return parts[1] - } - } - return "" -} diff -Nru docker.io-0.9.1~dfsg1/execdriver/lxc/lxc_init_linux.go docker.io-1.3.2~dfsg1/execdriver/lxc/lxc_init_linux.go --- docker.io-0.9.1~dfsg1/execdriver/lxc/lxc_init_linux.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/execdriver/lxc/lxc_init_linux.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -// +build amd64 - -package lxc - -import ( - "syscall" -) - -func setHostname(hostname string) error { - return syscall.Sethostname([]byte(hostname)) -} diff -Nru docker.io-0.9.1~dfsg1/execdriver/lxc/lxc_init_unsupported.go docker.io-1.3.2~dfsg1/execdriver/lxc/lxc_init_unsupported.go --- docker.io-0.9.1~dfsg1/execdriver/lxc/lxc_init_unsupported.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/execdriver/lxc/lxc_init_unsupported.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -// +build !linux !amd64 - -package lxc - -func setHostname(hostname string) error { - panic("Not supported on darwin") -} diff -Nru docker.io-0.9.1~dfsg1/execdriver/lxc/lxc_template.go docker.io-1.3.2~dfsg1/execdriver/lxc/lxc_template.go --- docker.io-0.9.1~dfsg1/execdriver/lxc/lxc_template.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/execdriver/lxc/lxc_template.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,147 +0,0 @@ -package lxc - -import ( - "github.com/dotcloud/docker/execdriver" - "strings" - "text/template" -) - -const LxcTemplate = ` -{{if .Network.Interface}} -# network configuration -lxc.network.type = veth -lxc.network.link = {{.Network.Interface.Bridge}} -lxc.network.name = eth0 -{{else}} -# network is disabled (-n=false) -lxc.network.type = empty -lxc.network.flags = up -{{end}} -lxc.network.mtu = {{.Network.Mtu}} - -# root filesystem -{{$ROOTFS := .Rootfs}} -lxc.rootfs = {{$ROOTFS}} - -# use a dedicated pts for the container (and limit the number of pseudo terminal -# available) -lxc.pts = 1024 - -# disable the main console -lxc.console = none - -# no controlling tty at all -lxc.tty = 1 - -{{if .Privileged}} -lxc.cgroup.devices.allow = a -{{else}} -# no implicit access to devices -lxc.cgroup.devices.deny = a - -# /dev/null and zero -lxc.cgroup.devices.allow = c 1:3 rwm -lxc.cgroup.devices.allow = c 1:5 rwm - -# consoles -lxc.cgroup.devices.allow = c 5:1 rwm -lxc.cgroup.devices.allow = c 5:0 rwm -lxc.cgroup.devices.allow = c 4:0 rwm -lxc.cgroup.devices.allow = c 4:1 rwm - -# /dev/urandom,/dev/random -lxc.cgroup.devices.allow = c 1:9 rwm -lxc.cgroup.devices.allow = c 1:8 rwm - -# /dev/pts/ - pts namespaces are "coming soon" -lxc.cgroup.devices.allow = c 136:* rwm -lxc.cgroup.devices.allow = c 5:2 rwm - -# tuntap -lxc.cgroup.devices.allow = c 10:200 rwm - -# fuse -#lxc.cgroup.devices.allow = c 10:229 rwm - -# rtc -#lxc.cgroup.devices.allow = c 254:0 rwm -{{end}} - -# standard mount point -# Use mnt.putold as per https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/986385 -lxc.pivotdir = lxc_putold - -# NOTICE: These mounts must be applied within the namespace - -# WARNING: procfs is a known attack vector and should probably be disabled -# if your userspace allows it. eg. 
see http://blog.zx2c4.com/749 -lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc nosuid,nodev,noexec 0 0 - -# WARNING: sysfs is a known attack vector and should probably be disabled -# if your userspace allows it. eg. see http://bit.ly/T9CkqJ -lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0 - -{{if .Tty}} -lxc.mount.entry = {{.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 0 -{{end}} - -lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts newinstance,ptmxmode=0666,nosuid,noexec 0 0 -lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs size=65536k,nosuid,nodev,noexec 0 0 - -{{if .Privileged}} -{{if .AppArmor}} -lxc.aa_profile = unconfined -{{else}} -#lxc.aa_profile = unconfined -{{end}} -{{end}} - -# limits -{{if .Resources}} -{{if .Resources.Memory}} -lxc.cgroup.memory.limit_in_bytes = {{.Resources.Memory}} -lxc.cgroup.memory.soft_limit_in_bytes = {{.Resources.Memory}} -{{with $memSwap := getMemorySwap .Resources}} -lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}} -{{end}} -{{end}} -{{if .Resources.CpuShares}} -lxc.cgroup.cpu.shares = {{.Resources.CpuShares}} -{{end}} -{{end}} - -{{if .Config}} -{{range $value := .Config}} -{{$value}} -{{end}} -{{end}} -` - -var LxcTemplateCompiled *template.Template - -// Escape spaces in strings according to the fstab documentation, which is the -// format for "lxc.mount.entry" lines in lxc.conf. See also "man 5 fstab". -func escapeFstabSpaces(field string) string { - return strings.Replace(field, " ", "\\040", -1) -} - -func getMemorySwap(v *execdriver.Resources) int64 { - // By default, MemorySwap is set to twice the size of RAM. - // If you want to omit MemorySwap, set it to `-1'. - if v.MemorySwap < 0 { - return 0 - } - return v.Memory * 2 -} - -func init() { - var err error - funcMap := template.FuncMap{ - "getMemorySwap": getMemorySwap, - "escapeFstabSpaces": escapeFstabSpaces, - } - LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate) - if err != nil { - panic(err) - } -} diff -Nru docker.io-0.9.1~dfsg1/execdriver/lxc/lxc_template_unit_test.go docker.io-1.3.2~dfsg1/execdriver/lxc/lxc_template_unit_test.go --- docker.io-0.9.1~dfsg1/execdriver/lxc/lxc_template_unit_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/execdriver/lxc/lxc_template_unit_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,133 +0,0 @@ -package lxc - -import ( - "bufio" - "fmt" - "github.com/dotcloud/docker/execdriver" - "io/ioutil" - "math/rand" - "os" - "path" - "strings" - "testing" - "time" -) - -func TestLXCConfig(t *testing.T) { - root, err := ioutil.TempDir("", "TestLXCConfig") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(root) - - os.MkdirAll(path.Join(root, "containers", "1"), 0777) - - // Memory is allocated randomly for testing - rand.Seed(time.Now().UTC().UnixNano()) - var ( - memMin = 33554432 - memMax = 536870912 - mem = memMin + rand.Intn(memMax-memMin) - cpuMin = 100 - cpuMax = 10000 - cpu = cpuMin + rand.Intn(cpuMax-cpuMin) - ) - - driver, err := NewDriver(root, false) - if err != nil { - t.Fatal(err) - } - command := &execdriver.Command{ - ID: "1", - Resources: &execdriver.Resources{ - Memory: int64(mem), - CpuShares: int64(cpu), - }, - Network: &execdriver.Network{ - Mtu: 1500, - Interface: nil, - }, - } - p, err := driver.generateLXCConfig(command) - if err != nil { - t.Fatal(err) - } - grepFile(t, p, - fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem)) - - grepFile(t, p, - 
fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2)) -} - -func TestCustomLxcConfig(t *testing.T) { - root, err := ioutil.TempDir("", "TestCustomLxcConfig") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(root) - - os.MkdirAll(path.Join(root, "containers", "1"), 0777) - - driver, err := NewDriver(root, false) - if err != nil { - t.Fatal(err) - } - command := &execdriver.Command{ - ID: "1", - Privileged: false, - Config: []string{ - "lxc.utsname = docker", - "lxc.cgroup.cpuset.cpus = 0,1", - }, - Network: &execdriver.Network{ - Mtu: 1500, - Interface: nil, - }, - } - - p, err := driver.generateLXCConfig(command) - if err != nil { - t.Fatal(err) - } - - grepFile(t, p, "lxc.utsname = docker") - grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") -} - -func grepFile(t *testing.T, path string, pattern string) { - f, err := os.Open(path) - if err != nil { - t.Fatal(err) - } - defer f.Close() - r := bufio.NewReader(f) - var ( - line string - ) - err = nil - for err == nil { - line, err = r.ReadString('\n') - if strings.Contains(line, pattern) == true { - return - } - } - t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path) -} - -func TestEscapeFstabSpaces(t *testing.T) { - var testInputs = map[string]string{ - " ": "\\040", - "": "", - "/double space": "/double\\040\\040space", - "/some long test string": "/some\\040long\\040test\\040string", - "/var/lib/docker": "/var/lib/docker", - " leading": "\\040leading", - "trailing ": "trailing\\040", - } - for in, exp := range testInputs { - if out := escapeFstabSpaces(in); exp != out { - t.Logf("Expected %s got %s", exp, out) - t.Fail() - } - } -} diff -Nru docker.io-0.9.1~dfsg1/execdriver/MAINTAINERS docker.io-1.3.2~dfsg1/execdriver/MAINTAINERS --- docker.io-0.9.1~dfsg1/execdriver/MAINTAINERS 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/execdriver/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -Michael Crosby (@crosbymichael) -Guillaume Charmes (@creack) diff -Nru docker.io-0.9.1~dfsg1/execdriver/native/default_template.go docker.io-1.3.2~dfsg1/execdriver/native/default_template.go --- docker.io-0.9.1~dfsg1/execdriver/native/default_template.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/execdriver/native/default_template.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,101 +0,0 @@ -package native - -import ( - "fmt" - "github.com/dotcloud/docker/execdriver" - "github.com/dotcloud/docker/pkg/cgroups" - "github.com/dotcloud/docker/pkg/libcontainer" - "os" -) - -// createContainer populates and configures the container type with the -// data provided by the execdriver.Command -func createContainer(c *execdriver.Command) *libcontainer.Container { - container := getDefaultTemplate() - - container.Hostname = getEnv("HOSTNAME", c.Env) - container.Tty = c.Tty - container.User = c.User - container.WorkingDir = c.WorkingDir - container.Env = c.Env - - loopbackNetwork := libcontainer.Network{ - Mtu: c.Network.Mtu, - Address: fmt.Sprintf("%s/%d", "127.0.0.1", 0), - Gateway: "localhost", - Type: "loopback", - Context: libcontainer.Context{}, - } - - container.Networks = []*libcontainer.Network{ - &loopbackNetwork, - } - - if c.Network.Interface != nil { - vethNetwork := libcontainer.Network{ - Mtu: c.Network.Mtu, - Address: fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen), - Gateway: c.Network.Interface.Gateway, - Type: "veth", - Context: libcontainer.Context{ - "prefix": "veth", - "bridge": c.Network.Interface.Bridge, - }, - } - 
container.Networks = append(container.Networks, &vethNetwork) - } - - container.Cgroups.Name = c.ID - if c.Privileged { - container.Capabilities = nil - container.Cgroups.DeviceAccess = true - container.Context["apparmor_profile"] = "unconfined" - } - if c.Resources != nil { - container.Cgroups.CpuShares = c.Resources.CpuShares - container.Cgroups.Memory = c.Resources.Memory - container.Cgroups.MemorySwap = c.Resources.MemorySwap - } - // check to see if we are running in ramdisk to disable pivot root - container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" - - return container -} - -// getDefaultTemplate returns the docker default for -// the libcontainer configuration file -func getDefaultTemplate() *libcontainer.Container { - return &libcontainer.Container{ - Capabilities: libcontainer.Capabilities{ - libcontainer.GetCapability("SETPCAP"), - libcontainer.GetCapability("SYS_MODULE"), - libcontainer.GetCapability("SYS_RAWIO"), - libcontainer.GetCapability("SYS_PACCT"), - libcontainer.GetCapability("SYS_ADMIN"), - libcontainer.GetCapability("SYS_NICE"), - libcontainer.GetCapability("SYS_RESOURCE"), - libcontainer.GetCapability("SYS_TIME"), - libcontainer.GetCapability("SYS_TTY_CONFIG"), - libcontainer.GetCapability("MKNOD"), - libcontainer.GetCapability("AUDIT_WRITE"), - libcontainer.GetCapability("AUDIT_CONTROL"), - libcontainer.GetCapability("MAC_OVERRIDE"), - libcontainer.GetCapability("MAC_ADMIN"), - libcontainer.GetCapability("NET_ADMIN"), - }, - Namespaces: libcontainer.Namespaces{ - libcontainer.GetNamespace("NEWNS"), - libcontainer.GetNamespace("NEWUTS"), - libcontainer.GetNamespace("NEWIPC"), - libcontainer.GetNamespace("NEWPID"), - libcontainer.GetNamespace("NEWNET"), - }, - Cgroups: &cgroups.Cgroup{ - Parent: "docker", - DeviceAccess: false, - }, - Context: libcontainer.Context{ - "apparmor_profile": "docker-default", - }, - } -} diff -Nru docker.io-0.9.1~dfsg1/execdriver/native/driver.go docker.io-1.3.2~dfsg1/execdriver/native/driver.go --- docker.io-0.9.1~dfsg1/execdriver/native/driver.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/execdriver/native/driver.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,251 +0,0 @@ -package native - -import ( - "encoding/json" - "fmt" - "github.com/dotcloud/docker/execdriver" - "github.com/dotcloud/docker/pkg/cgroups" - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/libcontainer/apparmor" - "github.com/dotcloud/docker/pkg/libcontainer/nsinit" - "github.com/dotcloud/docker/pkg/system" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "syscall" -) - -const ( - DriverName = "native" - Version = "0.1" -) - -func init() { - execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { - var ( - container *libcontainer.Container - ns = nsinit.NewNsInit(&nsinit.DefaultCommandFactory{}, &nsinit.DefaultStateWriter{args.Root}) - ) - f, err := os.Open(filepath.Join(args.Root, "container.json")) - if err != nil { - return err - } - if err := json.NewDecoder(f).Decode(&container); err != nil { - f.Close() - return err - } - f.Close() - - cwd, err := os.Getwd() - if err != nil { - return err - } - syncPipe, err := nsinit.NewSyncPipeFromFd(0, uintptr(args.Pipe)) - if err != nil { - return err - } - if err := ns.Init(container, cwd, args.Console, syncPipe, args.Args); err != nil { - return err - } - return nil - }) -} - -type driver struct { - root string -} - -func NewDriver(root string) (*driver, error) { - if err := os.MkdirAll(root, 0700); err != nil { - return 
nil, err - } - if err := apparmor.InstallDefaultProfile(); err != nil { - return nil, err - } - return &driver{ - root: root, - }, nil -} - -func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { - if err := d.validateCommand(c); err != nil { - return -1, err - } - var ( - term nsinit.Terminal - container = createContainer(c) - factory = &dockerCommandFactory{c: c, driver: d} - stateWriter = &dockerStateWriter{ - callback: startCallback, - c: c, - dsw: &nsinit.DefaultStateWriter{filepath.Join(d.root, c.ID)}, - } - ns = nsinit.NewNsInit(factory, stateWriter) - args = append([]string{c.Entrypoint}, c.Arguments...) - ) - if err := d.createContainerRoot(c.ID); err != nil { - return -1, err - } - defer d.removeContainerRoot(c.ID) - - if c.Tty { - term = &dockerTtyTerm{ - pipes: pipes, - } - } else { - term = &dockerStdTerm{ - pipes: pipes, - } - } - c.Terminal = term - if err := d.writeContainerFile(container, c.ID); err != nil { - return -1, err - } - return ns.Exec(container, term, args) -} - -func (d *driver) Kill(p *execdriver.Command, sig int) error { - err := syscall.Kill(p.Process.Pid, syscall.Signal(sig)) - d.removeContainerRoot(p.ID) - return err -} - -func (d *driver) Info(id string) execdriver.Info { - return &info{ - ID: id, - driver: d, - } -} - -func (d *driver) Name() string { - return fmt.Sprintf("%s-%s", DriverName, Version) -} - -// TODO: this can be improved with our driver -// there has to be a better way to do this -func (d *driver) GetPidsForContainer(id string) ([]int, error) { - pids := []int{} - - subsystem := "devices" - cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem) - if err != nil { - return pids, err - } - cgroupDir, err := cgroups.GetThisCgroupDir(subsystem) - if err != nil { - return pids, err - } - - filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks") - if _, err := os.Stat(filename); os.IsNotExist(err) { - filename = filepath.Join(cgroupRoot, cgroupDir, "docker", id, "tasks") - } - - output, err := ioutil.ReadFile(filename) - if err != nil { - return pids, err - } - for _, p := range strings.Split(string(output), "\n") { - if len(p) == 0 { - continue - } - pid, err := strconv.Atoi(p) - if err != nil { - return pids, fmt.Errorf("Invalid pid '%s': %s", p, err) - } - pids = append(pids, pid) - } - return pids, nil -} - -func (d *driver) writeContainerFile(container *libcontainer.Container, id string) error { - data, err := json.Marshal(container) - if err != nil { - return err - } - return ioutil.WriteFile(filepath.Join(d.root, id, "container.json"), data, 0655) -} - -func (d *driver) createContainerRoot(id string) error { - return os.MkdirAll(filepath.Join(d.root, id), 0655) -} - -func (d *driver) removeContainerRoot(id string) error { - return os.RemoveAll(filepath.Join(d.root, id)) -} - -func (d *driver) validateCommand(c *execdriver.Command) error { - // we need to check the Config of the command to make sure that we - // do not have any of the lxc-conf variables - for _, conf := range c.Config { - if strings.Contains(conf, "lxc") { - return fmt.Errorf("%s is not supported by the native driver", conf) - } - } - return nil -} - -func getEnv(key string, env []string) string { - for _, pair := range env { - parts := strings.Split(pair, "=") - if parts[0] == key { - return parts[1] - } - } - return "" -} - -type dockerCommandFactory struct { - c *execdriver.Command - driver *driver -} - -// createCommand will return an exec.Cmd with the Cloneflags set to the proper namespaces 
-// defined on the container's configuration and use the current binary as the init with the -// args provided -func (d *dockerCommandFactory) Create(container *libcontainer.Container, console string, syncFile *os.File, args []string) *exec.Cmd { - // we need to join the rootfs because nsinit will setup the rootfs and chroot - initPath := filepath.Join(d.c.Rootfs, d.c.InitPath) - - d.c.Path = initPath - d.c.Args = append([]string{ - initPath, - "-driver", DriverName, - "-console", console, - "-pipe", "3", - "-root", filepath.Join(d.driver.root, d.c.ID), - "--", - }, args...) - - // set this to nil so that when we set the clone flags anything else is reset - d.c.SysProcAttr = nil - system.SetCloneFlags(&d.c.Cmd, uintptr(nsinit.GetNamespaceFlags(container.Namespaces))) - d.c.ExtraFiles = []*os.File{syncFile} - - d.c.Env = container.Env - d.c.Dir = d.c.Rootfs - - return &d.c.Cmd -} - -type dockerStateWriter struct { - dsw nsinit.StateWriter - c *execdriver.Command - callback execdriver.StartCallback -} - -func (d *dockerStateWriter) WritePid(pid int) error { - d.c.ContainerPid = pid - err := d.dsw.WritePid(pid) - if d.callback != nil { - d.callback(d.c) - } - return err -} - -func (d *dockerStateWriter) DeletePid() error { - return d.dsw.DeletePid() -} diff -Nru docker.io-0.9.1~dfsg1/execdriver/native/info.go docker.io-1.3.2~dfsg1/execdriver/native/info.go --- docker.io-0.9.1~dfsg1/execdriver/native/info.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/execdriver/native/info.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -package native - -import ( - "os" - "path/filepath" -) - -type info struct { - ID string - driver *driver -} - -// IsRunning is determined by looking for the -// pid file for a container. If the file exists then the -// container is currently running -func (i *info) IsRunning() bool { - if _, err := os.Stat(filepath.Join(i.driver.root, i.ID, "pid")); err == nil { - return true - } - return false -} diff -Nru docker.io-0.9.1~dfsg1/execdriver/native/term.go docker.io-1.3.2~dfsg1/execdriver/native/term.go --- docker.io-0.9.1~dfsg1/execdriver/native/term.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/execdriver/native/term.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ -/* - These types are wrappers around the libcontainer Terminal interface so that - we can resuse the docker implementations where possible. 
-*/ -package native - -import ( - "github.com/dotcloud/docker/execdriver" - "io" - "os" - "os/exec" -) - -type dockerStdTerm struct { - execdriver.StdConsole - pipes *execdriver.Pipes -} - -func (d *dockerStdTerm) Attach(cmd *exec.Cmd) error { - return d.AttachPipes(cmd, d.pipes) -} - -func (d *dockerStdTerm) SetMaster(master *os.File) { - // do nothing -} - -type dockerTtyTerm struct { - execdriver.TtyConsole - pipes *execdriver.Pipes -} - -func (t *dockerTtyTerm) Attach(cmd *exec.Cmd) error { - go io.Copy(t.pipes.Stdout, t.MasterPty) - if t.pipes.Stdin != nil { - go io.Copy(t.MasterPty, t.pipes.Stdin) - } - return nil -} - -func (t *dockerTtyTerm) SetMaster(master *os.File) { - t.MasterPty = master -} diff -Nru docker.io-0.9.1~dfsg1/execdriver/pipes.go docker.io-1.3.2~dfsg1/execdriver/pipes.go --- docker.io-0.9.1~dfsg1/execdriver/pipes.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/execdriver/pipes.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -package execdriver - -import ( - "io" -) - -// Pipes is a wrapper around a containers output for -// stdin, stdout, stderr -type Pipes struct { - Stdin io.ReadCloser - Stdout, Stderr io.Writer -} - -func NewPipes(stdin io.ReadCloser, stdout, stderr io.Writer, useStdin bool) *Pipes { - p := &Pipes{ - Stdout: stdout, - Stderr: stderr, - } - if useStdin { - p.Stdin = stdin - } - return p -} diff -Nru docker.io-0.9.1~dfsg1/execdriver/termconsole.go docker.io-1.3.2~dfsg1/execdriver/termconsole.go --- docker.io-0.9.1~dfsg1/execdriver/termconsole.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/execdriver/termconsole.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,126 +0,0 @@ -package execdriver - -import ( - "github.com/dotcloud/docker/pkg/term" - "github.com/kr/pty" - "io" - "os" - "os/exec" -) - -func SetTerminal(command *Command, pipes *Pipes) error { - var ( - term Terminal - err error - ) - if command.Tty { - term, err = NewTtyConsole(command, pipes) - } else { - term, err = NewStdConsole(command, pipes) - } - if err != nil { - return err - } - command.Terminal = term - return nil -} - -type TtyConsole struct { - MasterPty *os.File - SlavePty *os.File -} - -func NewTtyConsole(command *Command, pipes *Pipes) (*TtyConsole, error) { - ptyMaster, ptySlave, err := pty.Open() - if err != nil { - return nil, err - } - tty := &TtyConsole{ - MasterPty: ptyMaster, - SlavePty: ptySlave, - } - if err := tty.AttachPipes(&command.Cmd, pipes); err != nil { - tty.Close() - return nil, err - } - command.Console = tty.SlavePty.Name() - return tty, nil -} - -func (t *TtyConsole) Master() *os.File { - return t.MasterPty -} - -func (t *TtyConsole) Resize(h, w int) error { - return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) -} - -func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *Pipes) error { - command.Stdout = t.SlavePty - command.Stderr = t.SlavePty - - go func() { - if wb, ok := pipes.Stdout.(interface { - CloseWriters() error - }); ok { - defer wb.CloseWriters() - } - io.Copy(pipes.Stdout, t.MasterPty) - }() - - if pipes.Stdin != nil { - command.Stdin = t.SlavePty - command.SysProcAttr.Setctty = true - - go func() { - defer pipes.Stdin.Close() - io.Copy(t.MasterPty, pipes.Stdin) - }() - } - return nil -} - -func (t *TtyConsole) Close() error { - t.SlavePty.Close() - return t.MasterPty.Close() -} - -type StdConsole struct { -} - -func NewStdConsole(command *Command, pipes *Pipes) (*StdConsole, error) { - std := &StdConsole{} - - if err := std.AttachPipes(&command.Cmd, 
pipes); err != nil { - return nil, err - } - return std, nil -} - -func (s *StdConsole) AttachPipes(command *exec.Cmd, pipes *Pipes) error { - command.Stdout = pipes.Stdout - command.Stderr = pipes.Stderr - - if pipes.Stdin != nil { - stdin, err := command.StdinPipe() - if err != nil { - return err - } - - go func() { - defer stdin.Close() - io.Copy(stdin, pipes.Stdin) - }() - } - return nil -} - -func (s *StdConsole) Resize(h, w int) error { - // we do not need to reside a non tty - return nil -} - -func (s *StdConsole) Close() error { - // nothing to close here - return nil -} diff -Nru docker.io-0.9.1~dfsg1/FIXME docker.io-1.3.2~dfsg1/FIXME --- docker.io-0.9.1~dfsg1/FIXME 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/FIXME 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ - -## FIXME - -This file is a loose collection of things to improve in the codebase, for the internal -use of the maintainers. - -They are not big enough to be in the roadmap, not user-facing enough to be github issues, -and not important enough to be discussed in the mailing list. - -They are just like FIXME comments in the source code, except we're not sure where in the source -to put them - so we put them here :) - - -* Run linter on codebase -* Unify build commands and regular commands -* Move source code into src/ subdir for clarity -* docker build: on non-existent local path for ADD, don't show full absolute path on the host -* use size header for progress bar in pull -* Clean up context upload in build!!! -* Parallel pull -* Upgrade dockerd without stopping containers -* Simple command to remove all untagged images (`docker rmi $(docker images | awk '/^/ { print $3 }')`) -* Simple command to clean up containers for disk space -* Clean up the ProgressReader api, it's a PITA to use diff -Nru docker.io-0.9.1~dfsg1/.gitignore docker.io-1.3.2~dfsg1/.gitignore --- docker.io-0.9.1~dfsg1/.gitignore 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/.gitignore 2014-11-24 17:38:01.000000000 +0000 @@ -23,3 +23,7 @@ vendor/pkg/ pyenv Vagrantfile +docs/AWS_S3_BUCKET +docs/GIT_BRANCH +docs/VERSION +docs/GITCOMMIT diff -Nru docker.io-0.9.1~dfsg1/graph/export.go docker.io-1.3.2~dfsg1/graph/export.go --- docker.io-0.9.1~dfsg1/graph/export.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graph/export.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,168 @@ +package graph + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/parsers" +) + +// CmdImageExport exports all images with the given tag. All versions +// containing the same tag are exported. The resulting output is an +// uncompressed tar ball. +// name is the set of tags to export. +// out is the writer where the images are written to. 
+func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
+	if len(job.Args) < 1 {
+		return job.Errorf("Usage: %s IMAGE [IMAGE...]\n", job.Name)
+	}
+	// get image json
+	tempdir, err := ioutil.TempDir("", "docker-export-")
+	if err != nil {
+		return job.Error(err)
+	}
+	defer os.RemoveAll(tempdir)
+
+	rootRepoMap := map[string]Repository{}
+	for _, name := range job.Args {
+		log.Debugf("Serializing %s", name)
+		rootRepo := s.Repositories[name]
+		if rootRepo != nil {
+			// this is a base repo name, like 'busybox'
+			for _, id := range rootRepo {
+				if _, ok := rootRepoMap[name]; !ok {
+					rootRepoMap[name] = rootRepo
+				} else {
+					log.Debugf("Duplicate key [%s]", name)
+					if rootRepoMap[name].Contains(rootRepo) {
+						log.Debugf("skipping, because it is present [%s:%q]", name, rootRepo)
+						continue
+					}
+					log.Debugf("updating [%s]: [%q] with [%q]", name, rootRepoMap[name], rootRepo)
+					rootRepoMap[name].Update(rootRepo)
+				}
+
+				if err := s.exportImage(job.Eng, id, tempdir); err != nil {
+					return job.Error(err)
+				}
+			}
+		} else {
+			img, err := s.LookupImage(name)
+			if err != nil {
+				return job.Error(err)
+			}
+
+			if img != nil {
+				// This is a named image like 'busybox:latest'
+				repoName, repoTag := parsers.ParseRepositoryTag(name)
+
+				// check this length, because a lookup of a truncated hash will not have a tag
+				// and will not need to be added to this map
+				if len(repoTag) > 0 {
+					if _, ok := rootRepoMap[repoName]; !ok {
+						rootRepoMap[repoName] = Repository{repoTag: img.ID}
+					} else {
+						log.Debugf("Duplicate key [%s]", repoName)
+						newRepo := Repository{repoTag: img.ID}
+						if rootRepoMap[repoName].Contains(newRepo) {
+							log.Debugf("skipping, because it is present [%s:%q]", repoName, newRepo)
+							continue
+						}
+						log.Debugf("updating [%s]: [%q] with [%q]", repoName, rootRepoMap[repoName], newRepo)
+						rootRepoMap[repoName].Update(newRepo)
+					}
+				}
+				if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil {
+					return job.Error(err)
+				}
+
+			} else {
+				// this must be an image ID that could not be looked up by name
+				if err := s.exportImage(job.Eng, name, tempdir); err != nil {
+					return job.Error(err)
+				}
+			}
+		}
+		log.Debugf("End Serializing %s", name)
+	}
+	// write repositories, if there is something to write
+	if len(rootRepoMap) > 0 {
+		rootRepoJSON, _ := json.Marshal(rootRepoMap)
+		if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJSON, os.FileMode(0644)); err != nil {
+			return job.Error(err)
+		}
+	} else {
+		log.Debugf("There were no repositories to write")
+	}
+
+	fs, err := archive.Tar(tempdir, archive.Uncompressed)
+	if err != nil {
+		return job.Error(err)
+	}
+	defer fs.Close()
+
+	if _, err := io.Copy(job.Stdout, fs); err != nil {
+		return job.Error(err)
+	}
+	log.Debugf("End export job: %s", job.Name)
+	return engine.StatusOK
+}
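The duplicate-key branches above merge tags for a repository that is exported under several names. A standalone sketch of that merge rule (not part of the patch; `repository` is a hypothetical stand-in for the tag-to-image-ID map that graph.Repository's Update method operates on):

package main

import "fmt"

type repository map[string]string // hypothetical stand-in for graph.Repository

// merge folds the incoming tag -> image ID entries into dst,
// mirroring what Repository.Update is used for above.
func merge(dst, src repository) {
	for tag, id := range src {
		dst[tag] = id
	}
}

func main() {
	repos := map[string]repository{}
	add := func(name string, incoming repository) {
		if existing, ok := repos[name]; ok {
			merge(existing, incoming) // duplicate key: update in place
		} else {
			repos[name] = incoming
		}
	}
	add("busybox", repository{"latest": "a1"})
	add("busybox", repository{"1.0": "b2"})
	fmt.Println(repos) // map[busybox:map[1.0:b2 latest:a1]]
}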
+
+// FIXME: this should be a top-level function, not a class method
+func (s *TagStore) exportImage(eng *engine.Engine, name, tempdir string) error {
+	for n := name; n != ""; {
+		// temporary directory
+		tmpImageDir := path.Join(tempdir, n)
+		if err := os.Mkdir(tmpImageDir, os.FileMode(0755)); err != nil {
+			if os.IsExist(err) {
+				return nil
+			}
+			return err
+		}
+
+		var version = "1.0"
+		var versionBuf = []byte(version)
+
+		if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.FileMode(0644)); err != nil {
+			return err
+		}
+
+		// serialize json
+		jsonFile, err := os.Create(path.Join(tmpImageDir, "json"))
+		if err != nil {
+			return err
+		}
+		job := eng.Job("image_inspect", n)
+		job.SetenvBool("raw", true)
+		job.Stdout.Add(jsonFile)
+		if err := job.Run(); err != nil {
+			return err
+		}
+
+		// serialize filesystem
+		fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar"))
+		if err != nil {
+			return err
+		}
+		job = eng.Job("image_tarlayer", n)
+		job.Stdout.Add(fsTar)
+		if err := job.Run(); err != nil {
+			return err
+		}
+
+		// find parent
+		job = eng.Job("image_get", n)
+		info, _ := job.Stdout.AddEnv()
+		if err := job.Run(); err != nil {
+			return err
+		}
+		n = info.Get("Parent")
+	}
+	return nil
+}
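exportImage above walks the parent chain one image at a time, emitting VERSION, json and layer.tar for each ID until it reaches a base layer. The loop shape in isolation, as a standalone sketch (not part of the patch; the image graph is hypothetical):

package main

import "fmt"

func main() {
	parents := map[string]string{ // hypothetical image graph: child -> parent
		"c3": "b2",
		"b2": "a1",
		"a1": "", // base layer, no parent
	}
	for n := "c3"; n != ""; {
		fmt.Println("export", n) // stand-in for writing VERSION, json, layer.tar
		n = parents[n]           // equivalent of info.Get("Parent")
	}
}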
+func NewGraph(root string, driver graphdriver.Driver) (*Graph, error) { + abspath, err := filepath.Abs(root) + if err != nil { + return nil, err + } + // Create the root directory if it doesn't exist + if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + + graph := &Graph{ + Root: abspath, + idIndex: truncindex.NewTruncIndex([]string{}), + driver: driver, + } + if err := graph.restore(); err != nil { + return nil, err + } + return graph, nil +} + +func (graph *Graph) restore() error { + dir, err := ioutil.ReadDir(graph.Root) + if err != nil { + return err + } + var ids = []string{} + for _, v := range dir { + id := v.Name() + if graph.driver.Exists(id) { + ids = append(ids, id) + } + } + graph.idIndex = truncindex.NewTruncIndex(ids) + log.Debugf("Restored %d elements", len(ids)) + return nil +} + +// FIXME: Implement error subclass instead of looking at the error text +// Note: This is the way golang implements os.IsNotExist on Plan9 +func (graph *Graph) IsNotExist(err error) bool { + return err != nil && (strings.Contains(err.Error(), "does not exist") || strings.Contains(err.Error(), "No such")) +} + +// Exists returns true if an image is registered at the given id. +// If the image doesn't exist or if an error is encountered, false is returned. +func (graph *Graph) Exists(id string) bool { + if _, err := graph.Get(id); err != nil { + return false + } + return true +} + +// Get returns the image with the given id, or an error if the image doesn't exist. +func (graph *Graph) Get(name string) (*image.Image, error) { + id, err := graph.idIndex.Get(name) + if err != nil { + return nil, err + } + img, err := image.LoadImage(graph.ImageRoot(id)) + if err != nil { + return nil, err + } + if img.ID != id { + return nil, fmt.Errorf("Image stored at '%s' has wrong id '%s'", id, img.ID) + } + img.SetGraph(graph) + + if img.Size < 0 { + size, err := graph.driver.DiffSize(img.ID, img.Parent) + if err != nil { + return nil, fmt.Errorf("unable to calculate size of image id %q: %s", img.ID, err) + } + + img.Size = size + if err := img.SaveSize(graph.ImageRoot(id)); err != nil { + return nil, err + } + } + return img, nil +} + +// Create creates a new image and registers it in the graph. +func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, containerImage, comment, author string, containerConfig, config *runconfig.Config) (*image.Image, error) { + img := &image.Image{ + ID: utils.GenerateRandomID(), + Comment: comment, + Created: time.Now().UTC(), + DockerVersion: dockerversion.VERSION, + Author: author, + Config: config, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + } + + if containerID != "" { + img.Parent = containerImage + img.Container = containerID + img.ContainerConfig = *containerConfig + } + + if err := graph.Register(img, nil, layerData); err != nil { + return nil, err + } + return img, nil +} + +// Register imports a pre-existing image into the graph. +func (graph *Graph) Register(img *image.Image, jsonData []byte, layerData archive.ArchiveReader) (err error) { + defer func() { + // If any error occurs, remove the new dir from the driver. + // Don't check for errors since the dir might not have been created. + // FIXME: this leaves a possible race condition. + if err != nil { + graph.driver.Remove(img.ID) + } + }() + if err := utils.ValidateID(img.ID); err != nil { + return err + } + // (This is a convenience to save time.
Race conditions are taken care of by os.Rename) + if graph.Exists(img.ID) { + return fmt.Errorf("Image %s already exists", img.ID) + } + + // Ensure that the image root does not exist on the filesystem + // when it is not registered in the graph. + // This is common when you switch from one graph driver to another + if err := os.RemoveAll(graph.ImageRoot(img.ID)); err != nil && !os.IsNotExist(err) { + return err + } + + // If the driver has this ID but the graph doesn't, remove it from the driver to start fresh. + // (the graph is the source of truth). + // Ignore errors, since we don't know if the driver correctly returns ErrNotExist. + // (FIXME: make that mandatory for drivers). + graph.driver.Remove(img.ID) + + tmp, err := graph.Mktemp("") + if err != nil { + return fmt.Errorf("Mktemp failed: %s", err) + } + defer os.RemoveAll(tmp) + + // Create root filesystem in the driver + if err := graph.driver.Create(img.ID, img.Parent); err != nil { + return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, img.ID, err) + } + // Apply the diff/layer + img.SetGraph(graph) + if err := image.StoreImage(img, jsonData, layerData, tmp); err != nil { + return err + } + // Commit + if err := os.Rename(tmp, graph.ImageRoot(img.ID)); err != nil { + return err + } + graph.idIndex.Add(img.ID) + return nil +} + +// TempLayerArchive creates a temporary archive of the given image's filesystem layer. +// The archive is stored on disk and will be automatically deleted as soon as it has been read. +// If output is not nil, a human-readable progress bar will be written to it. +// FIXME: does this belong in Graph? How about MktempFile, let the caller use it for archives? +func (graph *Graph) TempLayerArchive(id string, compression archive.Compression, sf *utils.StreamFormatter, output io.Writer) (*archive.TempArchive, error) { + image, err := graph.Get(id) + if err != nil { + return nil, err + } + tmp, err := graph.Mktemp("") + if err != nil { + return nil, err + } + a, err := image.TarLayer() + if err != nil { + return nil, err + } + progress := utils.ProgressReader(a, 0, output, sf, false, utils.TruncateID(id), "Buffering to disk") + defer progress.Close() + return archive.NewTempArchive(progress, tmp) +} + +// Mktemp creates a temporary sub-directory inside the graph's filesystem. +func (graph *Graph) Mktemp(id string) (string, error) { + dir := path.Join(graph.Root, "_tmp", utils.GenerateRandomID()) + if err := os.MkdirAll(dir, 0700); err != nil { + return "", err + } + return dir, nil +} + +// SetupInitLayer populates a directory with mountpoints suitable +// for bind-mounting dockerinit into the container. The mountpoint is simply an +// empty file at /.dockerinit +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer.
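+// For example, SetupInitLayer(dir) leaves dir with (sketch): +// dir/.dockerinit, dir/.dockerenv, dir/etc/resolv.conf, ... (empty files) +// dir/dev/pts, dir/dev/shm, dir/proc, dir/sys (empty dirs) +// dir/etc/mtab -> /proc/mounts (symlink)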
+func SetupInitLayer(initLayer string) error { + for pth, typ := range map[string]string{ + "/dev/pts": "dir", + "/dev/shm": "dir", + "/proc": "dir", + "/sys": "dir", + "/.dockerinit": "file", + "/.dockerenv": "file", + "/etc/resolv.conf": "file", + "/etc/hosts": "file", + "/etc/hostname": "file", + "/dev/console": "file", + "/etc/mtab": "/proc/mounts", + } { + parts := strings.Split(pth, "/") + prev := "/" + for _, p := range parts[1:] { + prev = path.Join(prev, p) + syscall.Unlink(path.Join(initLayer, prev)) + } + + if _, err := os.Stat(path.Join(initLayer, pth)); err != nil { + if os.IsNotExist(err) { + if err := os.MkdirAll(path.Join(initLayer, path.Dir(pth)), 0755); err != nil { + return err + } + switch typ { + case "dir": + if err := os.MkdirAll(path.Join(initLayer, pth), 0755); err != nil { + return err + } + case "file": + f, err := os.OpenFile(path.Join(initLayer, pth), os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + default: + if err := os.Symlink(typ, path.Join(initLayer, pth)); err != nil { + return err + } + } + } else { + return err + } + } + } + + // Layer is ready to use, if it wasn't before. + return nil +} + +// Check whether the given error is a "directory not empty" error. +// Note: this is the way golang does it internally with os.IsNotExist. +func isNotEmpty(err error) bool { + switch pe := err.(type) { + case nil: + return false + case *os.PathError: + err = pe.Err + case *os.LinkError: + err = pe.Err + } + return strings.Contains(err.Error(), " not empty") +} + +// Delete atomically removes an image from the graph. +func (graph *Graph) Delete(name string) error { + id, err := graph.idIndex.Get(name) + if err != nil { + return err + } + tmp, err := graph.Mktemp("") + graph.idIndex.Delete(id) + if err == nil { + err = os.Rename(graph.ImageRoot(id), tmp) + // On err make tmp point to old dir and cleanup unused tmp dir + if err != nil { + os.RemoveAll(tmp) + tmp = graph.ImageRoot(id) + } + } else { + // On err make tmp point to old dir for cleanup + tmp = graph.ImageRoot(id) + } + // Remove rootfs data from the driver + graph.driver.Remove(id) + // Remove the trashed image directory + return os.RemoveAll(tmp) +} + +// Map returns a map of all images in the graph, addressable by ID. +func (graph *Graph) Map() (map[string]*image.Image, error) { + images := make(map[string]*image.Image) + err := graph.walkAll(func(image *image.Image) { + images[image.ID] = image + }) + if err != nil { + return nil, err + } + return images, nil +} + +// walkAll iterates over each image in the graph, and passes it to a handler. +// The walking order is undetermined. +func (graph *Graph) walkAll(handler func(*image.Image)) error { + files, err := ioutil.ReadDir(graph.Root) + if err != nil { + return err + } + for _, st := range files { + if img, err := graph.Get(st.Name()); err != nil { + // Skip image + continue + } else if handler != nil { + handler(img) + } + } + return nil +} + +// ByParent returns a lookup table of images by their parent. +// If an image of id ID has 3 child images, then the value for key ID +// will be a list of 3 images. +// If an image has no children, it will not have an entry in the table.
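+// Eg. if images a and b were both committed on top of parent p: +// ByParent()["p"] == []*image.Image{a, b}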
+func (graph *Graph) ByParent() (map[string][]*image.Image, error) { + byParent := make(map[string][]*image.Image) + err := graph.walkAll(func(img *image.Image) { + parent, err := graph.Get(img.Parent) + if err != nil { + return + } + if children, exists := byParent[parent.ID]; exists { + byParent[parent.ID] = append(children, img) + } else { + byParent[parent.ID] = []*image.Image{img} + } + }) + return byParent, err +} + +// Heads returns all heads in the graph, keyed by id. +// A head is an image which is not the parent of another image in the graph. +func (graph *Graph) Heads() (map[string]*image.Image, error) { + heads := make(map[string]*image.Image) + byParent, err := graph.ByParent() + if err != nil { + return nil, err + } + err = graph.walkAll(func(image *image.Image) { + // If it's not in the byParent lookup table, then + // it's not a parent -> so it's a head! + if _, exists := byParent[image.ID]; !exists { + heads[image.ID] = image + } + }) + return heads, err +} + +func (graph *Graph) ImageRoot(id string) string { + return path.Join(graph.Root, id) +} + +func (graph *Graph) Driver() graphdriver.Driver { + return graph.driver +} diff -Nru docker.io-0.9.1~dfsg1/graph/history.go docker.io-1.3.2~dfsg1/graph/history.go --- docker.io-0.9.1~dfsg1/graph/history.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graph/history.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,46 @@ +package graph + +import ( + "strings" + + "github.com/docker/docker/engine" + "github.com/docker/docker/image" +) + +func (s *TagStore) CmdHistory(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s IMAGE", job.Name) + } + name := job.Args[0] + foundImage, err := s.LookupImage(name) + if err != nil { + return job.Error(err) + } + + lookupMap := make(map[string][]string) + for name, repository := range s.Repositories { + for tag, id := range repository { + // map each image ID to every repo:tag name that references it + if _, exists := lookupMap[id]; !exists { + lookupMap[id] = []string{} + } + lookupMap[id] = append(lookupMap[id], name+":"+tag) + } + } + + outs := engine.NewTable("Created", 0) + err = foundImage.WalkHistory(func(img *image.Image) error { + out := &engine.Env{} + out.Set("Id", img.ID) + out.SetInt64("Created", img.Created.Unix()) + out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " ")) + out.SetList("Tags", lookupMap[img.ID]) + out.SetInt64("Size", img.Size) + outs.Add(out) + return nil + }) + if err != nil { + return job.Error(err) + } + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff -Nru docker.io-0.9.1~dfsg1/graph/import.go docker.io-1.3.2~dfsg1/graph/import.go --- docker.io-0.9.1~dfsg1/graph/import.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graph/import.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,61 @@ +package graph + +import ( + "net/http" + "net/url" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/utils" +) + +func (s *TagStore) CmdImport(job *engine.Job) engine.Status { + if n := len(job.Args); n != 2 && n != 3 { + return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name) + } + var ( + src = job.Args[0] + repo = job.Args[1] + tag string + sf = utils.NewStreamFormatter(job.GetenvBool("json")) + archive archive.ArchiveReader + resp *http.Response + ) + if len(job.Args) > 2 { + tag = job.Args[2] + } + + if src == "-" { + archive = job.Stdin + } else { + u, err := url.Parse(src) + if err != nil {
+ return job.Error(err) + } + if u.Scheme == "" { + u.Scheme = "http" + u.Host = src + u.Path = "" + } + job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u)) + resp, err = utils.Download(u.String()) + if err != nil { + return job.Error(err) + } + progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing") + defer progressReader.Close() + archive = progressReader + } + img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, nil) + if err != nil { + return job.Error(err) + } + // Optionally register the image at REPO/TAG + if repo != "" { + if err := s.Set(repo, tag, img.ID, true); err != nil { + return job.Error(err) + } + } + job.Stdout.Write(sf.FormatStatus("", img.ID)) + return engine.StatusOK +} diff -Nru docker.io-0.9.1~dfsg1/graph/list.go docker.io-1.3.2~dfsg1/graph/list.go --- docker.io-0.9.1~dfsg1/graph/list.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graph/list.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,103 @@ +package graph + +import ( + "fmt" + "log" + "path" + "strings" + + "github.com/docker/docker/engine" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/parsers/filters" +) + +func (s *TagStore) CmdImages(job *engine.Job) engine.Status { + var ( + allImages map[string]*image.Image + err error + filt_tagged = true + ) + + imageFilters, err := filters.FromParam(job.Getenv("filters")) + if err != nil { + return job.Error(err) + } + if i, ok := imageFilters["dangling"]; ok { + for _, value := range i { + if strings.ToLower(value) == "true" { + filt_tagged = false + } + } + } + + if job.GetenvBool("all") && filt_tagged { + allImages, err = s.graph.Map() + } else { + allImages, err = s.graph.Heads() + } + if err != nil { + return job.Error(err) + } + lookup := make(map[string]*engine.Env) + s.Lock() + for name, repository := range s.Repositories { + if job.Getenv("filter") != "" { + if match, _ := path.Match(job.Getenv("filter"), name); !match { + continue + } + } + for tag, id := range repository { + image, err := s.graph.Get(id) + if err != nil { + log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err) + continue + } + + if out, exists := lookup[id]; exists { + if filt_tagged { + out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag))) + } + } else { + // get the boolean list for if only the untagged images are requested + delete(allImages, id) + if filt_tagged { + out := &engine.Env{} + out.Set("ParentId", image.Parent) + out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)}) + out.Set("Id", image.ID) + out.SetInt64("Created", image.Created.Unix()) + out.SetInt64("Size", image.Size) + out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) + lookup[id] = out + } + } + + } + } + s.Unlock() + + outs := engine.NewTable("Created", len(lookup)) + for _, value := range lookup { + outs.Add(value) + } + + // Display images which aren't part of a repository/tag + if job.Getenv("filter") == "" { + for _, image := range allImages { + out := &engine.Env{} + out.Set("ParentId", image.Parent) + out.SetList("RepoTags", []string{":"}) + out.Set("Id", image.ID) + out.SetInt64("Created", image.Created.Unix()) + out.SetInt64("Size", image.Size) + out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) + outs.Add(out) + } + } + + outs.ReverseSort() + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff -Nru 
docker.io-0.9.1~dfsg1/graph/load.go docker.io-1.3.2~dfsg1/graph/load.go --- docker.io-0.9.1~dfsg1/graph/load.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graph/load.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,129 @@ +package graph + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path" + + "github.com/docker/docker/engine" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/log" +) + +// CmdLoad loads a set of images into the repository. This is the complement of CmdImageExport. +// The input stream is an uncompressed tarball containing images and metadata. +func (s *TagStore) CmdLoad(job *engine.Job) engine.Status { + tmpImageDir, err := ioutil.TempDir("", "docker-import-") + if err != nil { + return job.Error(err) + } + defer os.RemoveAll(tmpImageDir) + + var ( + repoTarFile = path.Join(tmpImageDir, "repo.tar") + repoDir = path.Join(tmpImageDir, "repo") + ) + + tarFile, err := os.Create(repoTarFile) + if err != nil { + return job.Error(err) + } + if _, err := io.Copy(tarFile, job.Stdin); err != nil { + return job.Error(err) + } + tarFile.Close() + + repoFile, err := os.Open(repoTarFile) + if err != nil { + return job.Error(err) + } + if err := os.Mkdir(repoDir, os.ModeDir); err != nil { + return job.Error(err) + } + images, err := s.graph.Map() + if err != nil { + return job.Error(err) + } + excludes := make([]string, len(images)) + i := 0 + for k := range images { + excludes[i] = k + i++ + } + if err := chrootarchive.Untar(repoFile, repoDir, &archive.TarOptions{Excludes: excludes}); err != nil { + return job.Error(err) + } + + dirs, err := ioutil.ReadDir(repoDir) + if err != nil { + return job.Error(err) + } + + for _, d := range dirs { + if d.IsDir() { + if err := s.recursiveLoad(job.Eng, d.Name(), tmpImageDir); err != nil { + return job.Error(err) + } + } + } + + repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories")) + if err == nil { + repositories := map[string]Repository{} + if err := json.Unmarshal(repositoriesJson, &repositories); err != nil { + return job.Error(err) + } + + for imageName, tagMap := range repositories { + for tag, address := range tagMap { + if err := s.Set(imageName, tag, address, true); err != nil { + return job.Error(err) + } + } + } + } else if !os.IsNotExist(err) { + return job.Error(err) + } + + return engine.StatusOK +} + +func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string) error { + if err := eng.Job("image_get", address).Run(); err != nil { + log.Debugf("Loading %s", address) + + imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json")) + if err != nil { + log.Debugf("Error reading json: %s", err) + return err + } + + layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar")) + if err != nil { + log.Debugf("Error reading embedded tar: %s", err) + return err + } + img, err := image.NewImgJSON(imageJson) + if err != nil { + log.Debugf("Error unmarshalling json: %s", err) + return err + } + if img.Parent != "" { + if !s.graph.Exists(img.Parent) { + if err := s.recursiveLoad(eng, img.Parent, tmpImageDir); err != nil { + return err + } + } + } + if err := s.graph.Register(img, imageJson, layer); err != nil { + return err + } + } + log.Debugf("Completed processing %s", address) + + return nil +} diff -Nru docker.io-0.9.1~dfsg1/graph/MAINTAINERS docker.io-1.3.2~dfsg1/graph/MAINTAINERS --- docker.io-0.9.1~dfsg1/graph/MAINTAINERS 1970-01-01
00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graph/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,5 @@ +Solomon Hykes (@shykes) +Victor Vieux (@vieux) +Michael Crosby (@crosbymichael) +Cristian Staretu (@unclejack) +Tibor Vass (@tiborvass) diff -Nru docker.io-0.9.1~dfsg1/graph/pools_test.go docker.io-1.3.2~dfsg1/graph/pools_test.go --- docker.io-0.9.1~dfsg1/graph/pools_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graph/pools_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,49 @@ +package graph + +import ( + "testing" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Init() +} + +func TestPools(t *testing.T) { + s := &TagStore{ + pullingPool: make(map[string]chan struct{}), + pushingPool: make(map[string]chan struct{}), + } + + if _, err := s.poolAdd("pull", "test1"); err != nil { + t.Fatal(err) + } + if _, err := s.poolAdd("pull", "test2"); err != nil { + t.Fatal(err) + } + if _, err := s.poolAdd("push", "test1"); err == nil || err.Error() != "pull test1 is already in progress" { + t.Fatalf("Expected `pull test1 is already in progress`") + } + if _, err := s.poolAdd("pull", "test1"); err == nil || err.Error() != "pull test1 is already in progress" { + t.Fatalf("Expected `pull test1 is already in progress`") + } + if _, err := s.poolAdd("wait", "test3"); err == nil || err.Error() != "Unknown pool type" { + t.Fatalf("Expected `Unknown pool type`") + } + if err := s.poolRemove("pull", "test2"); err != nil { + t.Fatal(err) + } + if err := s.poolRemove("pull", "test2"); err != nil { + t.Fatal(err) + } + if err := s.poolRemove("pull", "test1"); err != nil { + t.Fatal(err) + } + if err := s.poolRemove("push", "test1"); err != nil { + t.Fatal(err) + } + if err := s.poolRemove("wait", "test3"); err == nil || err.Error() != "Unknown pool type" { + t.Fatalf("Expected `Unknown pool type`") + } +} diff -Nru docker.io-0.9.1~dfsg1/graph/pull.go docker.io-1.3.2~dfsg1/graph/pull.go --- docker.io-0.9.1~dfsg1/graph/pull.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graph/pull.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,601 @@ +package graph + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/url" + "os" + "strings" + "time" + + "github.com/docker/docker/engine" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" + "github.com/docker/libtrust" +) + +func (s *TagStore) verifyManifest(eng *engine.Engine, manifestBytes []byte) (*registry.ManifestData, bool, error) { + sig, err := libtrust.ParsePrettySignature(manifestBytes, "signatures") + if err != nil { + return nil, false, fmt.Errorf("error parsing payload: %s", err) + } + keys, err := sig.Verify() + if err != nil { + return nil, false, fmt.Errorf("error verifying payload: %s", err) + } + + payload, err := sig.Payload() + if err != nil { + return nil, false, fmt.Errorf("error retrieving payload: %s", err) + } + + var manifest registry.ManifestData + if err := json.Unmarshal(payload, &manifest); err != nil { + return nil, false, fmt.Errorf("error unmarshalling manifest: %s", err) + } + if manifest.SchemaVersion != 1 { + return nil, false, fmt.Errorf("unsupported schema version: %d", manifest.SchemaVersion) + } + + var verified bool + for _, key := range keys { + job := eng.Job("trust_key_check") + b, err := key.MarshalJSON() + if err != nil { + return nil, false, fmt.Errorf("error marshalling public key: %s", err) + } + namespace := 
manifest.Name + if namespace[0] != '/' { + namespace = "/" + namespace + } + stdoutBuffer := bytes.NewBuffer(nil) + + job.Args = append(job.Args, namespace) + job.Setenv("PublicKey", string(b)) + // Check key has read/write permission (0x03) + job.SetenvInt("Permission", 0x03) + job.Stdout.Add(stdoutBuffer) + if err = job.Run(); err != nil { + return nil, false, fmt.Errorf("error running key check: %s", err) + } + result := engine.Tail(stdoutBuffer, 1) + log.Debugf("Key check result: %q", result) + if result == "verified" { + verified = true + } + } + + return &manifest, verified, nil +} + +func (s *TagStore) CmdPull(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 && n != 2 { + return job.Errorf("Usage: %s IMAGE [TAG]", job.Name) + } + + var ( + localName = job.Args[0] + tag string + sf = utils.NewStreamFormatter(job.GetenvBool("json")) + authConfig = ®istry.AuthConfig{} + metaHeaders map[string][]string + mirrors []string + ) + + if len(job.Args) > 1 { + tag = job.Args[1] + } + + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("metaHeaders", &metaHeaders) + + c, err := s.poolAdd("pull", localName+":"+tag) + if err != nil { + if c != nil { + // Another pull of the same repository is already taking place; just wait for it to finish + job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName)) + <-c + return engine.StatusOK + } + return job.Error(err) + } + defer s.poolRemove("pull", localName+":"+tag) + + // Resolve the Repository name from fqn to endpoint + name + hostname, remoteName, err := registry.ResolveRepositoryName(localName) + if err != nil { + return job.Error(err) + } + + endpoint, err := registry.NewEndpoint(hostname, s.insecureRegistries) + if err != nil { + return job.Error(err) + } + + r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true) + if err != nil { + return job.Error(err) + } + + var isOfficial bool + if endpoint.VersionString(1) == registry.IndexServerAddress() { + // If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar" + localName = remoteName + + isOfficial = isOfficialName(remoteName) + if isOfficial && strings.IndexRune(remoteName, '/') == -1 { + remoteName = "library/" + remoteName + } + + // Use provided mirrors, if any + mirrors = s.mirrors + } + + if len(mirrors) == 0 && (isOfficial || endpoint.Version == registry.APIVersion2) { + j := job.Eng.Job("trust_update_base") + if err = j.Run(); err != nil { + return job.Errorf("error updating trust base graph: %s", err) + } + + if err := s.pullV2Repository(job.Eng, r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err == nil { + return engine.StatusOK + } else if err != registry.ErrDoesNotExist { + log.Errorf("Error from V2 registry: %s", err) + } + } + + if err = s.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel"), mirrors); err != nil { + return job.Error(err) + } + + return engine.StatusOK +} + +func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, localName, remoteName, askedTag string, sf *utils.StreamFormatter, parallel bool, mirrors []string) error { + out.Write(sf.FormatStatus("", "Pulling repository %s", localName)) + + repoData, err := r.GetRepositoryData(remoteName) + if err != nil { + if strings.Contains(err.Error(), "HTTP code: 404") { + return fmt.Errorf("Error: image %s not found", remoteName) + } + // Unexpected HTTP error + return err + } + + log.Debugf("Retrieving the tag 
list") + tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens) + if err != nil { + log.Errorf("%v", err) + return err + } + + for tag, id := range tagsList { + repoData.ImgList[id] = ®istry.ImgData{ + ID: id, + Tag: tag, + Checksum: "", + } + } + + log.Debugf("Registering tags") + // If no tag has been specified, pull them all + var imageId string + if askedTag == "" { + for tag, id := range tagsList { + repoData.ImgList[id].Tag = tag + } + } else { + // Otherwise, check that the tag exists and use only that one + id, exists := tagsList[askedTag] + if !exists { + return fmt.Errorf("Tag %s not found in repository %s", askedTag, localName) + } + imageId = id + repoData.ImgList[id].Tag = askedTag + } + + errors := make(chan error) + + layers_downloaded := false + for _, image := range repoData.ImgList { + downloadImage := func(img *registry.ImgData) { + if askedTag != "" && img.Tag != askedTag { + log.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID) + if parallel { + errors <- nil + } + return + } + + if img.Tag == "" { + log.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) + if parallel { + errors <- nil + } + return + } + + // ensure no two downloads of the same image happen at the same time + if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil { + if c != nil { + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil)) + <-c + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) + } else { + log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) + } + if parallel { + errors <- nil + } + return + } + defer s.poolRemove("pull", "img:"+img.ID) + + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, localName), nil)) + success := false + var lastErr, err error + var is_downloaded bool + if mirrors != nil { + for _, ep := range mirrors { + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, localName, ep), nil)) + if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil { + // Don't report errors when pulling from mirrors. + log.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, localName, ep, err) + continue + } + layers_downloaded = layers_downloaded || is_downloaded + success = true + break + } + } + if !success { + for _, ep := range repoData.Endpoints { + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, localName, ep), nil)) + if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil { + // It's not ideal that only the last error is returned, it would be better to concatenate the errors. + // As the error is also given to the output stream the user will see the error. 
+ lastErr = err + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err), nil)) + continue + } + layers_downloaded = layers_downloaded || is_downloaded + success = true + break + } + } + if !success { + err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, localName, lastErr) + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), err.Error(), nil)) + if parallel { + errors <- err + return + } + } + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) + + if parallel { + errors <- nil + } + } + + if parallel { + go downloadImage(image) + } else { + downloadImage(image) + } + } + if parallel { + var lastError error + for i := 0; i < len(repoData.ImgList); i++ { + if err := <-errors; err != nil { + lastError = err + } + } + if lastError != nil { + return lastError + } + + } + for tag, id := range tagsList { + if askedTag != "" && id != imageId { + continue + } + if err := s.Set(localName, tag, id, true); err != nil { + return err + } + } + + requestedTag := localName + if len(askedTag) > 0 { + requestedTag = localName + ":" + askedTag + } + WriteStatus(requestedTag, out, sf, layers_downloaded) + return nil +} + +func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) (bool, error) { + history, err := r.GetRemoteHistory(imgID, endpoint, token) + if err != nil { + return false, err + } + out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil)) + // FIXME: Try to stream the images? + // FIXME: Launch the getRemoteImage() in goroutines + + layers_downloaded := false + for i := len(history) - 1; i >= 0; i-- { + id := history[i] + + // ensure no two downloads of the same layer happen at the same time + if c, err := s.poolAdd("pull", "layer:"+id); err != nil { + log.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err) + <-c + } + defer s.poolRemove("pull", "layer:"+id) + + if !s.graph.Exists(id) { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil)) + var ( + imgJSON []byte + imgSize int + err error + img *image.Image + ) + retries := 5 + for j := 1; j <= retries; j++ { + imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token) + if err != nil && j == retries { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) + return layers_downloaded, err + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } + img, err = image.NewImgJSON(imgJSON) + layers_downloaded = true + if err != nil && j == retries { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) + return layers_downloaded, fmt.Errorf("Failed to parse json: %s", err) + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } else { + break + } + } + + for j := 1; j <= retries; j++ { + // Get the layer + status := "Pulling fs layer" + if j > 1 { + status = fmt.Sprintf("Pulling fs layer [retries: %d]", j) + } + out.Write(sf.FormatProgress(utils.TruncateID(id), status, nil)) + layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize)) + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } else if err != nil { + 
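+ // a non-timeout error is not retried: report it and abort this layer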
out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) + return layers_downloaded, err + } + layers_downloaded = true + defer layer.Close() + + err = s.graph.Register(img, imgJSON, + utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading")) + if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } else if err != nil { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil)) + return layers_downloaded, err + } else { + break + } + } + } + out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil)) + } + return layers_downloaded, nil +} + +func WriteStatus(requestedTag string, out io.Writer, sf *utils.StreamFormatter, layers_downloaded bool) { + if layers_downloaded { + out.Write(sf.FormatStatus("", "Status: Downloaded newer image for %s", requestedTag)) + } else { + out.Write(sf.FormatStatus("", "Status: Image is up to date for %s", requestedTag)) + } +} + +// downloadInfo is used to pass information from download to extractor +type downloadInfo struct { + imgJSON []byte + img *image.Image + tmpFile *os.File + length int64 + downloaded bool + err chan error +} + +func (s *TagStore) pullV2Repository(eng *engine.Engine, r *registry.Session, out io.Writer, localName, remoteName, tag string, sf *utils.StreamFormatter, parallel bool) error { + var layersDownloaded bool + if tag == "" { + log.Debugf("Pulling tag list from V2 registry for %s", remoteName) + tags, err := r.GetV2RemoteTags(remoteName, nil) + if err != nil { + return err + } + for _, t := range tags { + if downloaded, err := s.pullV2Tag(eng, r, out, localName, remoteName, t, sf, parallel); err != nil { + return err + } else if downloaded { + layersDownloaded = true + } + } + } else { + if downloaded, err := s.pullV2Tag(eng, r, out, localName, remoteName, tag, sf, parallel); err != nil { + return err + } else if downloaded { + layersDownloaded = true + } + } + + requestedTag := localName + if len(tag) > 0 { + requestedTag = localName + ":" + tag + } + WriteStatus(requestedTag, out, sf, layersDownloaded) + return nil +} + +func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Writer, localName, remoteName, tag string, sf *utils.StreamFormatter, parallel bool) (bool, error) { + log.Debugf("Pulling tag from V2 registry: %q", tag) + manifestBytes, err := r.GetV2ImageManifest(remoteName, tag, nil) + if err != nil { + return false, err + } + + manifest, verified, err := s.verifyManifest(eng, manifestBytes) + if err != nil { + return false, fmt.Errorf("error verifying manifest: %s", err) + } + + if len(manifest.FSLayers) != len(manifest.History) { + return false, fmt.Errorf("length of history not equal to number of layers") + } + + if verified { + out.Write(sf.FormatStatus(localName+":"+tag, "The image you are pulling has been verified")) + } else { + out.Write(sf.FormatStatus(tag, "Pulling from %s", localName)) + } + + if len(manifest.FSLayers) == 0 { + return false, fmt.Errorf("no blobSums in manifest") + } + + downloads := make([]downloadInfo, len(manifest.FSLayers)) + + for i := len(manifest.FSLayers) - 1; i >= 0; i-- { + var ( + sumStr = manifest.FSLayers[i].BlobSum + imgJSON = []byte(manifest.History[i].V1Compatibility) + ) + + img, err := image.NewImgJSON(imgJSON) + if err != nil { + return false, fmt.Errorf("failed to parse json: %s", err) + } + downloads[i].img = img + + // Check if exists + if 
s.graph.Exists(img.ID) { + log.Debugf("Image already exists: %s", img.ID) + continue + } + + chunks := strings.SplitN(sumStr, ":", 2) + if len(chunks) < 2 { + return false, fmt.Errorf("expected 2 parts in the sumStr, got %#v", chunks) + } + sumType, checksum := chunks[0], chunks[1] + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling fs layer", nil)) + + downloadFunc := func(di *downloadInfo) error { + log.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID) + + if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil { + if c != nil { + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil)) + <-c + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) + } else { + log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) + } + } else { + defer s.poolRemove("pull", "img:"+img.ID) + tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob") + if err != nil { + return err + } + + r, l, err := r.GetV2ImageBlobReader(remoteName, sumType, checksum, nil) + if err != nil { + return err + } + defer r.Close() + io.Copy(tmpFile, utils.ProgressReader(r, int(l), out, sf, false, utils.TruncateID(img.ID), "Downloading")) + + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) + + log.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name()) + di.tmpFile = tmpFile + di.length = l + di.downloaded = true + } + di.imgJSON = imgJSON + + return nil + } + + if parallel { + downloads[i].err = make(chan error) + go func(di *downloadInfo) { + di.err <- downloadFunc(di) + }(&downloads[i]) + } else { + err := downloadFunc(&downloads[i]) + if err != nil { + return false, err + } + } + } + + var layersDownloaded bool + for i := len(downloads) - 1; i >= 0; i-- { + d := &downloads[i] + if d.err != nil { + err := <-d.err + if err != nil { + return false, err + } + } + if d.downloaded { + // a nil tmpFile means the layer was downloaded and extracted elsewhere + if d.tmpFile != nil { + defer os.Remove(d.tmpFile.Name()) + defer d.tmpFile.Close() + d.tmpFile.Seek(0, 0) + err = s.graph.Register(d.img, d.imgJSON, + utils.ProgressReader(d.tmpFile, int(d.length), out, sf, false, utils.TruncateID(d.img.ID), "Extracting")) + if err != nil { + return false, err + } + + // FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted) + } + out.Write(sf.FormatProgress(utils.TruncateID(d.img.ID), "Pull complete", nil)) + layersDownloaded = true + } else { + out.Write(sf.FormatProgress(utils.TruncateID(d.img.ID), "Already exists", nil)) + } + + } + + if err = s.Set(localName, tag, downloads[0].img.ID, true); err != nil { + return false, err + } + + return layersDownloaded, nil +} diff -Nru docker.io-0.9.1~dfsg1/graph/push.go docker.io-1.3.2~dfsg1/graph/push.go --- docker.io-0.9.1~dfsg1/graph/push.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graph/push.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,250 @@ +package graph + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +// getImageList retrieves all the images to be uploaded in the correct order +func (s *TagStore) getImageList(localRepo map[string]string, requestedTag string) ([]string, map[string][]string, error) { + var ( + imageList []string + imagesSeen = make(map[string]bool) + tagsByImage
= make(map[string][]string) + ) + + for tag, id := range localRepo { + if requestedTag != "" && requestedTag != tag { + continue + } + var imageListForThisTag []string + + tagsByImage[id] = append(tagsByImage[id], tag) + + for img, err := s.graph.Get(id); img != nil; img, err = img.GetParent() { + if err != nil { + return nil, nil, err + } + + if imagesSeen[img.ID] { + // This image is already on the list, we can ignore it and all its parents + break + } + + imagesSeen[img.ID] = true + imageListForThisTag = append(imageListForThisTag, img.ID) + } + + // reverse the image list for this tag (so the "most"-parent image is first) + for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 { + imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i] + } + + // append to main image list + imageList = append(imageList, imageListForThisTag...) + } + if len(imageList) == 0 { + return nil, nil, fmt.Errorf("No images found for the requested repository / tag") + } + log.Debugf("Image list: %v", imageList) + log.Debugf("Tags by image: %v", tagsByImage) + + return imageList, tagsByImage, nil +} + +func (s *TagStore) pushRepository(r *registry.Session, out io.Writer, localName, remoteName string, localRepo map[string]string, tag string, sf *utils.StreamFormatter) error { + out = utils.NewWriteFlusher(out) + log.Debugf("Local repo: %s", localRepo) + imgList, tagsByImage, err := s.getImageList(localRepo, tag) + if err != nil { + return err + } + + out.Write(sf.FormatStatus("", "Sending image list")) + + var ( + repoData *registry.RepositoryData + imageIndex []*registry.ImgData + ) + + for _, imgId := range imgList { + if tags, exists := tagsByImage[imgId]; exists { + // If an image has tags you must add an entry in the image index + // for each tag + for _, tag := range tags { + imageIndex = append(imageIndex, &registry.ImgData{ + ID: imgId, + Tag: tag, + }) + } + } else { + // If the image does not have a tag it still needs to be sent to the + // registry with an empty tag so that it is associated with the repository + imageIndex = append(imageIndex, &registry.ImgData{ + ID: imgId, + Tag: "", + }) + + } + } + + log.Debugf("Preparing to push %s with the following images and tags", localRepo) + for _, data := range imageIndex { + log.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag) + } + + // Register all the images in a repository with the registry + // If an image is not in this list it will not be associated with the repository + repoData, err = r.PushImageJSONIndex(remoteName, imageIndex, false, nil) + if err != nil { + return err + } + + nTag := 1 + if tag == "" { + nTag = len(localRepo) + } + for _, ep := range repoData.Endpoints { + out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, nTag)) + + for _, imgId := range imgList { + if r.LookupRemoteImage(imgId, ep, repoData.Tokens) { + out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(imgId))) + } else { + if _, err := s.pushImage(r, out, remoteName, imgId, ep, repoData.Tokens, sf); err != nil { + // FIXME: Continue on error?
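+ // for now a single failed image upload aborts the entire repository push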
+ return err + } + } + + for _, tag := range tagsByImage[imgId] { + out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(imgId), ep+"repositories/"+remoteName+"/tags/"+tag)) + + if err := r.PushRegistryTag(remoteName, imgId, tag, ep, repoData.Tokens); err != nil { + return err + } + } + } + } + + if _, err := r.PushImageJSONIndex(remoteName, imageIndex, true, repoData.Endpoints); err != nil { + return err + } + + return nil +} + +func (s *TagStore) pushImage(r *registry.Session, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) { + out = utils.NewWriteFlusher(out) + jsonRaw, err := ioutil.ReadFile(path.Join(s.graph.Root, imgID, "json")) + if err != nil { + return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err) + } + out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil)) + + imgData := ®istry.ImgData{ + ID: imgID, + } + + // Send the json + if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil { + if err == registry.ErrAlreadyExists { + out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil)) + return "", nil + } + return "", err + } + + layerData, err := s.graph.TempLayerArchive(imgID, archive.Uncompressed, sf, out) + if err != nil { + return "", fmt.Errorf("Failed to generate layer archive: %s", err) + } + defer os.RemoveAll(layerData.Name()) + + // Send the layer + log.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size) + + checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw) + if err != nil { + return "", err + } + imgData.Checksum = checksum + imgData.ChecksumPayload = checksumPayload + // Send the checksum + if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil { + return "", err + } + + out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image successfully pushed", nil)) + return imgData.Checksum, nil +} + +// FIXME: Allow to interrupt current push when new push of same image is done. 
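+// CmdPush uploads an image or a whole repository to the registry. +// Caller-side sketch (illustrative only; it mirrors how CmdPull is driven): +// job := eng.Job("push", "shykes/myapp") +// job.Setenv("tag", "latest") // optional; empty means all tags +// job.SetenvJson("authConfig", auth) // registry credentials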
+func (s *TagStore) CmdPush(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s IMAGE", job.Name) + } + var ( + localName = job.Args[0] + sf = utils.NewStreamFormatter(job.GetenvBool("json")) + authConfig = &registry.AuthConfig{} + metaHeaders map[string][]string + ) + + tag := job.Getenv("tag") + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("metaHeaders", &metaHeaders) + if _, err := s.poolAdd("push", localName); err != nil { + return job.Error(err) + } + defer s.poolRemove("push", localName) + + // Resolve the Repository name from fqn to endpoint + name + hostname, remoteName, err := registry.ResolveRepositoryName(localName) + if err != nil { + return job.Error(err) + } + + endpoint, err := registry.NewEndpoint(hostname, s.insecureRegistries) + if err != nil { + return job.Error(err) + } + + img, err := s.graph.Get(localName) + r, err2 := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false) + if err2 != nil { + return job.Error(err2) + } + + if err != nil { + reposLen := 1 + if tag == "" { + reposLen = len(s.Repositories[localName]) + } + job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen)) + // the name did not resolve to a single image, so try pushing it as a repository + if localRepo, exists := s.Repositories[localName]; exists { + if err := s.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + return job.Error(err) + } + + var token []string + job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName)) + if _, err := s.pushImage(r, job.Stdout, remoteName, img.ID, endpoint.String(), token, sf); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff -Nru docker.io-0.9.1~dfsg1/graph/service.go docker.io-1.3.2~dfsg1/graph/service.go --- docker.io-0.9.1~dfsg1/graph/service.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graph/service.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,182 @@ +package graph + +import ( + "fmt" + "io" + + "github.com/docker/docker/engine" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/log" +) + +func (s *TagStore) Install(eng *engine.Engine) error { + for name, handler := range map[string]engine.Handler{ + "image_set": s.CmdSet, + "image_tag": s.CmdTag, + "tag": s.CmdTagLegacy, // FIXME merge with "image_tag" + "image_get": s.CmdGet, + "image_inspect": s.CmdLookup, + "image_tarlayer": s.CmdTarLayer, + "image_export": s.CmdImageExport, + "history": s.CmdHistory, + "images": s.CmdImages, + "viz": s.CmdViz, + "load": s.CmdLoad, + "import": s.CmdImport, + "pull": s.CmdPull, + "push": s.CmdPush, + } { + if err := eng.Register(name, handler); err != nil { + return fmt.Errorf("Could not register %q: %v", name, err) + } + } + return nil +} + +// CmdSet stores a new image in the graph. +// Images are stored in the graph using 4 elements: +// - A user-defined ID +// - A collection of metadata describing the image +// - A directory tree stored as a tar archive (also called the "layer") +// - A reference to a "parent" ID on top of which the layer should be applied +// +// NOTE: even though the parent ID is only useful in relation to the layer and how +// to apply it (ie you could represent the full directory tree as 'parent_layer + layer'), +// it is treated as a top-level property of the image.
This is an artifact of early +// design and should probably be cleaned up in the future to simplify the design. +// +// Syntax: image_set ID +// Input: +// - Layer content must be streamed in tar format on stdin. An empty input is +// valid and represents a nil layer. +// +// - Image metadata must be passed in the command environment. +// 'json': a json-encoded object with all image metadata. +// It will be stored as-is, without any encoding/decoding artifacts. +// That is a requirement of the current registry client implementation, +// because a re-encoded json might invalidate the image checksum at +// the next upload, even with functionally identical content. +func (s *TagStore) CmdSet(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + var ( + imgJSON = []byte(job.Getenv("json")) + layer = job.Stdin + ) + if len(imgJSON) == 0 { + return job.Errorf("mandatory key 'json' is not set") + } + // We have to pass an *image.Image object, even though it will be completely + // ignored in favor of the redundant json data. + // FIXME: the current prototype of Graph.Register is stupid and redundant. + img, err := image.NewImgJSON(imgJSON) + if err != nil { + return job.Error(err) + } + if err := s.graph.Register(img, imgJSON, layer); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +// CmdGet returns information about an image. +// If the image doesn't exist, an empty object is returned, to allow +// checking for an image's existence. +func (s *TagStore) CmdGet(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + res := &engine.Env{} + img, err := s.LookupImage(name) + // Note: if the image doesn't exist, LookupImage returns + // nil, nil. + if err != nil { + return job.Error(err) + } + if img != nil { + // We don't directly expose all fields of the Image objects, + // to maintain a clean public API which we can maintain over + // time even if the underlying structure changes. + // We should have done this with the Image object to begin with... + // but we didn't, so now we're doing it here. + // + // Fields that we're probably better off not including: + // - Config/ContainerConfig. Those structs have the same sprawl problem, + // so we shouldn't include them wholesale either. + // - Comment: initially created to fulfill the "every image is a git commit" + // metaphor, in practice people either ignore it or use it as a + // generic description field which it isn't. On deprecation shortlist.
+ res.SetAuto("Created", img.Created) + res.Set("Author", img.Author) + res.Set("Os", img.OS) + res.Set("Architecture", img.Architecture) + res.Set("DockerVersion", img.DockerVersion) + res.Set("Id", img.ID) + res.Set("Parent", img.Parent) + } + res.WriteTo(job.Stdout) + return engine.StatusOK +} + +// CmdLookup return an image encoded in JSON +func (s *TagStore) CmdLookup(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + if image, err := s.LookupImage(name); err == nil && image != nil { + if job.GetenvBool("raw") { + b, err := image.RawJson() + if err != nil { + return job.Error(err) + } + job.Stdout.Write(b) + return engine.StatusOK + } + + out := &engine.Env{} + out.Set("Id", image.ID) + out.Set("Parent", image.Parent) + out.Set("Comment", image.Comment) + out.SetAuto("Created", image.Created) + out.Set("Container", image.Container) + out.SetJson("ContainerConfig", image.ContainerConfig) + out.Set("DockerVersion", image.DockerVersion) + out.Set("Author", image.Author) + out.SetJson("Config", image.Config) + out.Set("Architecture", image.Architecture) + out.Set("Os", image.OS) + out.SetInt64("Size", image.Size) + out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) + if _, err = out.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + return job.Errorf("No such image: %s", name) +} + +// CmdTarLayer return the tarLayer of the image +func (s *TagStore) CmdTarLayer(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + if image, err := s.LookupImage(name); err == nil && image != nil { + fs, err := image.TarLayer() + if err != nil { + return job.Error(err) + } + defer fs.Close() + + written, err := io.Copy(job.Stdout, fs) + if err != nil { + return job.Error(err) + } + log.Debugf("rendered layer for %s of [%d] size", image.ID, written) + return engine.StatusOK + } + return job.Errorf("No such image: %s", name) +} diff -Nru docker.io-0.9.1~dfsg1/graph/tag.go docker.io-1.3.2~dfsg1/graph/tag.go --- docker.io-0.9.1~dfsg1/graph/tag.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graph/tag.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,44 @@ +package graph + +import ( + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/parsers" +) + +// CmdTag assigns a new name and tag to an existing image. If the tag already exists, +// it is changed and the image previously referenced by the tag loses that reference. +// This may cause the old image to be garbage-collected if its reference count reaches zero. +// +// Syntax: image_tag NEWNAME OLDNAME +// Example: image_tag shykes/myapp:latest shykes/myapp:1.42.0 +func (s *TagStore) CmdTag(job *engine.Job) engine.Status { + if len(job.Args) != 2 { + return job.Errorf("usage: %s NEWNAME OLDNAME", job.Name) + } + var ( + newName = job.Args[0] + oldName = job.Args[1] + ) + newRepo, newTag := parsers.ParseRepositoryTag(newName) + // FIXME: Set should either parse both old and new name, or neither. + // the current prototype is inconsistent. + if err := s.Set(newRepo, newTag, oldName, true); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +// FIXME: merge into CmdTag above, and merge "image_tag" and "tag" into a single job. 
+func (s *TagStore) CmdTagLegacy(job *engine.Job) engine.Status { + if len(job.Args) != 2 && len(job.Args) != 3 { + return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name) + } + var tag string + if len(job.Args) == 3 { + tag = job.Args[2] + } + if err := s.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff -Nru docker.io-0.9.1~dfsg1/graph/tags.go docker.io-1.3.2~dfsg1/graph/tags.go --- docker.io-0.9.1~dfsg1/graph/tags.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graph/tags.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,356 @@ +package graph + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/utils" +) + +const DEFAULTTAG = "latest" + +var ( + validTagName = regexp.MustCompile(`^[\w][\w.-]{0,127}$`) +) + +type TagStore struct { + path string + graph *Graph + mirrors []string + insecureRegistries []string + Repositories map[string]Repository + sync.Mutex + // FIXME: move push/pull-related fields + // to a helper type + pullingPool map[string]chan struct{} + pushingPool map[string]chan struct{} +} + +type Repository map[string]string + +// Update merges the contents of u into the r Repository +func (r Repository) Update(u Repository) { + for k, v := range u { + r[k] = v + } +} + +// Contains returns true if the contents of the u Repository are wholly contained in the r Repository +func (r Repository) Contains(u Repository) bool { + for k, v := range u { + // if u's key is not present in r OR u's key is present, but not the same value + if rv, ok := r[k]; !ok || (ok && rv != v) { + return false + } + } + return true +} + +func NewTagStore(path string, graph *Graph, mirrors []string, insecureRegistries []string) (*TagStore, error) { + abspath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + + store := &TagStore{ + path: abspath, + graph: graph, + mirrors: mirrors, + insecureRegistries: insecureRegistries, + Repositories: make(map[string]Repository), + pullingPool: make(map[string]chan struct{}), + pushingPool: make(map[string]chan struct{}), + } + // Load the json file if it exists, otherwise create it.
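+ // (a missing file is the normal first-run case and just triggers save(); any other reload error is fatal)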
+    if err := store.reload(); os.IsNotExist(err) {
+        if err := store.save(); err != nil {
+            return nil, err
+        }
+    } else if err != nil {
+        return nil, err
+    }
+    return store, nil
+}
+
+func (store *TagStore) save() error {
+    // Store the json ball
+    jsonData, err := json.Marshal(store)
+    if err != nil {
+        return err
+    }
+    if err := ioutil.WriteFile(store.path, jsonData, 0600); err != nil {
+        return err
+    }
+    return nil
+}
+
+func (store *TagStore) reload() error {
+    jsonData, err := ioutil.ReadFile(store.path)
+    if err != nil {
+        return err
+    }
+    if err := json.Unmarshal(jsonData, store); err != nil {
+        return err
+    }
+    return nil
+}
+
+func (store *TagStore) LookupImage(name string) (*image.Image, error) {
+    // FIXME: standardize on returning nil when the image doesn't exist, and err for everything else
+    // (so we can pass all errors here)
+    repos, tag := parsers.ParseRepositoryTag(name)
+    if tag == "" {
+        tag = DEFAULTTAG
+    }
+    img, err := store.GetImage(repos, tag)
+    store.Lock()
+    defer store.Unlock()
+    if err != nil {
+        return nil, err
+    } else if img == nil {
+        if img, err = store.graph.Get(name); err != nil {
+            return nil, err
+        }
+    }
+    return img, nil
+}
+
+// Return a reverse-lookup table of all the names which refer to each image
+// E.g. {"43b5f19b10584": {"base:latest", "base:v1"}}
+func (store *TagStore) ByID() map[string][]string {
+    store.Lock()
+    defer store.Unlock()
+    byID := make(map[string][]string)
+    for repoName, repository := range store.Repositories {
+        for tag, id := range repository {
+            name := repoName + ":" + tag
+            if _, exists := byID[id]; !exists {
+                byID[id] = []string{name}
+            } else {
+                byID[id] = append(byID[id], name)
+                sort.Strings(byID[id])
+            }
+        }
+    }
+    return byID
+}
+
+func (store *TagStore) ImageName(id string) string {
+    if names, exists := store.ByID()[id]; exists && len(names) > 0 {
+        return names[0]
+    }
+    return utils.TruncateID(id)
+}
+
+func (store *TagStore) DeleteAll(id string) error {
+    names, exists := store.ByID()[id]
+    if !exists || len(names) == 0 {
+        return nil
+    }
+    for _, name := range names {
+        if strings.Contains(name, ":") {
+            nameParts := strings.Split(name, ":")
+            if _, err := store.Delete(nameParts[0], nameParts[1]); err != nil {
+                return err
+            }
+        } else {
+            if _, err := store.Delete(name, ""); err != nil {
+                return err
+            }
+        }
+    }
+    return nil
+}
+
+func (store *TagStore) Delete(repoName, tag string) (bool, error) {
+    store.Lock()
+    defer store.Unlock()
+    deleted := false
+    if err := store.reload(); err != nil {
+        return false, err
+    }
+    if r, exists := store.Repositories[repoName]; exists {
+        if tag != "" {
+            if _, exists2 := r[tag]; exists2 {
+                delete(r, tag)
+                if len(r) == 0 {
+                    delete(store.Repositories, repoName)
+                }
+                deleted = true
+            } else {
+                return false, fmt.Errorf("No such tag: %s:%s", repoName, tag)
+            }
+        } else {
+            delete(store.Repositories, repoName)
+            deleted = true
+        }
+    } else {
+        return false, fmt.Errorf("No such repository: %s", repoName)
+    }
+    return deleted, store.save()
+}
+
+func (store *TagStore) Set(repoName, tag, imageName string, force bool) error {
+    img, err := store.LookupImage(imageName)
+    store.Lock()
+    defer store.Unlock()
+    if err != nil {
+        return err
+    }
+    if tag == "" {
+        tag = DEFAULTTAG
+    }
+    if err := validateRepoName(repoName); err != nil {
+        return err
+    }
+    if err := ValidateTagName(tag); err != nil {
+        return err
+    }
+    if err := store.reload(); err != nil {
+        return err
+    }
+    var repo Repository
+    if r, exists := store.Repositories[repoName]; exists {
+        repo = r
+        if old, exists := repo[tag]; exists && !force {
+            return fmt.Errorf("Conflict: Tag %s:%s is already set to %s", repoName, tag, old)
+        }
+    } else {
+        repo = make(map[string]string)
+        store.Repositories[repoName] = repo
+    }
+    repo[tag] = img.ID
+    return store.save()
+}
+
+func (store *TagStore) Get(repoName string) (Repository, error) {
+    store.Lock()
+    defer store.Unlock()
+    if err := store.reload(); err != nil {
+        return nil, err
+    }
+    if r, exists := store.Repositories[repoName]; exists {
+        return r, nil
+    }
+    return nil, nil
+}
+
+func (store *TagStore) GetImage(repoName, tagOrID string) (*image.Image, error) {
+    repo, err := store.Get(repoName)
+    store.Lock()
+    defer store.Unlock()
+    if err != nil {
+        return nil, err
+    } else if repo == nil {
+        return nil, nil
+    }
+    if revision, exists := repo[tagOrID]; exists {
+        return store.graph.Get(revision)
+    }
+    // If no matching tag is found, search through images for a matching image id
+    for _, revision := range repo {
+        if strings.HasPrefix(revision, tagOrID) {
+            return store.graph.Get(revision)
+        }
+    }
+    return nil, nil
+}
+
+func (store *TagStore) GetRepoRefs() map[string][]string {
+    store.Lock()
+    reporefs := make(map[string][]string)
+
+    for name, repository := range store.Repositories {
+        for tag, id := range repository {
+            shortID := utils.TruncateID(id)
+            reporefs[shortID] = append(reporefs[shortID], fmt.Sprintf("%s:%s", name, tag))
+        }
+    }
+    store.Unlock()
+    return reporefs
+}
+
+// isOfficialName returns whether a repo name is considered an official
+// repository. Official repositories are repos with names within
+// the library namespace or which default to the library namespace
+// by not providing one.
+func isOfficialName(name string) bool {
+    if strings.HasPrefix(name, "library/") {
+        return true
+    }
+    if strings.IndexRune(name, '/') == -1 {
+        return true
+    }
+    return false
+}
+
+// Validate the name of a repository
+func validateRepoName(name string) error {
+    if name == "" {
+        return fmt.Errorf("Repository name can't be empty")
+    }
+    return nil
+}
+
+// Validate the name of a tag
+func ValidateTagName(name string) error {
+    if name == "" {
+        return fmt.Errorf("Tag name can't be empty")
+    }
+    if !validTagName.MatchString(name) {
+        return fmt.Errorf("Illegal tag name (%s): only [A-Za-z0-9_.-] are allowed, minimum 1, maximum 128 in length", name)
+    }
+    return nil
+}
+
+func (store *TagStore) poolAdd(kind, key string) (chan struct{}, error) {
+    store.Lock()
+    defer store.Unlock()
+
+    if c, exists := store.pullingPool[key]; exists {
+        return c, fmt.Errorf("pull %s is already in progress", key)
+    }
+    if c, exists := store.pushingPool[key]; exists {
+        return c, fmt.Errorf("push %s is already in progress", key)
+    }
+
+    c := make(chan struct{})
+    switch kind {
+    case "pull":
+        store.pullingPool[key] = c
+    case "push":
+        store.pushingPool[key] = c
+    default:
+        return nil, fmt.Errorf("Unknown pool type")
+    }
+    return c, nil
+}
+
+func (store *TagStore) poolRemove(kind, key string) error {
+    store.Lock()
+    defer store.Unlock()
+    switch kind {
+    case "pull":
+        if c, exists := store.pullingPool[key]; exists {
+            close(c)
+            delete(store.pullingPool, key)
+        }
+    case "push":
+        if c, exists := store.pushingPool[key]; exists {
+            close(c)
+            delete(store.pushingPool, key)
+        }
+    default:
+        return fmt.Errorf("Unknown pool type")
+    }
+    return nil
+}
diff -Nru docker.io-0.9.1~dfsg1/graph/tags_unit_test.go docker.io-1.3.2~dfsg1/graph/tags_unit_test.go
--- docker.io-0.9.1~dfsg1/graph/tags_unit_test.go 1970-01-01 00:00:00.000000000 +0000
+++ 
docker.io-1.3.2~dfsg1/graph/tags_unit_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,150 @@ +package graph + +import ( + "bytes" + "io" + "os" + "path" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + _ "github.com/docker/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests + "github.com/docker/docker/image" + "github.com/docker/docker/utils" + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +const ( + testImageName = "myapp" + testImageID = "foo" +) + +func fakeTar() (io.Reader, error) { + uid := os.Getuid() + gid := os.Getgid() + + content := []byte("Hello world!\n") + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} { + hdr := new(tar.Header) + + // Leaving these fields blank requires root privileges + hdr.Uid = uid + hdr.Gid = gid + + hdr.Size = int64(len(content)) + hdr.Name = name + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + tw.Write([]byte(content)) + } + tw.Close() + return buf, nil +} + +func mkTestTagStore(root string, t *testing.T) *TagStore { + driver, err := graphdriver.New(root, nil) + if err != nil { + t.Fatal(err) + } + graph, err := NewGraph(root, driver) + if err != nil { + t.Fatal(err) + } + store, err := NewTagStore(path.Join(root, "tags"), graph, nil, nil) + if err != nil { + t.Fatal(err) + } + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + img := &image.Image{ID: testImageID} + if err := graph.Register(img, nil, archive); err != nil { + t.Fatal(err) + } + if err := store.Set(testImageName, "", testImageID, false); err != nil { + t.Fatal(err) + } + return store +} + +func TestLookupImage(t *testing.T) { + tmp, err := utils.TestDirectory("") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + store := mkTestTagStore(tmp, t) + defer store.graph.driver.Cleanup() + + if img, err := store.LookupImage(testImageName); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } + if img, err := store.LookupImage(testImageName + ":" + DEFAULTTAG); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } + + if img, err := store.LookupImage(testImageName + ":" + "fail"); err == nil { + t.Errorf("Expected error, none found") + } else if img != nil { + t.Errorf("Expected 0 image, 1 found") + } + + if img, err := store.LookupImage("fail:fail"); err == nil { + t.Errorf("Expected error, none found") + } else if img != nil { + t.Errorf("Expected 0 image, 1 found") + } + + if img, err := store.LookupImage(testImageID); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } + + if img, err := store.LookupImage(testImageName + ":" + testImageID); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } +} + +func TestValidTagName(t *testing.T) { + validTags := []string{"9", "foo", "foo-test", "bar.baz.boo"} + for _, tag := range validTags { + if err := ValidateTagName(tag); err != nil { + t.Errorf("'%s' should've been a valid tag", tag) + } + } +} + +func TestInvalidTagName(t *testing.T) { + validTags := []string{"-9", ".foo", "-test", ".", "-"} + for _, tag := range validTags { + if err := ValidateTagName(tag); err == nil { + t.Errorf("'%s' shouldn't have been a valid tag", tag) + } + } +} + +func TestOfficialName(t *testing.T) { + names := map[string]bool{ + 
"library/ubuntu": true, + "nonlibrary/ubuntu": false, + "ubuntu": true, + "other/library": false, + } + for name, isOfficial := range names { + result := isOfficialName(name) + if result != isOfficial { + t.Errorf("Unexpected result for %s\n\tExpecting: %v\n\tActual: %v", name, isOfficial, result) + } + } +} diff -Nru docker.io-0.9.1~dfsg1/graph/viz.go docker.io-1.3.2~dfsg1/graph/viz.go --- docker.io-0.9.1~dfsg1/graph/viz.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graph/viz.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,38 @@ +package graph + +import ( + "strings" + + "github.com/docker/docker/engine" + "github.com/docker/docker/image" +) + +func (s *TagStore) CmdViz(job *engine.Job) engine.Status { + images, _ := s.graph.Map() + if images == nil { + return engine.StatusOK + } + job.Stdout.Write([]byte("digraph docker {\n")) + + var ( + parentImage *image.Image + err error + ) + for _, image := range images { + parentImage, err = image.GetParent() + if err != nil { + return job.Errorf("Error while getting parent image: %v", err) + } + if parentImage != nil { + job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n")) + } else { + job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n")) + } + } + + for id, repos := range s.GetRepoRefs() { + job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n")) + } + job.Stdout.Write([]byte(" base [style=invisible]\n}\n")) + return engine.StatusOK +} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/aufs/aufs.go docker.io-1.3.2~dfsg1/graphdriver/aufs/aufs.go --- docker.io-0.9.1~dfsg1/graphdriver/aufs/aufs.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/aufs/aufs.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,401 +0,0 @@ -/* - -aufs driver directory structure - -. -├── layers // Metadata of layers -│   ├── 1 -│   ├── 2 -│   └── 3 -├── diffs // Content of the layer -│   ├── 1 // Contains layers that need to be mounted for the id -│   ├── 2 -│   └── 3 -└── mnt // Mount points for the rw layers to be mounted - ├── 1 - ├── 2 - └── 3 - -*/ - -package aufs - -import ( - "bufio" - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/graphdriver" - mountpk "github.com/dotcloud/docker/pkg/mount" - "github.com/dotcloud/docker/utils" - "os" - "os/exec" - "path" - "strings" - "sync" -) - -var ( - ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") -) - -func init() { - graphdriver.Register("aufs", Init) -} - -type Driver struct { - root string - sync.Mutex // Protects concurrent modification to active - active map[string]int -} - -// New returns a new AUFS driver. -// An error is returned if AUFS is not supported. 
-func Init(root string) (graphdriver.Driver, error) { - // Try to load the aufs kernel module - if err := supportsAufs(); err != nil { - return nil, err - } - paths := []string{ - "mnt", - "diff", - "layers", - } - - a := &Driver{ - root: root, - active: make(map[string]int), - } - - // Create the root aufs driver dir and return - // if it already exists - // If not populate the dir structure - if err := os.MkdirAll(root, 0755); err != nil { - if os.IsExist(err) { - return a, nil - } - return nil, err - } - - for _, p := range paths { - if err := os.MkdirAll(path.Join(root, p), 0755); err != nil { - return nil, err - } - } - return a, nil -} - -// Return a nil error if the kernel supports aufs -// We cannot modprobe because inside dind modprobe fails -// to run -func supportsAufs() error { - // We can try to modprobe aufs first before looking at - // proc/filesystems for when aufs is supported - exec.Command("modprobe", "aufs").Run() - - f, err := os.Open("/proc/filesystems") - if err != nil { - return err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if strings.Contains(s.Text(), "aufs") { - return nil - } - } - return ErrAufsNotSupported -} - -func (a Driver) rootPath() string { - return a.root -} - -func (Driver) String() string { - return "aufs" -} - -func (a Driver) Status() [][2]string { - ids, _ := loadIds(path.Join(a.rootPath(), "layers")) - return [][2]string{ - {"Root Dir", a.rootPath()}, - {"Dirs", fmt.Sprintf("%d", len(ids))}, - } -} - -// Exists returns true if the given id is registered with -// this driver -func (a Driver) Exists(id string) bool { - if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { - return false - } - return true -} - -// Three folders are created for each id -// mnt, layers, and diff -func (a *Driver) Create(id, parent string) error { - if err := a.createDirsFor(id); err != nil { - return err - } - // Write the layers metadata - f, err := os.Create(path.Join(a.rootPath(), "layers", id)) - if err != nil { - return err - } - defer f.Close() - - if parent != "" { - ids, err := getParentIds(a.rootPath(), parent) - if err != nil { - return err - } - - if _, err := fmt.Fprintln(f, parent); err != nil { - return err - } - for _, i := range ids { - if _, err := fmt.Fprintln(f, i); err != nil { - return err - } - } - } - return nil -} - -func (a *Driver) createDirsFor(id string) error { - paths := []string{ - "mnt", - "diff", - } - - for _, p := range paths { - if err := os.MkdirAll(path.Join(a.rootPath(), p, id), 0755); err != nil { - return err - } - } - return nil -} - -// Unmount and remove the dir information -func (a *Driver) Remove(id string) error { - // Protect the a.active from concurrent access - a.Lock() - defer a.Unlock() - - if a.active[id] != 0 { - utils.Errorf("Warning: removing active id %s\n", id) - } - - // Make sure the dir is umounted first - if err := a.unmount(id); err != nil { - return err - } - tmpDirs := []string{ - "mnt", - "diff", - } - - // Atomically remove each directory in turn by first moving it out of the - // way (so that docker doesn't find it anymore) before doing removal of - // the whole tree. 
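
The move-aside-then-delete trick described in the comment just above, as a standalone sketch: os.Rename is atomic, so no other code ever observes a half-deleted layer directory, and the slow recursive removal then runs on the renamed copy. Directory names here are invented; the loop below is the real implementation.

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// atomicRemove renames the directory out of the way first, then deletes
// the renamed copy at leisure.
func atomicRemove(dir string) error {
    tmp := dir + "-removing"
    if err := os.Rename(dir, tmp); err != nil && !os.IsNotExist(err) {
        return err
    }
    return os.RemoveAll(tmp)
}

func main() {
    root, _ := os.MkdirTemp("", "aufs")
    defer os.RemoveAll(root)
    layer := filepath.Join(root, "diff", "1")
    os.MkdirAll(layer, 0755)
    fmt.Println(atomicRemove(layer)) // <nil>, and "1" vanishes in one step
}
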
- for _, p := range tmpDirs { - - realPath := path.Join(a.rootPath(), p, id) - tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id)) - if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) { - return err - } - defer os.RemoveAll(tmpPath) - } - - // Remove the layers file for the id - if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -// Return the rootfs path for the id -// This will mount the dir at it's given path -func (a *Driver) Get(id string) (string, error) { - ids, err := getParentIds(a.rootPath(), id) - if err != nil { - if !os.IsNotExist(err) { - return "", err - } - ids = []string{} - } - - // Protect the a.active from concurrent access - a.Lock() - defer a.Unlock() - - count := a.active[id] - - // If a dir does not have a parent ( no layers )do not try to mount - // just return the diff path to the data - out := path.Join(a.rootPath(), "diff", id) - if len(ids) > 0 { - out = path.Join(a.rootPath(), "mnt", id) - - if count == 0 { - if err := a.mount(id); err != nil { - return "", err - } - } - } - - a.active[id] = count + 1 - - return out, nil -} - -func (a *Driver) Put(id string) { - // Protect the a.active from concurrent access - a.Lock() - defer a.Unlock() - - if count := a.active[id]; count > 1 { - a.active[id] = count - 1 - } else { - ids, _ := getParentIds(a.rootPath(), id) - // We only mounted if there are any parents - if ids != nil && len(ids) > 0 { - a.unmount(id) - } - delete(a.active, id) - } -} - -// Returns an archive of the contents for the id -func (a *Driver) Diff(id string) (archive.Archive, error) { - return archive.TarFilter(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ - Compression: archive.Uncompressed, - }) -} - -func (a *Driver) ApplyDiff(id string, diff archive.ArchiveReader) error { - return archive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil) -} - -// Returns the size of the contents for the id -func (a *Driver) DiffSize(id string) (int64, error) { - return utils.TreeSize(path.Join(a.rootPath(), "diff", id)) -} - -func (a *Driver) Changes(id string) ([]archive.Change, error) { - layers, err := a.getParentLayerPaths(id) - if err != nil { - return nil, err - } - return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) -} - -func (a *Driver) getParentLayerPaths(id string) ([]string, error) { - parentIds, err := getParentIds(a.rootPath(), id) - if err != nil { - return nil, err - } - if len(parentIds) == 0 { - return nil, fmt.Errorf("Dir %s does not have any parent layers", id) - } - layers := make([]string, len(parentIds)) - - // Get the diff paths for all the parent ids - for i, p := range parentIds { - layers[i] = path.Join(a.rootPath(), "diff", p) - } - return layers, nil -} - -func (a *Driver) mount(id string) error { - // If the id is mounted or we get an error return - if mounted, err := a.mounted(id); err != nil || mounted { - return err - } - - var ( - target = path.Join(a.rootPath(), "mnt", id) - rw = path.Join(a.rootPath(), "diff", id) - ) - - layers, err := a.getParentLayerPaths(id) - if err != nil { - return err - } - - if err := a.aufsMount(layers, rw, target); err != nil { - return err - } - return nil -} - -func (a *Driver) unmount(id string) error { - if mounted, err := a.mounted(id); err != nil || !mounted { - return err - } - target := path.Join(a.rootPath(), "mnt", id) - return Unmount(target) -} - -func (a *Driver) mounted(id string) (bool, error) { - target := path.Join(a.rootPath(), 
"mnt", id) - return mountpk.Mounted(target) -} - -// During cleanup aufs needs to unmount all mountpoints -func (a *Driver) Cleanup() error { - ids, err := loadIds(path.Join(a.rootPath(), "layers")) - if err != nil { - return err - } - for _, id := range ids { - if err := a.unmount(id); err != nil { - utils.Errorf("Unmounting %s: %s", utils.TruncateID(id), err) - } - } - return nil -} - -func (a *Driver) aufsMount(ro []string, rw, target string) (err error) { - defer func() { - if err != nil { - Unmount(target) - } - }() - - if err = a.tryMount(ro, rw, target); err != nil { - if err = a.mountRw(rw, target); err != nil { - return - } - - for _, layer := range ro { - branch := fmt.Sprintf("append:%s=ro+wh", layer) - if err = mount("none", target, "aufs", MsRemount, branch); err != nil { - return - } - } - } - return -} - -// Try to mount using the aufs fast path, if this fails then -// append ro layers. -func (a *Driver) tryMount(ro []string, rw, target string) (err error) { - var ( - rwBranch = fmt.Sprintf("%s=rw", rw) - roBranches = fmt.Sprintf("%s=ro+wh:", strings.Join(ro, "=ro+wh:")) - ) - return mount("none", target, "aufs", 0, fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches)) -} - -func (a *Driver) mountRw(rw, target string) error { - return mount("none", target, "aufs", 0, fmt.Sprintf("br:%s,xino=/dev/shm/aufs.xino", rw)) -} - -func rollbackMount(target string, err error) { - if err != nil { - Unmount(target) - } -} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/aufs/aufs_test.go docker.io-1.3.2~dfsg1/graphdriver/aufs/aufs_test.go --- docker.io-0.9.1~dfsg1/graphdriver/aufs/aufs_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/aufs/aufs_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,697 +0,0 @@ -package aufs - -import ( - "crypto/sha256" - "encoding/hex" - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/graphdriver" - "io/ioutil" - "os" - "path" - "testing" -) - -var ( - tmp = path.Join(os.TempDir(), "aufs-tests", "aufs") -) - -func testInit(dir string, t *testing.T) graphdriver.Driver { - d, err := Init(dir) - if err != nil { - if err == ErrAufsNotSupported { - t.Skip(err) - } else { - t.Fatal(err) - } - } - return d -} - -func newDriver(t *testing.T) *Driver { - if err := os.MkdirAll(tmp, 0755); err != nil { - t.Fatal(err) - } - - d := testInit(tmp, t) - return d.(*Driver) -} - -func TestNewDriver(t *testing.T) { - if err := os.MkdirAll(tmp, 0755); err != nil { - t.Fatal(err) - } - - d := testInit(tmp, t) - defer os.RemoveAll(tmp) - if d == nil { - t.Fatalf("Driver should not be nil") - } -} - -func TestAufsString(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if d.String() != "aufs" { - t.Fatalf("Expected aufs got %s", d.String()) - } -} - -func TestCreateDirStructure(t *testing.T) { - newDriver(t) - defer os.RemoveAll(tmp) - - paths := []string{ - "mnt", - "layers", - "diff", - } - - for _, p := range paths { - if _, err := os.Stat(path.Join(tmp, p)); err != nil { - t.Fatal(err) - } - } -} - -// We should be able to create two drivers with the same dir structure -func TestNewDriverFromExistingDir(t *testing.T) { - if err := os.MkdirAll(tmp, 0755); err != nil { - t.Fatal(err) - } - - testInit(tmp, t) - testInit(tmp, t) - os.RemoveAll(tmp) -} - -func TestCreateNewDir(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } -} - -func TestCreateNewDirStructure(t *testing.T) { - d := newDriver(t) - defer 
os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - paths := []string{ - "mnt", - "diff", - "layers", - } - - for _, p := range paths { - if _, err := os.Stat(path.Join(tmp, p, "1")); err != nil { - t.Fatal(err) - } - } -} - -func TestRemoveImage(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - if err := d.Remove("1"); err != nil { - t.Fatal(err) - } - - paths := []string{ - "mnt", - "diff", - "layers", - } - - for _, p := range paths { - if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil { - t.Fatalf("Error should not be nil because dirs with id 1 should be delted: %s", p) - } - } -} - -func TestGetWithoutParent(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - diffPath, err := d.Get("1") - if err != nil { - t.Fatal(err) - } - expected := path.Join(tmp, "diff", "1") - if diffPath != expected { - t.Fatalf("Expected path %s got %s", expected, diffPath) - } -} - -func TestCleanupWithNoDirs(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } -} - -func TestCleanupWithDir(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } -} - -func TestMountedFalseResponse(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - response, err := d.mounted("1") - if err != nil { - t.Fatal(err) - } - - if response != false { - t.Fatalf("Response if dir id 1 is mounted should be false") - } -} - -func TestMountedTrueReponse(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - defer d.Cleanup() - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - if err := d.Create("2", "1"); err != nil { - t.Fatal(err) - } - - _, err := d.Get("2") - if err != nil { - t.Fatal(err) - } - - response, err := d.mounted("2") - if err != nil { - t.Fatal(err) - } - - if response != true { - t.Fatalf("Response if dir id 2 is mounted should be true") - } -} - -func TestMountWithParent(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - if err := d.Create("2", "1"); err != nil { - t.Fatal(err) - } - - defer func() { - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } - }() - - mntPath, err := d.Get("2") - if err != nil { - t.Fatal(err) - } - if mntPath == "" { - t.Fatal("mntPath should not be empty string") - } - - expected := path.Join(tmp, "mnt", "2") - if mntPath != expected { - t.Fatalf("Expected %s got %s", expected, mntPath) - } -} - -func TestRemoveMountedDir(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - if err := d.Create("2", "1"); err != nil { - t.Fatal(err) - } - - defer func() { - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } - }() - - mntPath, err := d.Get("2") - if err != nil { - t.Fatal(err) - } - if mntPath == "" { - t.Fatal("mntPath should not be empty string") - } - - mounted, err := d.mounted("2") - if err != nil { - t.Fatal(err) - } - - if !mounted { - t.Fatalf("Dir id 2 should be mounted") - } - - if err := d.Remove("2"); err != nil { - t.Fatal(err) - } -} - -func TestCreateWithInvalidParent(t *testing.T) { - d := newDriver(t) - defer 
os.RemoveAll(tmp) - - if err := d.Create("1", "docker"); err == nil { - t.Fatalf("Error should not be nil with parent does not exist") - } -} - -func TestGetDiff(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - diffPath, err := d.Get("1") - if err != nil { - t.Fatal(err) - } - - // Add a file to the diff path with a fixed size - size := int64(1024) - - f, err := os.Create(path.Join(diffPath, "test_file")) - if err != nil { - t.Fatal(err) - } - if err := f.Truncate(size); err != nil { - t.Fatal(err) - } - f.Close() - - a, err := d.Diff("1") - if err != nil { - t.Fatal(err) - } - if a == nil { - t.Fatalf("Archive should not be nil") - } -} - -func TestChanges(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - if err := d.Create("2", "1"); err != nil { - t.Fatal(err) - } - - defer func() { - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } - }() - - mntPoint, err := d.Get("2") - if err != nil { - t.Fatal(err) - } - - // Create a file to save in the mountpoint - f, err := os.Create(path.Join(mntPoint, "test.txt")) - if err != nil { - t.Fatal(err) - } - - if _, err := f.WriteString("testline"); err != nil { - t.Fatal(err) - } - if err := f.Close(); err != nil { - t.Fatal(err) - } - - changes, err := d.Changes("2") - if err != nil { - t.Fatal(err) - } - if len(changes) != 1 { - t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) - } - change := changes[0] - - expectedPath := "/test.txt" - if change.Path != expectedPath { - t.Fatalf("Expected path %s got %s", expectedPath, change.Path) - } - - if change.Kind != archive.ChangeAdd { - t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) - } - - if err := d.Create("3", "2"); err != nil { - t.Fatal(err) - } - mntPoint, err = d.Get("3") - if err != nil { - t.Fatal(err) - } - - // Create a file to save in the mountpoint - f, err = os.Create(path.Join(mntPoint, "test2.txt")) - if err != nil { - t.Fatal(err) - } - - if _, err := f.WriteString("testline"); err != nil { - t.Fatal(err) - } - if err := f.Close(); err != nil { - t.Fatal(err) - } - - changes, err = d.Changes("3") - if err != nil { - t.Fatal(err) - } - - if len(changes) != 1 { - t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) - } - change = changes[0] - - expectedPath = "/test2.txt" - if change.Path != expectedPath { - t.Fatalf("Expected path %s got %s", expectedPath, change.Path) - } - - if change.Kind != archive.ChangeAdd { - t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) - } -} - -func TestDiffSize(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - diffPath, err := d.Get("1") - if err != nil { - t.Fatal(err) - } - - // Add a file to the diff path with a fixed size - size := int64(1024) - - f, err := os.Create(path.Join(diffPath, "test_file")) - if err != nil { - t.Fatal(err) - } - if err := f.Truncate(size); err != nil { - t.Fatal(err) - } - s, err := f.Stat() - if err != nil { - t.Fatal(err) - } - size = s.Size() - if err := f.Close(); err != nil { - t.Fatal(err) - } - - diffSize, err := d.DiffSize("1") - if err != nil { - t.Fatal(err) - } - if diffSize != size { - t.Fatalf("Expected size to be %d got %d", size, diffSize) - } -} - -func TestChildDiffSize(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - defer d.Cleanup() - - if err := d.Create("1", ""); err != 
nil { - t.Fatal(err) - } - - diffPath, err := d.Get("1") - if err != nil { - t.Fatal(err) - } - - // Add a file to the diff path with a fixed size - size := int64(1024) - - f, err := os.Create(path.Join(diffPath, "test_file")) - if err != nil { - t.Fatal(err) - } - if err := f.Truncate(size); err != nil { - t.Fatal(err) - } - s, err := f.Stat() - if err != nil { - t.Fatal(err) - } - size = s.Size() - if err := f.Close(); err != nil { - t.Fatal(err) - } - - diffSize, err := d.DiffSize("1") - if err != nil { - t.Fatal(err) - } - if diffSize != size { - t.Fatalf("Expected size to be %d got %d", size, diffSize) - } - - if err := d.Create("2", "1"); err != nil { - t.Fatal(err) - } - - diffSize, err = d.DiffSize("2") - if err != nil { - t.Fatal(err) - } - // The diff size for the child should be zero - if diffSize != 0 { - t.Fatalf("Expected size to be %d got %d", 0, diffSize) - } -} - -func TestExists(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - defer d.Cleanup() - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - if d.Exists("none") { - t.Fatal("id name should not exist in the driver") - } - - if !d.Exists("1") { - t.Fatal("id 1 should exist in the driver") - } -} - -func TestStatus(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - defer d.Cleanup() - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - status := d.Status() - if status == nil || len(status) == 0 { - t.Fatal("Status should not be nil or empty") - } - rootDir := status[0] - dirs := status[1] - if rootDir[0] != "Root Dir" { - t.Fatalf("Expected Root Dir got %s", rootDir[0]) - } - if rootDir[1] != d.rootPath() { - t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1]) - } - if dirs[0] != "Dirs" { - t.Fatalf("Expected Dirs got %s", dirs[0]) - } - if dirs[1] != "1" { - t.Fatalf("Expected 1 got %s", dirs[1]) - } -} - -func TestApplyDiff(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - defer d.Cleanup() - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - diffPath, err := d.Get("1") - if err != nil { - t.Fatal(err) - } - - // Add a file to the diff path with a fixed size - size := int64(1024) - - f, err := os.Create(path.Join(diffPath, "test_file")) - if err != nil { - t.Fatal(err) - } - if err := f.Truncate(size); err != nil { - t.Fatal(err) - } - f.Close() - - diff, err := d.Diff("1") - if err != nil { - t.Fatal(err) - } - - if err := d.Create("2", ""); err != nil { - t.Fatal(err) - } - if err := d.Create("3", "2"); err != nil { - t.Fatal(err) - } - - if err := d.ApplyDiff("3", diff); err != nil { - t.Fatal(err) - } - - // Ensure that the file is in the mount point for id 3 - - mountPoint, err := d.Get("3") - if err != nil { - t.Fatal(err) - } - if _, err := os.Stat(path.Join(mountPoint, "test_file")); err != nil { - t.Fatal(err) - } -} - -func hash(c string) string { - h := sha256.New() - fmt.Fprint(h, c) - return hex.EncodeToString(h.Sum(nil)) -} - -func TestMountMoreThan42Layers(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - defer d.Cleanup() - var last string - var expected int - - for i := 1; i < 127; i++ { - expected++ - var ( - parent = fmt.Sprintf("%d", i-1) - current = fmt.Sprintf("%d", i) - ) - - if parent == "0" { - parent = "" - } else { - parent = hash(parent) - } - current = hash(current) - - if err := d.Create(current, parent); err != nil { - t.Logf("Current layer %d", i) - t.Fatal(err) - } - point, err := d.Get(current) - if err != nil { - t.Logf("Current layer %d", i) - t.Fatal(err) - } - f, err := 
os.Create(path.Join(point, current)) - if err != nil { - t.Logf("Current layer %d", i) - t.Fatal(err) - } - f.Close() - - if i%10 == 0 { - if err := os.Remove(path.Join(point, parent)); err != nil { - t.Logf("Current layer %d", i) - t.Fatal(err) - } - expected-- - } - last = current - } - - // Perform the actual mount for the top most image - point, err := d.Get(last) - if err != nil { - t.Fatal(err) - } - files, err := ioutil.ReadDir(point) - if err != nil { - t.Fatal(err) - } - if len(files) != expected { - t.Fatalf("Expected %d got %d", expected, len(files)) - } -} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/aufs/dirs.go docker.io-1.3.2~dfsg1/graphdriver/aufs/dirs.go --- docker.io-0.9.1~dfsg1/graphdriver/aufs/dirs.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/aufs/dirs.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,46 +0,0 @@ -package aufs - -import ( - "bufio" - "io/ioutil" - "os" - "path" -) - -// Return all the directories -func loadIds(root string) ([]string, error) { - dirs, err := ioutil.ReadDir(root) - if err != nil { - return nil, err - } - out := []string{} - for _, d := range dirs { - if !d.IsDir() { - out = append(out, d.Name()) - } - } - return out, nil -} - -// Read the layers file for the current id and return all the -// layers represented by new lines in the file -// -// If there are no lines in the file then the id has no parent -// and an empty slice is returned. -func getParentIds(root, id string) ([]string, error) { - f, err := os.Open(path.Join(root, "layers", id)) - if err != nil { - return nil, err - } - defer f.Close() - - out := []string{} - s := bufio.NewScanner(f) - - for s.Scan() { - if t := s.Text(); t != "" { - out = append(out, s.Text()) - } - } - return out, s.Err() -} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/aufs/migrate.go docker.io-1.3.2~dfsg1/graphdriver/aufs/migrate.go --- docker.io-0.9.1~dfsg1/graphdriver/aufs/migrate.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/aufs/migrate.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,194 +0,0 @@ -package aufs - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path" -) - -type metadata struct { - ID string `json:"id"` - ParentID string `json:"parent,omitempty"` - Image string `json:"Image,omitempty"` - - parent *metadata -} - -func pathExists(pth string) bool { - if _, err := os.Stat(pth); err != nil { - return false - } - return true -} - -// Migrate existing images and containers from docker < 0.7.x -// -// The format pre 0.7 is for docker to store the metadata and filesystem -// content in the same directory. For the migration to work we need to move Image layer -// data from /var/lib/docker/graph//layers to the diff of the registered id. -// -// Next we need to migrate the container's rw layer to diff of the driver. After the -// contents are migrated we need to register the image and container ids with the -// driver. -// -// For the migration we try to move the folder containing the layer files, if that -// fails because the data is currently mounted we will fallback to creating a -// symlink. 
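
A standalone sketch of the rename-with-symlink-fallback strategy the migration comment above describes; tryRelocate further down in this file is the real implementation, which additionally clears a stale symlink left by an earlier attempt. Paths here are invented for the example.

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// relocate tries a plain rename; if that fails (data busy, e.g. still
// mounted, or on another filesystem), it leaves the data in place and
// points a symlink at it instead.
func relocate(oldPath, newPath string) error {
    if err := os.Rename(oldPath, newPath); err != nil {
        if sErr := os.Symlink(oldPath, newPath); sErr != nil {
            return fmt.Errorf("unable to relocate %s to %s: rename err %v, symlink err %v",
                oldPath, newPath, err, sErr)
        }
    }
    return nil
}

func main() {
    root, _ := os.MkdirTemp("", "migrate")
    defer os.RemoveAll(root)
    layer := filepath.Join(root, "layer")
    os.MkdirAll(layer, 0755)
    fmt.Println(relocate(layer, filepath.Join(root, "diff"))) // <nil>
}
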
-func (a *Driver) Migrate(pth string, setupInit func(p string) error) error { - if pathExists(path.Join(pth, "graph")) { - if err := a.migrateRepositories(pth); err != nil { - return err - } - if err := a.migrateImages(path.Join(pth, "graph")); err != nil { - return err - } - return a.migrateContainers(path.Join(pth, "containers"), setupInit) - } - return nil -} - -func (a *Driver) migrateRepositories(pth string) error { - name := path.Join(pth, "repositories") - if err := os.Rename(name, name+"-aufs"); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) error { - fis, err := ioutil.ReadDir(pth) - if err != nil { - return err - } - - for _, fi := range fis { - if id := fi.Name(); fi.IsDir() && pathExists(path.Join(pth, id, "rw")) { - if err := tryRelocate(path.Join(pth, id, "rw"), path.Join(a.rootPath(), "diff", id)); err != nil { - return err - } - - if !a.Exists(id) { - - metadata, err := loadMetadata(path.Join(pth, id, "config.json")) - if err != nil { - return err - } - - initID := fmt.Sprintf("%s-init", id) - if err := a.Create(initID, metadata.Image); err != nil { - return err - } - - initPath, err := a.Get(initID) - if err != nil { - return err - } - // setup init layer - if err := setupInit(initPath); err != nil { - return err - } - - if err := a.Create(id, initID); err != nil { - return err - } - } - } - } - return nil -} - -func (a *Driver) migrateImages(pth string) error { - fis, err := ioutil.ReadDir(pth) - if err != nil { - return err - } - var ( - m = make(map[string]*metadata) - current *metadata - exists bool - ) - - for _, fi := range fis { - if id := fi.Name(); fi.IsDir() && pathExists(path.Join(pth, id, "layer")) { - if current, exists = m[id]; !exists { - current, err = loadMetadata(path.Join(pth, id, "json")) - if err != nil { - return err - } - m[id] = current - } - } - } - - for _, v := range m { - v.parent = m[v.ParentID] - } - - migrated := make(map[string]bool) - for _, v := range m { - if err := a.migrateImage(v, pth, migrated); err != nil { - return err - } - } - return nil -} - -func (a *Driver) migrateImage(m *metadata, pth string, migrated map[string]bool) error { - if !migrated[m.ID] { - if m.parent != nil { - a.migrateImage(m.parent, pth, migrated) - } - if err := tryRelocate(path.Join(pth, m.ID, "layer"), path.Join(a.rootPath(), "diff", m.ID)); err != nil { - return err - } - if !a.Exists(m.ID) { - if err := a.Create(m.ID, m.ParentID); err != nil { - return err - } - } - migrated[m.ID] = true - } - return nil -} - -// tryRelocate will try to rename the old path to the new pack and if -// the operation fails, it will fallback to a symlink -func tryRelocate(oldPath, newPath string) error { - s, err := os.Lstat(newPath) - if err != nil && !os.IsNotExist(err) { - return err - } - // If the destination is a symlink then we already tried to relocate once before - // and it failed so we delete it and try to remove - if s != nil && s.Mode()&os.ModeSymlink == os.ModeSymlink { - if err := os.RemoveAll(newPath); err != nil { - return err - } - } - if err := os.Rename(oldPath, newPath); err != nil { - if sErr := os.Symlink(oldPath, newPath); sErr != nil { - return fmt.Errorf("Unable to relocate %s to %s: Rename err %s Symlink err %s", oldPath, newPath, err, sErr) - } - } - return nil -} - -func loadMetadata(pth string) (*metadata, error) { - f, err := os.Open(pth) - if err != nil { - return nil, err - } - defer f.Close() - - var ( - out = &metadata{} - dec = 
json.NewDecoder(f) - ) - - if err := dec.Decode(out); err != nil { - return nil, err - } - return out, nil -} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/aufs/mount.go docker.io-1.3.2~dfsg1/graphdriver/aufs/mount.go --- docker.io-0.9.1~dfsg1/graphdriver/aufs/mount.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/aufs/mount.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ -package aufs - -import ( - "github.com/dotcloud/docker/utils" - "os/exec" - "syscall" -) - -func Unmount(target string) error { - if err := exec.Command("auplink", target, "flush").Run(); err != nil { - utils.Errorf("[warning]: couldn't run auplink before unmount: %s", err) - } - if err := syscall.Unmount(target, 0); err != nil { - return err - } - return nil -} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/aufs/mount_linux.go docker.io-1.3.2~dfsg1/graphdriver/aufs/mount_linux.go --- docker.io-0.9.1~dfsg1/graphdriver/aufs/mount_linux.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/aufs/mount_linux.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -// +build amd64 - -package aufs - -import "syscall" - -const MsRemount = syscall.MS_REMOUNT - -func mount(source string, target string, fstype string, flags uintptr, data string) error { - return syscall.Mount(source, target, fstype, flags, data) -} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/aufs/mount_unsupported.go docker.io-1.3.2~dfsg1/graphdriver/aufs/mount_unsupported.go --- docker.io-0.9.1~dfsg1/graphdriver/aufs/mount_unsupported.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/aufs/mount_unsupported.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -// +build !linux !amd64 - -package aufs - -import "errors" - -const MsRemount = 0 - -func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { - return errors.New("mount is not implemented on darwin") -} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/btrfs/btrfs.go docker.io-1.3.2~dfsg1/graphdriver/btrfs/btrfs.go --- docker.io-0.9.1~dfsg1/graphdriver/btrfs/btrfs.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/btrfs/btrfs.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,213 +0,0 @@ -// +build linux,amd64 - -package btrfs - -/* -#include -#include -#include -*/ -import "C" - -import ( - "fmt" - "github.com/dotcloud/docker/graphdriver" - "os" - "path" - "syscall" - "unsafe" -) - -func init() { - graphdriver.Register("btrfs", Init) -} - -func Init(home string) (graphdriver.Driver, error) { - rootdir := path.Dir(home) - - var buf syscall.Statfs_t - if err := syscall.Statfs(rootdir, &buf); err != nil { - return nil, err - } - - if buf.Type != 0x9123683E { - return nil, fmt.Errorf("%s is not a btrfs filesystem", rootdir) - } - - return &Driver{ - home: home, - }, nil -} - -type Driver struct { - home string -} - -func (d *Driver) String() string { - return "btrfs" -} - -func (d *Driver) Status() [][2]string { - return nil -} - -func (d *Driver) Cleanup() error { - return nil -} - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func openDir(path string) (*C.DIR, error) { - Cpath := C.CString(path) - defer free(Cpath) - - dir := C.opendir(Cpath) - if dir == nil { - return nil, fmt.Errorf("Can't open dir") - } - return dir, nil -} - -func closeDir(dir *C.DIR) { - if dir != nil { - C.closedir(dir) - } -} - -func getDirFd(dir *C.DIR) uintptr { - return uintptr(C.dirfd(dir)) -} - -func subvolCreate(path, name string) error { - dir, err := openDir(path) - if err != nil 
{ - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_vol_args - for i, c := range []byte(name) { - args.name[i] = C.char(c) - } - - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) - } - return nil -} - -func subvolSnapshot(src, dest, name string) error { - srcDir, err := openDir(src) - if err != nil { - return err - } - defer closeDir(srcDir) - - destDir, err := openDir(dest) - if err != nil { - return err - } - defer closeDir(destDir) - - var args C.struct_btrfs_ioctl_vol_args_v2 - args.fd = C.__s64(getDirFd(srcDir)) - for i, c := range []byte(name) { - args.name[i] = C.char(c) - } - - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) - } - return nil -} - -func subvolDelete(path, name string) error { - dir, err := openDir(path) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_vol_args - for i, c := range []byte(name) { - args.name[i] = C.char(c) - } - - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to destroy btrfs snapshot: %v", errno.Error()) - } - return nil -} - -func (d *Driver) subvolumesDir() string { - return path.Join(d.home, "subvolumes") -} - -func (d *Driver) subvolumesDirId(id string) string { - return path.Join(d.subvolumesDir(), id) -} - -func (d *Driver) Create(id string, parent string) error { - subvolumes := path.Join(d.home, "subvolumes") - if err := os.MkdirAll(subvolumes, 0700); err != nil { - return err - } - if parent == "" { - if err := subvolCreate(subvolumes, id); err != nil { - return err - } - } else { - parentDir, err := d.Get(parent) - if err != nil { - return err - } - if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { - return err - } - } - return nil -} - -func (d *Driver) Remove(id string) error { - dir := d.subvolumesDirId(id) - if _, err := os.Stat(dir); err != nil { - return err - } - if err := subvolDelete(d.subvolumesDir(), id); err != nil { - return err - } - return os.RemoveAll(dir) -} - -func (d *Driver) Get(id string) (string, error) { - dir := d.subvolumesDirId(id) - st, err := os.Stat(dir) - if err != nil { - return "", err - } - - if !st.IsDir() { - return "", fmt.Errorf("%s: not a directory", dir) - } - - return dir, nil -} - -func (d *Driver) Put(id string) { - // Get() creates no runtime resources (like e.g. mounts) - // so this doesn't need to do anything. 
-} - -func (d *Driver) Exists(id string) bool { - dir := d.subvolumesDirId(id) - _, err := os.Stat(dir) - return err == nil -} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/btrfs/dummy_unsupported.go docker.io-1.3.2~dfsg1/graphdriver/btrfs/dummy_unsupported.go --- docker.io-0.9.1~dfsg1/graphdriver/btrfs/dummy_unsupported.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/btrfs/dummy_unsupported.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -// +build !linux !amd64 - -package btrfs diff -Nru docker.io-0.9.1~dfsg1/graphdriver/devmapper/attach_loopback.go docker.io-1.3.2~dfsg1/graphdriver/devmapper/attach_loopback.go --- docker.io-0.9.1~dfsg1/graphdriver/devmapper/attach_loopback.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/devmapper/attach_loopback.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,126 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "fmt" - "github.com/dotcloud/docker/utils" -) - -func stringToLoopName(src string) [LoNameSize]uint8 { - var dst [LoNameSize]uint8 - copy(dst[:], src[:]) - return dst -} - -func getNextFreeLoopbackIndex() (int, error) { - f, err := osOpenFile("/dev/loop-control", osORdOnly, 0644) - if err != nil { - return 0, err - } - defer f.Close() - - index, err := ioctlLoopCtlGetFree(f.Fd()) - if index < 0 { - index = 0 - } - return index, err -} - -func openNextAvailableLoopback(index int, sparseFile *osFile) (loopFile *osFile, err error) { - // Start looking for a free /dev/loop - for { - target := fmt.Sprintf("/dev/loop%d", index) - index++ - - fi, err := osStat(target) - if err != nil { - if osIsNotExist(err) { - utils.Errorf("There are no more loopback device available.") - } - return nil, ErrAttachLoopbackDevice - } - - if fi.Mode()&osModeDevice != osModeDevice { - utils.Errorf("Loopback device %s is not a block device.", target) - continue - } - - // OpenFile adds O_CLOEXEC - loopFile, err = osOpenFile(target, osORdWr, 0644) - if err != nil { - utils.Errorf("Error openning loopback device: %s", err) - return nil, ErrAttachLoopbackDevice - } - - // Try to attach to the loop file - if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { - loopFile.Close() - - // If the error is EBUSY, then try the next loopback - if err != sysEBusy { - utils.Errorf("Cannot set up loopback device %s: %s", target, err) - return nil, ErrAttachLoopbackDevice - } - - // Otherwise, we keep going with the loop - continue - } - // In case of success, we finished. Break the loop. - break - } - - // This can't happen, but let's be sure - if loopFile == nil { - utils.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) - return nil, ErrAttachLoopbackDevice - } - - return loopFile, nil -} - -// attachLoopDevice attaches the given sparse file to the next -// available loopback device. It returns an opened *osFile. -func attachLoopDevice(sparseName string) (loop *osFile, err error) { - - // Try to retrieve the next available loopback device via syscall. - // If it fails, we discard error and start loopking for a - // loopback from index 0. 
- startIndex, err := getNextFreeLoopbackIndex() - if err != nil { - utils.Debugf("Error retrieving the next available loopback: %s", err) - } - - // OpenFile adds O_CLOEXEC - sparseFile, err := osOpenFile(sparseName, osORdWr, 0644) - if err != nil { - utils.Errorf("Error openning sparse file %s: %s", sparseName, err) - return nil, ErrAttachLoopbackDevice - } - defer sparseFile.Close() - - loopFile, err := openNextAvailableLoopback(startIndex, sparseFile) - if err != nil { - return nil, err - } - - // Set the status of the loopback device - loopInfo := &LoopInfo64{ - loFileName: stringToLoopName(loopFile.Name()), - loOffset: 0, - loFlags: LoFlagsAutoClear, - } - - if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { - utils.Errorf("Cannot set up loopback device info: %s", err) - - // If the call failed, then free the loopback device - if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { - utils.Errorf("Error while cleaning up the loopback device") - } - loopFile.Close() - return nil, ErrAttachLoopbackDevice - } - - return loopFile, nil -} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/devmapper/deviceset.go docker.io-1.3.2~dfsg1/graphdriver/devmapper/deviceset.go --- docker.io-0.9.1~dfsg1/graphdriver/devmapper/deviceset.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/devmapper/deviceset.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1090 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "encoding/json" - "errors" - "fmt" - "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "path" - "path/filepath" - "strconv" - "strings" - "sync" - "time" -) - -var ( - DefaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 - DefaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 - DefaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 -) - -type DevInfo struct { - Hash string `json:"-"` - DeviceId int `json:"device_id"` - Size uint64 `json:"size"` - TransactionId uint64 `json:"transaction_id"` - Initialized bool `json:"initialized"` - devices *DeviceSet `json:"-"` - - mountCount int `json:"-"` - mountPath string `json:"-"` - // A floating mount means one reference is not owned and - // will be stolen by the next mount. This allows us to - // avoid unmounting directly after creation before the - // first get (since we need to mount to set up the device - // a bit first). - floating bool `json:"-"` - - // The global DeviceSet lock guarantees that we serialize all - // the calls to libdevmapper (which is not threadsafe), but we - // sometimes release that lock while sleeping. In that case - // this per-device lock is still held, protecting against - // other accesses to the device that we're doing the wait on. 
- lock sync.Mutex `json:"-"` -} - -type MetaData struct { - Devices map[string]*DevInfo `json:devices` -} - -type DeviceSet struct { - MetaData - sync.Mutex // Protects Devices map and serializes calls into libdevmapper - root string - devicePrefix string - TransactionId uint64 - NewTransactionId uint64 - nextFreeDevice int - sawBusy bool -} - -type DiskUsage struct { - Used uint64 - Total uint64 -} - -type Status struct { - PoolName string - DataLoopback string - MetadataLoopback string - Data DiskUsage - Metadata DiskUsage - SectorSize uint64 -} - -type DevStatus struct { - DeviceId int - Size uint64 - TransactionId uint64 - SizeInSectors uint64 - MappedSectors uint64 - HighestMappedSector uint64 -} - -type UnmountMode int - -const ( - UnmountRegular UnmountMode = iota - UnmountFloat - UnmountSink -) - -func getDevName(name string) string { - return "/dev/mapper/" + name -} - -func (info *DevInfo) Name() string { - hash := info.Hash - if hash == "" { - hash = "base" - } - return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) -} - -func (info *DevInfo) DevName() string { - return getDevName(info.Name()) -} - -func (devices *DeviceSet) loopbackDir() string { - return path.Join(devices.root, "devicemapper") -} - -func (devices *DeviceSet) jsonFile() string { - return path.Join(devices.loopbackDir(), "json") -} - -func (devices *DeviceSet) getPoolName() string { - return devices.devicePrefix + "-pool" -} - -func (devices *DeviceSet) getPoolDevName() string { - return getDevName(devices.getPoolName()) -} - -func (devices *DeviceSet) hasImage(name string) bool { - dirname := devices.loopbackDir() - filename := path.Join(dirname, name) - - _, err := osStat(filename) - return err == nil -} - -// ensureImage creates a sparse file of bytes at the path -// /devicemapper/. -// If the file already exists, it does nothing. -// Either way it returns the full path. 
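
A self-contained sketch of the sparse-allocation trick that ensureImage below relies on: Truncate extends a fresh file to the requested size without allocating blocks, so a 100 GB loopback image initially consumes almost no disk. The path and size in main are invented for the example.

package main

import (
    "fmt"
    "os"
)

// ensureSparseFile creates filename as a sparse file of the given size,
// and does nothing if it already exists.
func ensureSparseFile(filename string, size int64) error {
    if _, err := os.Stat(filename); err == nil {
        return nil // already exists; leave it alone
    } else if !os.IsNotExist(err) {
        return err
    }
    f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
    if err != nil {
        return err
    }
    defer f.Close()
    return f.Truncate(size)
}

func main() {
    dir, _ := os.MkdirTemp("", "devicemapper")
    defer os.RemoveAll(dir)
    name := dir + "/data"
    if err := ensureSparseFile(name, 100*1024*1024*1024); err != nil {
        fmt.Println(err)
        return
    }
    fi, _ := os.Stat(name)
    fmt.Println(fi.Size()) // 107374182400, yet almost no disk is actually used
}
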
-func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { - dirname := devices.loopbackDir() - filename := path.Join(dirname, name) - - if err := osMkdirAll(dirname, 0700); err != nil && !osIsExist(err) { - return "", err - } - - if _, err := osStat(filename); err != nil { - if !osIsNotExist(err) { - return "", err - } - utils.Debugf("Creating loopback file %s for device-manage use", filename) - file, err := osOpenFile(filename, osORdWr|osOCreate, 0600) - if err != nil { - return "", err - } - defer file.Close() - - if err = file.Truncate(size); err != nil { - return "", err - } - } - return filename, nil -} - -func (devices *DeviceSet) allocateDeviceId() int { - // TODO: Add smarter reuse of deleted devices - id := devices.nextFreeDevice - devices.nextFreeDevice = devices.nextFreeDevice + 1 - return id -} - -func (devices *DeviceSet) allocateTransactionId() uint64 { - devices.NewTransactionId = devices.NewTransactionId + 1 - return devices.NewTransactionId -} - -func (devices *DeviceSet) saveMetadata() error { - jsonData, err := json.Marshal(devices.MetaData) - if err != nil { - return fmt.Errorf("Error encoding metadata to json: %s", err) - } - tmpFile, err := ioutil.TempFile(filepath.Dir(devices.jsonFile()), ".json") - if err != nil { - return fmt.Errorf("Error creating metadata file: %s", err) - } - - n, err := tmpFile.Write(jsonData) - if err != nil { - return fmt.Errorf("Error writing metadata to %s: %s", tmpFile.Name(), err) - } - if n < len(jsonData) { - return io.ErrShortWrite - } - if err := tmpFile.Sync(); err != nil { - return fmt.Errorf("Error syncing metadata file %s: %s", tmpFile.Name(), err) - } - if err := tmpFile.Close(); err != nil { - return fmt.Errorf("Error closing metadata file %s: %s", tmpFile.Name(), err) - } - if err := osRename(tmpFile.Name(), devices.jsonFile()); err != nil { - return fmt.Errorf("Error committing metadata file %s: %s", tmpFile.Name(), err) - } - - if devices.NewTransactionId != devices.TransactionId { - if err = setTransactionId(devices.getPoolDevName(), devices.TransactionId, devices.NewTransactionId); err != nil { - return fmt.Errorf("Error setting devmapper transition ID: %s", err) - } - devices.TransactionId = devices.NewTransactionId - } - return nil -} - -func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*DevInfo, error) { - utils.Debugf("registerDevice(%v, %v)", id, hash) - info := &DevInfo{ - Hash: hash, - DeviceId: id, - Size: size, - TransactionId: devices.allocateTransactionId(), - Initialized: false, - devices: devices, - } - - devices.Devices[hash] = info - if err := devices.saveMetadata(); err != nil { - // Try to remove unused device - delete(devices.Devices, hash) - return nil, err - } - - return info, nil -} - -func (devices *DeviceSet) activateDeviceIfNeeded(hash string) error { - utils.Debugf("activateDeviceIfNeeded(%v)", hash) - info := devices.Devices[hash] - if info == nil { - return fmt.Errorf("Unknown device %s", hash) - } - - if devinfo, _ := getInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { - return nil - } - - return activateDevice(devices.getPoolDevName(), info.Name(), info.DeviceId, info.Size) -} - -func (devices *DeviceSet) createFilesystem(info *DevInfo) error { - devname := info.DevName() - - err := execRun("mkfs.ext4", "-E", "discard,lazy_itable_init=0,lazy_journal_init=0", devname) - if err != nil { - err = execRun("mkfs.ext4", "-E", "discard,lazy_itable_init=0", devname) - } - if err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err - } - 
-	return nil
-}
-
-func (devices *DeviceSet) loadMetaData() error {
-	utils.Debugf("loadMetadata()")
-	defer utils.Debugf("loadMetadata END")
-	_, _, _, params, err := getStatus(devices.getPoolName())
-	if err != nil {
-		utils.Debugf("\n--->Err: %s\n", err)
-		return err
-	}
-
-	if _, err := fmt.Sscanf(params, "%d", &devices.TransactionId); err != nil {
-		utils.Debugf("\n--->Err: %s\n", err)
-		return err
-	}
-	devices.NewTransactionId = devices.TransactionId
-
-	jsonData, err := ioutil.ReadFile(devices.jsonFile())
-	if err != nil && !osIsNotExist(err) {
-		utils.Debugf("\n--->Err: %s\n", err)
-		return err
-	}
-
-	devices.MetaData.Devices = make(map[string]*DevInfo)
-	if jsonData != nil {
-		if err := json.Unmarshal(jsonData, &devices.MetaData); err != nil {
-			utils.Debugf("\n--->Err: %s\n", err)
-			return err
-		}
-	}
-
-	for hash, d := range devices.Devices {
-		d.Hash = hash
-		d.devices = devices
-
-		if d.DeviceId >= devices.nextFreeDevice {
-			devices.nextFreeDevice = d.DeviceId + 1
-		}
-
-		// If the device's transaction id is larger than the pool's, the
-		// device was lost in a crash before its transaction completed
-		if d.TransactionId > devices.TransactionId {
-			utils.Debugf("Removing lost device %s with id %d", hash, d.TransactionId)
-			delete(devices.Devices, hash)
-		}
-	}
-	return nil
-}
-
-func (devices *DeviceSet) setupBaseImage() error {
-	oldInfo := devices.Devices[""]
-	if oldInfo != nil && oldInfo.Initialized {
-		return nil
-	}
-
-	if oldInfo != nil && !oldInfo.Initialized {
-		utils.Debugf("Removing uninitialized base image")
-		if err := devices.deleteDevice(""); err != nil {
-			utils.Debugf("\n--->Err: %s\n", err)
-			return err
-		}
-	}
-
-	utils.Debugf("Initializing base device-mapper snapshot")
-
-	id := devices.allocateDeviceId()
-
-	// Create initial device
-	if err := createDevice(devices.getPoolDevName(), id); err != nil {
-		utils.Debugf("\n--->Err: %s\n", err)
-		return err
-	}
-
-	utils.Debugf("Registering base device (id %v) with FS size %v", id, DefaultBaseFsSize)
-	info, err := devices.registerDevice(id, "", DefaultBaseFsSize)
-	if err != nil {
-		_ = deleteDevice(devices.getPoolDevName(), id)
-		utils.Debugf("\n--->Err: %s\n", err)
-		return err
-	}
-
-	utils.Debugf("Creating filesystem on base device-mapper snapshot")
-
-	if err = devices.activateDeviceIfNeeded(""); err != nil {
-		utils.Debugf("\n--->Err: %s\n", err)
-		return err
-	}
-
-	if err := devices.createFilesystem(info); err != nil {
-		utils.Debugf("\n--->Err: %s\n", err)
-		return err
-	}
-
-	info.Initialized = true
-	if err = devices.saveMetadata(); err != nil {
-		info.Initialized = false
-		utils.Debugf("\n--->Err: %s\n", err)
-		return err
-	}
-
-	return nil
-}
-
-func setCloseOnExec(name string) {
-	if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil {
-		for _, i := range fileInfos {
-			link, _ := osReadlink(filepath.Join("/proc/self/fd", i.Name()))
-			if link == name {
-				fd, err := strconv.Atoi(i.Name())
-				if err == nil {
-					sysCloseOnExec(fd)
-				}
-			}
-		}
-	}
-}
-
-func (devices *DeviceSet) log(level int, file string, line int, dmError int, message string) {
-	if level >= 7 {
-		return // Ignore _LOG_DEBUG
-	}
-
-	if strings.Contains(message, "busy") {
-		devices.sawBusy = true
-	}
-
-	utils.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
-}
-
-func major(device uint64) uint64 {
-	return (device >> 8) & 0xfff
-}
-
-func minor(device uint64) uint64 {
-	return (device & 0xff) | ((device >> 12) & 0xfff00)
-}
-
-func (devices *DeviceSet) ResizePool(size int64) error {
-	dirname := devices.loopbackDir()
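-	// The resize happens in place: grow the sparse data file, tell the
-	// loop driver to pick up the new size (LOOP_SET_CAPACITY), then
-	// suspend the pool, reload its table with the new size and resume it.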
-	datafilename := path.Join(dirname, "data")
-	metadatafilename := path.Join(dirname, "metadata")
-
-	datafile, err := osOpenFile(datafilename, osORdWr, 0)
-	if datafile == nil {
-		return err
-	}
-	defer datafile.Close()
-
-	fi, err := datafile.Stat()
-	if fi == nil {
-		return err
-	}
-
-	if fi.Size() > size {
-		return fmt.Errorf("Can't shrink file")
-	}
-
-	dataloopback := FindLoopDeviceFor(datafile)
-	if dataloopback == nil {
-		return fmt.Errorf("Unable to find loopback mount for: %s", datafilename)
-	}
-	defer dataloopback.Close()
-
-	metadatafile, err := osOpenFile(metadatafilename, osORdWr, 0)
-	if metadatafile == nil {
-		return err
-	}
-	defer metadatafile.Close()
-
-	metadataloopback := FindLoopDeviceFor(metadatafile)
-	if metadataloopback == nil {
-		return fmt.Errorf("Unable to find loopback mount for: %s", metadatafilename)
-	}
-	defer metadataloopback.Close()
-
-	// Grow loopback file
-	if err := datafile.Truncate(size); err != nil {
-		return fmt.Errorf("Unable to grow loopback file: %s", err)
-	}
-
-	// Reload size for loopback device
-	if err := LoopbackSetCapacity(dataloopback); err != nil {
-		return fmt.Errorf("Unable to update loopback capacity: %s", err)
-	}
-
-	// Suspend the pool
-	if err := suspendDevice(devices.getPoolName()); err != nil {
-		return fmt.Errorf("Unable to suspend pool: %s", err)
-	}
-
-	// Reload with the new block sizes
-	if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback); err != nil {
-		return fmt.Errorf("Unable to reload pool: %s", err)
-	}
-
-	// Resume the pool
-	if err := resumeDevice(devices.getPoolName()); err != nil {
-		return fmt.Errorf("Unable to resume pool: %s", err)
-	}
-
-	return nil
-}
-
-func (devices *DeviceSet) initDevmapper(doInit bool) error {
-	logInit(devices)
-
-	// Make sure the sparse images exist in <root>/devicemapper/data and
-	// <root>/devicemapper/metadata
-
-	hasData := devices.hasImage("data")
-	hasMetadata := devices.hasImage("metadata")
-
-	if !doInit && !hasData {
-		return errors.New("Loopback data file not found")
-	}
-
-	if !doInit && !hasMetadata {
-		return errors.New("Loopback metadata file not found")
-	}
-
-	createdLoopback := !hasData || !hasMetadata
-	data, err := devices.ensureImage("data", DefaultDataLoopbackSize)
-	if err != nil {
-		utils.Debugf("Error device ensureImage (data): %s\n", err)
-		return err
-	}
-	metadata, err := devices.ensureImage("metadata", DefaultMetaDataLoopbackSize)
-	if err != nil {
-		utils.Debugf("Error device ensureImage (metadata): %s\n", err)
-		return err
-	}
-
-	// Set the device prefix from the device id and inode of the docker root dir
-
-	st, err := osStat(devices.root)
-	if err != nil {
-		return fmt.Errorf("Error looking up dir %s: %s", devices.root, err)
-	}
-	sysSt := toSysStatT(st.Sys())
-	// "reg-" stands for "regular file".
-	// In the future we might use "dev-" for "device file", etc.
-	// docker-<maj>,<min>[-<inode>] stands for:
-	// - Managed by docker
-	// - The target of this device is at major <maj> and minor <min>
-	// - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
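-	// For example, a docker root dir on /dev/sda1 (device 8:1) at inode
-	// 1234 would yield the prefix "docker-8:1-1234" (values illustrative).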
-	devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)
-	utils.Debugf("Generated prefix: %s", devices.devicePrefix)
-
-	// Check for the existence of the device <prefix>-pool
-	utils.Debugf("Checking for existence of the pool '%s'", devices.getPoolName())
-	info, err := getInfo(devices.getPoolName())
-	if info == nil {
-		utils.Debugf("Error device getInfo: %s", err)
-		return err
-	}
-
-	// It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files
-	// that are not close-on-exec, and lxc-start will die if it inherits any unexpected files,
-	// so we add this bad hack to make sure the fd gets closed on exec
-	setCloseOnExec("/dev/mapper/control")
-
-	// If the pool doesn't exist, create it
-	if info.Exists == 0 {
-		utils.Debugf("Pool doesn't exist. Creating it.")
-
-		dataFile, err := attachLoopDevice(data)
-		if err != nil {
-			utils.Debugf("\n--->Err: %s\n", err)
-			return err
-		}
-		defer dataFile.Close()
-
-		metadataFile, err := attachLoopDevice(metadata)
-		if err != nil {
-			utils.Debugf("\n--->Err: %s\n", err)
-			return err
-		}
-		defer metadataFile.Close()
-
-		if err := createPool(devices.getPoolName(), dataFile, metadataFile); err != nil {
-			utils.Debugf("\n--->Err: %s\n", err)
-			return err
-		}
-	}
-
-	// If we didn't just create the data or metadata image, we need to
-	// load the metadata from the existing file.
-	if !createdLoopback {
-		if err = devices.loadMetaData(); err != nil {
-			utils.Debugf("\n--->Err: %s\n", err)
-			return err
-		}
-	}
-
-	// Setup the base image
-	if doInit {
-		if err := devices.setupBaseImage(); err != nil {
-			utils.Debugf("Error device setupBaseImage: %s\n", err)
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
-	devices.Lock()
-	defer devices.Unlock()
-
-	if devices.Devices[hash] != nil {
-		return fmt.Errorf("hash %s already exists", hash)
-	}
-
-	baseInfo := devices.Devices[baseHash]
-	if baseInfo == nil {
-		return fmt.Errorf("Error adding device for '%s': can't find device for parent '%s'", hash, baseHash)
-	}
-
-	baseInfo.lock.Lock()
-	defer baseInfo.lock.Unlock()
-
-	deviceId := devices.allocateDeviceId()
-
-	if err := devices.createSnapDevice(devices.getPoolDevName(), deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil {
-		utils.Debugf("Error creating snap device: %s\n", err)
-		return err
-	}
-
-	if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil {
-		deleteDevice(devices.getPoolDevName(), deviceId)
-		utils.Debugf("Error registering device: %s\n", err)
-		return err
-	}
-	return nil
-}
-
-func (devices *DeviceSet) deleteDevice(hash string) error {
-	info := devices.Devices[hash]
-	if info == nil {
-		return fmt.Errorf("hash %s doesn't exist", hash)
-	}
-
-	// This is a workaround for the kernel not discarding blocks on the
-	// thin pool when we remove a thin-provisioned device, so we discard
-	// them manually
-	if err := devices.activateDeviceIfNeeded(hash); err == nil {
-		if err := BlockDeviceDiscard(info.DevName()); err != nil {
-			utils.Debugf("Error discarding block on device: %s (ignoring)\n", err)
-		}
-	}
-
-	devinfo, _ := getInfo(info.Name())
-	if devinfo != nil && devinfo.Exists != 0 {
-		if err := devices.removeDeviceAndWait(info.Name()); err != nil {
-			utils.Debugf("Error removing device: %s\n", err)
-			return err
-		}
-	}
-
-	if info.Initialized {
-		info.Initialized = false
-		if err := devices.saveMetadata(); err != nil {
-			utils.Debugf("Error saving meta data: %s\n", err)
-			return err
-		}
-	}
-	if err := deleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil {
-		utils.Debugf("Error deleting device: %s\n", err)
-		return err
-	}
-
-	devices.allocateTransactionId()
-	delete(devices.Devices, info.Hash)
-
-	if err := devices.saveMetadata(); err != nil {
-		devices.Devices[info.Hash] = info
-		utils.Debugf("Error saving meta data: %s\n", err)
-		return err
-	}
-
-	return nil
-}
-
-func (devices *DeviceSet) DeleteDevice(hash string) error {
-	devices.Lock()
-	defer devices.Unlock()
-
-	info := devices.Devices[hash]
-	if info == nil {
-		return fmt.Errorf("Unknown device %s", hash)
-	}
-
-	info.lock.Lock()
-	defer info.lock.Unlock()
-
-	return devices.deleteDevice(hash)
-}
-
-func (devices *DeviceSet) deactivatePool() error {
-	utils.Debugf("[devmapper] deactivatePool()")
-	defer utils.Debugf("[devmapper] deactivatePool END")
-	devname := devices.getPoolDevName()
-	devinfo, err := getInfo(devname)
-	if err != nil {
-		utils.Debugf("\n--->Err: %s\n", err)
-		return err
-	}
-	if devinfo.Exists != 0 {
-		return removeDevice(devname)
-	}
-
-	return nil
-}
-
-func (devices *DeviceSet) deactivateDevice(hash string) error {
-	utils.Debugf("[devmapper] deactivateDevice(%s)", hash)
-	defer utils.Debugf("[devmapper] deactivateDevice END")
-
-	info := devices.Devices[hash]
-	if info == nil {
-		return fmt.Errorf("Unknown device %s", hash)
-	}
-	devinfo, err := getInfo(info.Name())
-	if err != nil {
-		utils.Debugf("\n--->Err: %s\n", err)
-		return err
-	}
-	if devinfo.Exists != 0 {
-		if err := devices.removeDeviceAndWait(info.Name()); err != nil {
-			utils.Debugf("\n--->Err: %s\n", err)
-			return err
-		}
-	}
-
-	return nil
-}
-
-// Issues the underlying dm remove operation and then waits
-// for it to finish.
-func (devices *DeviceSet) removeDeviceAndWait(devname string) error {
-	var err error
-
-	for i := 0; i < 1000; i++ {
-		devices.sawBusy = false
-		err = removeDevice(devname)
-		if err == nil {
-			break
-		}
-		if !devices.sawBusy {
-			return err
-		}
-
-		// If we see EBUSY it may be a transient error,
-		// sleep a bit and retry a few times.
-		devices.Unlock()
-		time.Sleep(10 * time.Millisecond)
-		devices.Lock()
-	}
-	if err != nil {
-		return err
-	}
-
-	if err := devices.waitRemove(devname); err != nil {
-		return err
-	}
-	return nil
-}
-
-// waitRemove blocks until either:
-// a) the device registered at <device_set_prefix>-<hash> is removed,
-// or b) the 10 second timeout expires.
-func (devices *DeviceSet) waitRemove(devname string) error {
-	utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname)
-	defer utils.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname)
-	i := 0
-	for ; i < 1000; i += 1 {
-		devinfo, err := getInfo(devname)
-		if err != nil {
-			// If there is an error we assume the device doesn't exist.
-			// The error might actually be something else, but we can't differentiate.
-			return nil
-		}
-		if i%100 == 0 {
-			utils.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists)
-		}
-		if devinfo.Exists == 0 {
-			break
-		}
-
-		devices.Unlock()
-		time.Sleep(10 * time.Millisecond)
-		devices.Lock()
-	}
-	if i == 1000 {
-		return fmt.Errorf("Timeout while waiting for device %s to be removed", devname)
-	}
-	return nil
-}
-
-// waitClose blocks until either:
-// a) the device registered at <device_set_prefix>-<hash> is closed,
-// or b) the 10 second timeout expires.
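-// The 10 second budget comes from 1000 polls with a 10ms sleep between
-// them; the DeviceSet lock is dropped while sleeping so other operations
-// can make progress.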
-func (devices *DeviceSet) waitClose(hash string) error {
-	info := devices.Devices[hash]
-	if info == nil {
-		return fmt.Errorf("Unknown device %s", hash)
-	}
-	i := 0
-	for ; i < 1000; i += 1 {
-		devinfo, err := getInfo(info.Name())
-		if err != nil {
-			return err
-		}
-		if i%100 == 0 {
-			utils.Debugf("Waiting for unmount of %s: opencount=%d", hash, devinfo.OpenCount)
-		}
-		if devinfo.OpenCount == 0 {
-			break
-		}
-		devices.Unlock()
-		time.Sleep(10 * time.Millisecond)
-		devices.Lock()
-	}
-	if i == 1000 {
-		return fmt.Errorf("Timeout while waiting for device %s to close", hash)
-	}
-	return nil
-}
-
-func (devices *DeviceSet) Shutdown() error {
-	devices.Lock()
-	defer devices.Unlock()
-
-	utils.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
-	utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
-	defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)
-
-	for _, info := range devices.Devices {
-		info.lock.Lock()
-		if info.mountCount > 0 {
-			if err := sysUnmount(info.mountPath, 0); err != nil {
-				utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err)
-			}
-		}
-		info.lock.Unlock()
-	}
-
-	for _, d := range devices.Devices {
-		d.lock.Lock()
-
-		if err := devices.waitClose(d.Hash); err != nil {
-			utils.Errorf("Warning: error waiting for device %s to unmount: %s\n", d.Hash, err)
-		}
-		if err := devices.deactivateDevice(d.Hash); err != nil {
-			utils.Debugf("Shutdown deactivate %s , error: %s\n", d.Hash, err)
-		}
-
-		d.lock.Unlock()
-	}
-
-	if err := devices.deactivatePool(); err != nil {
-		utils.Debugf("Shutdown deactivate pool , error: %s\n", err)
-	}
-
-	return nil
-}
-
-func (devices *DeviceSet) MountDevice(hash, path string) error {
-	devices.Lock()
-	defer devices.Unlock()
-
-	info := devices.Devices[hash]
-	if info == nil {
-		return fmt.Errorf("Unknown device %s", hash)
-	}
-
-	info.lock.Lock()
-	defer info.lock.Unlock()
-
-	if info.mountCount > 0 {
-		if path != info.mountPath {
-			return fmt.Errorf("Trying to mount devmapper device in multiple places (%s, %s)", info.mountPath, path)
-		}
-
-		if info.floating {
-			// Steal floating ref
-			info.floating = false
-		} else {
-			info.mountCount++
-		}
-		return nil
-	}
-
-	if err := devices.activateDeviceIfNeeded(hash); err != nil {
-		return fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err)
-	}
-
-	var flags uintptr = sysMsMgcVal
-
-	err := sysMount(info.DevName(), path, "ext4", flags, "discard")
-	if err != nil && err == sysEInval {
-		err = sysMount(info.DevName(), path, "ext4", flags, "")
-	}
-	if err != nil {
-		return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err)
-	}
-
-	info.mountCount = 1
-	info.mountPath = path
-	info.floating = false
-
-	return devices.setInitialized(hash)
-}
-
-func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error {
-	utils.Debugf("[devmapper] UnmountDevice(hash=%s, mode=%d)", hash, mode)
-	defer utils.Debugf("[devmapper] UnmountDevice END")
-	devices.Lock()
-	defer devices.Unlock()
-
-	info := devices.Devices[hash]
-	if info == nil {
-		return fmt.Errorf("UnmountDevice: no such device %s\n", hash)
-	}
-
-	info.lock.Lock()
-	defer info.lock.Unlock()
-
-	if mode == UnmountFloat {
-		if info.floating {
-			return fmt.Errorf("UnmountDevice: can't float floating reference %s\n", hash)
-		}
-
-		// Leave this reference floating
-		info.floating = true
-		return nil
-	}
-
-	if mode == UnmountSink {
-		if !info.floating {
-			// Someone already sunk this
-			return nil
-		}
-		// Otherwise, treat this as a regular unmount
-	}
-
-	if info.mountCount == 0 {
-		return fmt.Errorf("UnmountDevice: device not mounted, id %s\n", hash)
-	}
-
-	info.mountCount--
-	if info.mountCount > 0 {
-		return nil
-	}
-
-	utils.Debugf("[devmapper] Unmount(%s)", info.mountPath)
-	if err := sysUnmount(info.mountPath, 0); err != nil {
-		utils.Debugf("\n--->Err: %s\n", err)
-		return err
-	}
-	utils.Debugf("[devmapper] Unmount done")
-	// Wait for the unmount to be effective,
-	// by watching the value of Info.OpenCount for the device
-	if err := devices.waitClose(hash); err != nil {
-		return err
-	}
-
-	devices.deactivateDevice(hash)
-
-	info.mountPath = ""
-
-	return nil
-}
-
-func (devices *DeviceSet) HasDevice(hash string) bool {
-	devices.Lock()
-	defer devices.Unlock()
-
-	return devices.Devices[hash] != nil
-}
-
-func (devices *DeviceSet) HasInitializedDevice(hash string) bool {
-	devices.Lock()
-	defer devices.Unlock()
-
-	info := devices.Devices[hash]
-	return info != nil && info.Initialized
-}
-
-func (devices *DeviceSet) HasActivatedDevice(hash string) bool {
-	devices.Lock()
-	defer devices.Unlock()
-
-	info := devices.Devices[hash]
-	if info == nil {
-		return false
-	}
-
-	info.lock.Lock()
-	defer info.lock.Unlock()
-
-	devinfo, _ := getInfo(info.Name())
-	return devinfo != nil && devinfo.Exists != 0
-}
-
-func (devices *DeviceSet) setInitialized(hash string) error {
-	info := devices.Devices[hash]
-	if info == nil {
-		return fmt.Errorf("Unknown device %s", hash)
-	}
-
-	info.Initialized = true
-	if err := devices.saveMetadata(); err != nil {
-		info.Initialized = false
-		utils.Debugf("\n--->Err: %s\n", err)
-		return err
-	}
-
-	return nil
-}
-
-func (devices *DeviceSet) List() []string {
-	devices.Lock()
-	defer devices.Unlock()
-
-	ids := make([]string, len(devices.Devices))
-	i := 0
-	for k := range devices.Devices {
-		ids[i] = k
-		i++
-	}
-	return ids
-}
-
-func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) {
-	var params string
-	_, sizeInSectors, _, params, err = getStatus(devName)
-	if err != nil {
-		return
-	}
-	if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil {
-		return
-	}
-	return
-}
-
-func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) {
-	devices.Lock()
-	defer devices.Unlock()
-
-	info := devices.Devices[hash]
-	if info == nil {
-		return nil, fmt.Errorf("No device %s", hash)
-	}
-
-	info.lock.Lock()
-	defer info.lock.Unlock()
-
-	status := &DevStatus{
-		DeviceId:      info.DeviceId,
-		Size:          info.Size,
-		TransactionId: info.TransactionId,
-	}
-
-	if err := devices.activateDeviceIfNeeded(hash); err != nil {
-		return nil, fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err)
-	}
-
-	if sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()); err != nil {
-		return nil, err
-	} else {
-		status.SizeInSectors = sizeInSectors
-		status.MappedSectors = mappedSectors
-		status.HighestMappedSector = highestMappedSector
-	}
-
-	return status, nil
-}
-
-func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionId, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) {
-	var params string
-	if _, totalSizeInSectors, _, params, err = getStatus(devices.getPoolName()); err == nil {
-		_, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionId, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal)
-	}
-	return
-}
-
-func (devices *DeviceSet) Status() *Status {
-	devices.Lock()
-	defer devices.Unlock()
-
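-	// The pool status reports usage in data blocks: dividing the pool's
-	// total size in 512-byte sectors by dataTotal gives the block size
-	// in sectors, which is used below to convert block counts to bytes.
-	// e.g. with totalSizeInSectors=204800 and dataTotal=1600 (values
-	// illustrative), blockSizeInSectors = 128, i.e. 64KB blocks, and
-	// status.Data.Total = 1600 * 128 * 512 = 100MB.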
status := &Status{} - - status.PoolName = devices.getPoolName() - status.DataLoopback = path.Join(devices.loopbackDir(), "data") - status.MetadataLoopback = path.Join(devices.loopbackDir(), "metadata") - - totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() - if err == nil { - // Convert from blocks to bytes - blockSizeInSectors := totalSizeInSectors / dataTotal - - status.Data.Used = dataUsed * blockSizeInSectors * 512 - status.Data.Total = dataTotal * blockSizeInSectors * 512 - - // metadata blocks are always 4k - status.Metadata.Used = metadataUsed * 4096 - status.Metadata.Total = metadataTotal * 4096 - - status.SectorSize = blockSizeInSectors * 512 - } - - return status -} - -func NewDeviceSet(root string, doInit bool) (*DeviceSet, error) { - SetDevDir("/dev") - - devices := &DeviceSet{ - root: root, - MetaData: MetaData{Devices: make(map[string]*DevInfo)}, - } - - if err := devices.initDevmapper(doInit); err != nil { - return nil, err - } - - return devices, nil -} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/devmapper/devmapper_doc.go docker.io-1.3.2~dfsg1/graphdriver/devmapper/devmapper_doc.go --- docker.io-0.9.1~dfsg1/graphdriver/devmapper/devmapper_doc.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/devmapper/devmapper_doc.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,106 +0,0 @@ -package devmapper - -// Definition of struct dm_task and sub structures (from lvm2) -// -// struct dm_ioctl { -// /* -// * The version number is made up of three parts: -// * major - no backward or forward compatibility, -// * minor - only backwards compatible, -// * patch - both backwards and forwards compatible. -// * -// * All clients of the ioctl interface should fill in the -// * version number of the interface that they were -// * compiled with. -// * -// * All recognised ioctl commands (ie. those that don't -// * return -ENOTTY) fill out this field, even if the -// * command failed. -// */ -// uint32_t version[3]; /* in/out */ -// uint32_t data_size; /* total size of data passed in -// * including this struct */ - -// uint32_t data_start; /* offset to start of data -// * relative to start of this struct */ - -// uint32_t target_count; /* in/out */ -// int32_t open_count; /* out */ -// uint32_t flags; /* in/out */ - -// /* -// * event_nr holds either the event number (input and output) or the -// * udev cookie value (input only). -// * The DM_DEV_WAIT ioctl takes an event number as input. -// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls -// * use the field as a cookie to return in the DM_COOKIE -// * variable with the uevents they issue. -// * For output, the ioctls return the event number, not the cookie. 
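-// * (On the Go side, this is the same cookie that Task.SetCookie fills
-// * in and that UdevWait later waits on.)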
-// 	 */
-// 	uint32_t event_nr;	/* in/out */
-// 	uint32_t padding;
-
-// 	uint64_t dev;	/* in/out */
-
-// 	char name[DM_NAME_LEN];	/* device name */
-// 	char uuid[DM_UUID_LEN];	/* unique identifier for
-// 				 * the block device */
-// 	char data[7];	/* padding or data */
-// };
-
-// struct target {
-// 	uint64_t start;
-// 	uint64_t length;
-// 	char *type;
-// 	char *params;
-
-// 	struct target *next;
-// };
-
-// typedef enum {
-// 	DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */
-// 	DM_ADD_NODE_ON_CREATE  /* add /dev/mapper node with dmsetup create */
-// } dm_add_node_t;
-
-// struct dm_task {
-// 	int type;
-// 	char *dev_name;
-// 	char *mangled_dev_name;
-
-// 	struct target *head, *tail;
-
-// 	int read_only;
-// 	uint32_t event_nr;
-// 	int major;
-// 	int minor;
-// 	int allow_default_major_fallback;
-// 	uid_t uid;
-// 	gid_t gid;
-// 	mode_t mode;
-// 	uint32_t read_ahead;
-// 	uint32_t read_ahead_flags;
-// 	union {
-// 		struct dm_ioctl *v4;
-// 	} dmi;
-// 	char *newname;
-// 	char *message;
-// 	char *geometry;
-// 	uint64_t sector;
-// 	int no_flush;
-// 	int no_open_count;
-// 	int skip_lockfs;
-// 	int query_inactive_table;
-// 	int suppress_identical_reload;
-// 	dm_add_node_t add_node;
-// 	uint64_t existing_table_size;
-// 	int cookie_set;
-// 	int new_uuid;
-// 	int secure_data;
-// 	int retry_remove;
-// 	int enable_checks;
-// 	int expected_errno;
-
-// 	char *uuid;
-// 	char *mangled_uuid;
-// };
-//
diff -Nru docker.io-0.9.1~dfsg1/graphdriver/devmapper/devmapper.go docker.io-1.3.2~dfsg1/graphdriver/devmapper/devmapper.go
--- docker.io-0.9.1~dfsg1/graphdriver/devmapper/devmapper.go	2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/graphdriver/devmapper/devmapper.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,595 +0,0 @@
-// +build linux,amd64
-
-package devmapper
-
-import (
-	"errors"
-	"fmt"
-	"github.com/dotcloud/docker/utils"
-	"runtime"
-	"syscall"
-)
-
-type DevmapperLogger interface {
-	log(level int, file string, line int, dmError int, message string)
-}
-
-const (
-	DeviceCreate TaskType = iota
-	DeviceReload
-	DeviceRemove
-	DeviceRemoveAll
-	DeviceSuspend
-	DeviceResume
-	DeviceInfo
-	DeviceDeps
-	DeviceRename
-	DeviceVersion
-	DeviceStatus
-	DeviceTable
-	DeviceWaitevent
-	DeviceList
-	DeviceClear
-	DeviceMknodes
-	DeviceListVersions
-	DeviceTargetMsg
-	DeviceSetGeometry
-)
-
-const (
-	AddNodeOnResume AddNodeType = iota
-	AddNodeOnCreate
-)
-
-var (
-	ErrTaskRun                = errors.New("dm_task_run failed")
-	ErrTaskSetName            = errors.New("dm_task_set_name failed")
-	ErrTaskSetMessage         = errors.New("dm_task_set_message failed")
-	ErrTaskSetAddNode         = errors.New("dm_task_set_add_node failed")
-	ErrTaskSetRo              = errors.New("dm_task_set_ro failed")
-	ErrTaskAddTarget          = errors.New("dm_task_add_target failed")
-	ErrTaskSetSector          = errors.New("dm_task_set_sector failed")
-	ErrTaskGetInfo            = errors.New("dm_task_get_info failed")
-	ErrTaskSetCookie          = errors.New("dm_task_set_cookie failed")
-	ErrNilCookie              = errors.New("cookie ptr can't be nil")
-	ErrAttachLoopbackDevice   = errors.New("loopback mounting failed")
-	ErrGetBlockSize           = errors.New("Can't get block size")
-	ErrUdevWait               = errors.New("wait on udev cookie failed")
-	ErrSetDevDir              = errors.New("dm_set_dev_dir failed")
-	ErrGetLibraryVersion      = errors.New("dm_get_library_version failed")
-	ErrCreateRemoveTask       = errors.New("Can't create task of type DeviceRemove")
-	ErrRunRemoveDevice        = errors.New("running removeDevice failed")
-	ErrInvalidAddNode         = errors.New("Invalid AddNode type")
-	ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file")
-	ErrLoopbackSetCapacity    = errors.New("Unable to set loopback capacity")
-)
-
-type (
-	Task struct {
-		unmanaged *CDmTask
-	}
-	Info struct {
-		Exists        int
-		Suspended     int
-		LiveTable     int
-		InactiveTable int
-		OpenCount     int32
-		EventNr       uint32
-		Major         uint32
-		Minor         uint32
-		ReadOnly      int
-		TargetCount   int32
-	}
-	TaskType    int
-	AddNodeType int
-)
-
-func (t *Task) destroy() {
-	if t != nil {
-		DmTaskDestroy(t.unmanaged)
-		runtime.SetFinalizer(t, nil)
-	}
-}
-
-func TaskCreate(tasktype TaskType) *Task {
-	Ctask := DmTaskCreate(int(tasktype))
-	if Ctask == nil {
-		return nil
-	}
-	task := &Task{unmanaged: Ctask}
-	runtime.SetFinalizer(task, (*Task).destroy)
-	return task
-}
-
-func (t *Task) Run() error {
-	if res := DmTaskRun(t.unmanaged); res != 1 {
-		return ErrTaskRun
-	}
-	return nil
-}
-
-func (t *Task) SetName(name string) error {
-	if res := DmTaskSetName(t.unmanaged, name); res != 1 {
-		return ErrTaskSetName
-	}
-	return nil
-}
-
-func (t *Task) SetMessage(message string) error {
-	if res := DmTaskSetMessage(t.unmanaged, message); res != 1 {
-		return ErrTaskSetMessage
-	}
-	return nil
-}
-
-func (t *Task) SetSector(sector uint64) error {
-	if res := DmTaskSetSector(t.unmanaged, sector); res != 1 {
-		return ErrTaskSetSector
-	}
-	return nil
-}
-
-func (t *Task) SetCookie(cookie *uint, flags uint16) error {
-	if cookie == nil {
-		return ErrNilCookie
-	}
-	if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 {
-		return ErrTaskSetCookie
-	}
-	return nil
-}
-
-func (t *Task) SetAddNode(addNode AddNodeType) error {
-	if addNode != AddNodeOnResume && addNode != AddNodeOnCreate {
-		return ErrInvalidAddNode
-	}
-	if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 {
-		return ErrTaskSetAddNode
-	}
-	return nil
-}
-
-func (t *Task) SetRo() error {
-	if res := DmTaskSetRo(t.unmanaged); res != 1 {
-		return ErrTaskSetRo
-	}
-	return nil
-}
-
-func (t *Task) AddTarget(start, size uint64, ttype, params string) error {
-	if res := DmTaskAddTarget(t.unmanaged, start, size,
-		ttype, params); res != 1 {
-		return ErrTaskAddTarget
-	}
-	return nil
-}
-
-func (t *Task) GetInfo() (*Info, error) {
-	info := &Info{}
-	if res := DmTaskGetInfo(t.unmanaged, info); res != 1 {
-		return nil, ErrTaskGetInfo
-	}
-	return info, nil
-}
-
-func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64,
-	length uint64, targetType string, params string) {
-
-	return DmGetNextTarget(t.unmanaged, next, &start, &length,
-			&targetType, &params),
-		start, length, targetType, params
-}
-
-func getLoopbackBackingFile(file *osFile) (uint64, uint64, error) {
-	loopInfo, err := ioctlLoopGetStatus64(file.Fd())
-	if err != nil {
-		utils.Errorf("Error getting loopback backing file: %s\n", err)
-		return 0, 0, ErrGetLoopbackBackingFile
-	}
-	return loopInfo.loDevice, loopInfo.loInode, nil
-}
-
-func LoopbackSetCapacity(file *osFile) error {
-	if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
-		utils.Errorf("Error loopbackSetCapacity: %s", err)
-		return ErrLoopbackSetCapacity
-	}
-	return nil
-}
-
-func FindLoopDeviceFor(file *osFile) *osFile {
-	stat, err := file.Stat()
-	if err != nil {
-		return nil
-	}
-	targetInode := stat.Sys().(*sysStatT).Ino
-	targetDevice := stat.Sys().(*sysStatT).Dev
-
-	for i := 0; true; i++ {
-		path := fmt.Sprintf("/dev/loop%d", i)
-
-		file, err := osOpenFile(path, osORdWr, 0)
-		if err != nil {
-			if osIsNotExist(err) {
-				return nil
-			}
-
-			// Ignore all errors until the first not-exist;
-			// we want to continue looking for the file
-			continue
-		}
-
-		dev, inode, err := getLoopbackBackingFile(file)
-		if err == nil && dev == targetDevice && inode == targetInode {
-			return file
-		}
-		file.Close()
-	}
-
-	return nil
-}
-
-func UdevWait(cookie uint) error {
-	if res := DmUdevWait(cookie); res != 1 {
-		utils.Debugf("Failed to wait on udev cookie %d", cookie)
-		return ErrUdevWait
-	}
-	return nil
-}
-
-func LogInitVerbose(level int) {
-	DmLogInitVerbose(level)
-}
-
-var dmLogger DevmapperLogger = nil
-
-func logInit(logger DevmapperLogger) {
-	dmLogger = logger
-	LogWithErrnoInit()
-}
-
-func SetDevDir(dir string) error {
-	if res := DmSetDevDir(dir); res != 1 {
-		utils.Debugf("Error dm_set_dev_dir")
-		return ErrSetDevDir
-	}
-	return nil
-}
-
-func GetLibraryVersion() (string, error) {
-	var version string
-	if res := DmGetLibraryVersion(&version); res != 1 {
-		return "", ErrGetLibraryVersion
-	}
-	return version, nil
-}
-
-// Useful helper for cleanup
-func RemoveDevice(name string) error {
-	task := TaskCreate(DeviceRemove)
-	if task == nil {
-		return ErrCreateRemoveTask
-	}
-	if err := task.SetName(name); err != nil {
-		utils.Debugf("Can't set task name %s", name)
-		return err
-	}
-	if err := task.Run(); err != nil {
-		return ErrRunRemoveDevice
-	}
-	return nil
-}
-
-func GetBlockDeviceSize(file *osFile) (uint64, error) {
-	size, err := ioctlBlkGetSize64(file.Fd())
-	if err != nil {
-		utils.Errorf("Error getting block device size: %s", err)
-		return 0, ErrGetBlockSize
-	}
-	return uint64(size), nil
-}
-
-func BlockDeviceDiscard(path string) error {
-	file, err := osOpenFile(path, osORdWr, 0)
-	if err != nil {
-		return err
-	}
-	defer file.Close()
-
-	size, err := GetBlockDeviceSize(file)
-	if err != nil {
-		return err
-	}
-
-	if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil {
-		return err
-	}
-
-	// Without this sometimes the remove of the device that happens after
-	// discard fails with EBUSY.
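-	// (The discard just issued covers the whole device, offset 0 through
-	// the device size, so every allocated block is returned to the pool.)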
- syscall.Sync() - - return nil -} - -// This is the programmatic example of "dmsetup create" -func createPool(poolName string, dataFile, metadataFile *osFile) error { - task, err := createTask(DeviceCreate, poolName) - if task == nil { - return err - } - - size, err := GetBlockDeviceSize(dataFile) - if err != nil { - return fmt.Errorf("Can't get data size") - } - - params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768 1 skip_block_zeroing" - if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { - return fmt.Errorf("Can't add target") - } - - var cookie uint = 0 - if err := task.SetCookie(&cookie, 0); err != nil { - return fmt.Errorf("Can't set cookie") - } - - if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceCreate (createPool)") - } - - UdevWait(cookie) - - return nil -} - -func reloadPool(poolName string, dataFile, metadataFile *osFile) error { - task, err := createTask(DeviceReload, poolName) - if task == nil { - return err - } - - size, err := GetBlockDeviceSize(dataFile) - if err != nil { - return fmt.Errorf("Can't get data size") - } - - params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768" - if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { - return fmt.Errorf("Can't add target") - } - - if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceCreate") - } - - return nil -} - -func createTask(t TaskType, name string) (*Task, error) { - task := TaskCreate(t) - if task == nil { - return nil, fmt.Errorf("Can't create task of type %d", int(t)) - } - if err := task.SetName(name); err != nil { - return nil, fmt.Errorf("Can't set task name %s", name) - } - return task, nil -} - -func getInfo(name string) (*Info, error) { - task, err := createTask(DeviceInfo, name) - if task == nil { - return nil, err - } - if err := task.Run(); err != nil { - return nil, err - } - return task.GetInfo() -} - -func getStatus(name string) (uint64, uint64, string, string, error) { - task, err := createTask(DeviceStatus, name) - if task == nil { - utils.Debugf("getStatus: Error createTask: %s", err) - return 0, 0, "", "", err - } - if err := task.Run(); err != nil { - utils.Debugf("getStatus: Error Run: %s", err) - return 0, 0, "", "", err - } - - devinfo, err := task.GetInfo() - if err != nil { - utils.Debugf("getStatus: Error GetInfo: %s", err) - return 0, 0, "", "", err - } - if devinfo.Exists == 0 { - utils.Debugf("getStatus: Non existing device %s", name) - return 0, 0, "", "", fmt.Errorf("Non existing device %s", name) - } - - _, start, length, targetType, params := task.GetNextTarget(0) - return start, length, targetType, params, nil -} - -func setTransactionId(poolName string, oldId uint64, newId uint64) error { - task, err := createTask(DeviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.SetSector(0); err != nil { - return fmt.Errorf("Can't set sector") - } - - if err := task.SetMessage(fmt.Sprintf("set_transaction_id %d %d", oldId, newId)); err != nil { - return fmt.Errorf("Can't set message") - } - - if err := task.Run(); err != nil { - return fmt.Errorf("Error running setTransactionId") - } - return nil -} - -func suspendDevice(name string) error { - task, err := createTask(DeviceSuspend, name) - if task == nil { - return err - } - if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceSuspend: %s", err) - } - return nil -} - -func resumeDevice(name string) error { - task, err := createTask(DeviceResume, name) - if task == nil { - 
return err - } - - var cookie uint = 0 - if err := task.SetCookie(&cookie, 0); err != nil { - return fmt.Errorf("Can't set cookie") - } - - if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceResume") - } - - UdevWait(cookie) - - return nil -} - -func createDevice(poolName string, deviceId int) error { - utils.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, deviceId) - task, err := createTask(DeviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.SetSector(0); err != nil { - return fmt.Errorf("Can't set sector") - } - - if err := task.SetMessage(fmt.Sprintf("create_thin %d", deviceId)); err != nil { - return fmt.Errorf("Can't set message") - } - - if err := task.Run(); err != nil { - return fmt.Errorf("Error running createDevice") - } - return nil -} - -func deleteDevice(poolName string, deviceId int) error { - task, err := createTask(DeviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.SetSector(0); err != nil { - return fmt.Errorf("Can't set sector") - } - - if err := task.SetMessage(fmt.Sprintf("delete %d", deviceId)); err != nil { - return fmt.Errorf("Can't set message") - } - - if err := task.Run(); err != nil { - return fmt.Errorf("Error running deleteDevice") - } - return nil -} - -func removeDevice(name string) error { - utils.Debugf("[devmapper] removeDevice START") - defer utils.Debugf("[devmapper] removeDevice END") - task, err := createTask(DeviceRemove, name) - if task == nil { - return err - } - if err = task.Run(); err != nil { - return fmt.Errorf("Error running removeDevice") - } - return nil -} - -func activateDevice(poolName string, name string, deviceId int, size uint64) error { - task, err := createTask(DeviceCreate, name) - if task == nil { - return err - } - - params := fmt.Sprintf("%s %d", poolName, deviceId) - if err := task.AddTarget(0, size/512, "thin", params); err != nil { - return fmt.Errorf("Can't add target") - } - if err := task.SetAddNode(AddNodeOnCreate); err != nil { - return fmt.Errorf("Can't add node") - } - - var cookie uint = 0 - if err := task.SetCookie(&cookie, 0); err != nil { - return fmt.Errorf("Can't set cookie") - } - - if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceCreate (activateDevice)") - } - - UdevWait(cookie) - - return nil -} - -func (devices *DeviceSet) createSnapDevice(poolName string, deviceId int, baseName string, baseDeviceId int) error { - devinfo, _ := getInfo(baseName) - doSuspend := devinfo != nil && devinfo.Exists != 0 - - if doSuspend { - if err := suspendDevice(baseName); err != nil { - return err - } - } - - task, err := createTask(DeviceTargetMsg, poolName) - if task == nil { - if doSuspend { - resumeDevice(baseName) - } - return err - } - - if err := task.SetSector(0); err != nil { - if doSuspend { - resumeDevice(baseName) - } - return fmt.Errorf("Can't set sector") - } - - if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", deviceId, baseDeviceId)); err != nil { - if doSuspend { - resumeDevice(baseName) - } - return fmt.Errorf("Can't set message") - } - - if err := task.Run(); err != nil { - if doSuspend { - resumeDevice(baseName) - } - return fmt.Errorf("Error running DeviceCreate (createSnapDevice)") - } - - if doSuspend { - if err := resumeDevice(baseName); err != nil { - return err - } - } - - return nil -} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/devmapper/devmapper_log.go docker.io-1.3.2~dfsg1/graphdriver/devmapper/devmapper_log.go --- 
docker.io-0.9.1~dfsg1/graphdriver/devmapper/devmapper_log.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/devmapper/devmapper_log.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import "C" - -// Due to the way cgo works this has to be in a separate file, as devmapper.go has -// definitions in the cgo block, which is incompatible with using "//export" - -//export DevmapperLogCallback -func DevmapperLogCallback(level C.int, file *C.char, line C.int, dm_errno_or_class C.int, message *C.char) { - if dmLogger != nil { - dmLogger.log(int(level), C.GoString(file), int(line), int(dm_errno_or_class), C.GoString(message)) - } -} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/devmapper/devmapper_test.go docker.io-1.3.2~dfsg1/graphdriver/devmapper/devmapper_test.go --- docker.io-0.9.1~dfsg1/graphdriver/devmapper/devmapper_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/devmapper/devmapper_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,287 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "testing" -) - -func TestTaskCreate(t *testing.T) { - t.Skip("FIXME: not a unit test") - // Test success - taskCreate(t, DeviceInfo) - - // Test Failure - DmTaskCreate = dmTaskCreateFail - defer func() { DmTaskCreate = dmTaskCreateFct }() - if task := TaskCreate(-1); task != nil { - t.Fatalf("An error should have occured while creating an invalid task.") - } -} - -func TestTaskRun(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - // Perform the RUN - if err := task.Run(); err != nil { - t.Fatal(err) - } - // Make sure we don't have error with GetInfo - if _, err := task.GetInfo(); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskRun = dmTaskRunFail - defer func() { DmTaskRun = dmTaskRunFct }() - - task = taskCreate(t, DeviceInfo) - // Perform the RUN - if err := task.Run(); err != ErrTaskRun { - t.Fatalf("An error should have occured while running task.") - } - // Make sure GetInfo also fails - if _, err := task.GetInfo(); err != ErrTaskGetInfo { - t.Fatalf("GetInfo should fail if task.Run() failed.") - } -} - -func TestTaskSetName(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetName("test"); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskSetName = dmTaskSetNameFail - defer func() { DmTaskSetName = dmTaskSetNameFct }() - - if err := task.SetName("test"); err != ErrTaskSetName { - t.Fatalf("An error should have occured while runnign SetName.") - } -} - -func TestTaskSetMessage(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetMessage("test"); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskSetMessage = dmTaskSetMessageFail - defer func() { DmTaskSetMessage = dmTaskSetMessageFct }() - - if err := task.SetMessage("test"); err != ErrTaskSetMessage { - t.Fatalf("An error should have occured while runnign SetMessage.") - } -} - -func TestTaskSetSector(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetSector(128); err != nil { - t.Fatal(err) - } - - DmTaskSetSector = dmTaskSetSectorFail - defer func() { DmTaskSetSector = dmTaskSetSectorFct }() - - // Test failure - if err := task.SetSector(0); err != ErrTaskSetSector { - t.Fatalf("An error should have occured while 
running SetSector.") - } -} - -func TestTaskSetCookie(t *testing.T) { - t.Skip("FIXME: not a unit test") - var ( - cookie uint = 0 - task = taskCreate(t, DeviceInfo) - ) - - // Test success - if err := task.SetCookie(&cookie, 0); err != nil { - t.Fatal(err) - } - - // Test failure - if err := task.SetCookie(nil, 0); err != ErrNilCookie { - t.Fatalf("An error should have occured while running SetCookie with nil cookie.") - } - - DmTaskSetCookie = dmTaskSetCookieFail - defer func() { DmTaskSetCookie = dmTaskSetCookieFct }() - - if err := task.SetCookie(&cookie, 0); err != ErrTaskSetCookie { - t.Fatalf("An error should have occured while running SetCookie.") - } -} - -func TestTaskSetAddNode(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetAddNode(0); err != nil { - t.Fatal(err) - } - - // Test failure - if err := task.SetAddNode(-1); err != ErrInvalidAddNode { - t.Fatalf("An error should have occured running SetAddNode with wrong node.") - } - - DmTaskSetAddNode = dmTaskSetAddNodeFail - defer func() { DmTaskSetAddNode = dmTaskSetAddNodeFct }() - - if err := task.SetAddNode(0); err != ErrTaskSetAddNode { - t.Fatalf("An error should have occured running SetAddNode.") - } -} - -func TestTaskSetRo(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetRo(); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskSetRo = dmTaskSetRoFail - defer func() { DmTaskSetRo = dmTaskSetRoFct }() - - if err := task.SetRo(); err != ErrTaskSetRo { - t.Fatalf("An error should have occured running SetRo.") - } -} - -func TestTaskAddTarget(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.AddTarget(0, 128, "thinp", ""); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskAddTarget = dmTaskAddTargetFail - defer func() { DmTaskAddTarget = dmTaskAddTargetFct }() - - if err := task.AddTarget(0, 128, "thinp", ""); err != ErrTaskAddTarget { - t.Fatalf("An error should have occured running AddTarget.") - } -} - -// func TestTaskGetInfo(t *testing.T) { -// task := taskCreate(t, DeviceInfo) - -// // Test success -// if _, err := task.GetInfo(); err != nil { -// t.Fatal(err) -// } - -// // Test failure -// DmTaskGetInfo = dmTaskGetInfoFail -// defer func() { DmTaskGetInfo = dmTaskGetInfoFct }() - -// if _, err := task.GetInfo(); err != ErrTaskGetInfo { -// t.Fatalf("An error should have occured running GetInfo.") -// } -// } - -// func TestTaskGetNextTarget(t *testing.T) { -// task := taskCreate(t, DeviceInfo) - -// if next, _, _, _, _ := task.GetNextTarget(0); next == 0 { -// t.Fatalf("The next target should not be 0.") -// } -// } - -/// Utils -func taskCreate(t *testing.T, taskType TaskType) *Task { - task := TaskCreate(taskType) - if task == nil { - t.Fatalf("Error creating task") - } - return task -} - -/// Failure function replacement -func dmTaskCreateFail(t int) *CDmTask { - return nil -} - -func dmTaskRunFail(task *CDmTask) int { - return -1 -} - -func dmTaskSetNameFail(task *CDmTask, name string) int { - return -1 -} - -func dmTaskSetMessageFail(task *CDmTask, message string) int { - return -1 -} - -func dmTaskSetSectorFail(task *CDmTask, sector uint64) int { - return -1 -} - -func dmTaskSetCookieFail(task *CDmTask, cookie *uint, flags uint16) int { - return -1 -} - -func dmTaskSetAddNodeFail(task *CDmTask, addNode AddNodeType) int { - return -1 -} - -func dmTaskSetRoFail(task 
*CDmTask) int {
-	return -1
-}
-
-func dmTaskAddTargetFail(task *CDmTask,
-	start, size uint64, ttype, params string) int {
-	return -1
-}
-
-func dmTaskGetInfoFail(task *CDmTask, info *Info) int {
-	return -1
-}
-
-func dmGetNextTargetFail(task *CDmTask, next uintptr, start, length *uint64,
-	target, params *string) uintptr {
-	return 0
-}
-
-func dmAttachLoopDeviceFail(filename string, fd *int) string {
-	return ""
-}
-
-func sysGetBlockSizeFail(fd uintptr, size *uint64) sysErrno {
-	return 1
-}
-
-func dmUdevWaitFail(cookie uint) int {
-	return -1
-}
-
-func dmSetDevDirFail(dir string) int {
-	return -1
-}
-
-func dmGetLibraryVersionFail(version *string) int {
-	return -1
-}
diff -Nru docker.io-0.9.1~dfsg1/graphdriver/devmapper/devmapper_wrapper.go docker.io-1.3.2~dfsg1/graphdriver/devmapper/devmapper_wrapper.go
--- docker.io-0.9.1~dfsg1/graphdriver/devmapper/devmapper_wrapper.go	2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/graphdriver/devmapper/devmapper_wrapper.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,229 +0,0 @@
-// +build linux,amd64
-
-package devmapper
-
-/*
-#cgo LDFLAGS: -L. -ldevmapper
-#include <libdevmapper.h>
-#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
-#include <linux/fs.h>   // FIXME: present only for BLKGETSIZE64, maybe we can remove it?
-
-#ifndef LOOP_CTL_GET_FREE
-  #define LOOP_CTL_GET_FREE 0x4C82
-#endif
-
-#ifndef LO_FLAGS_PARTSCAN
-  #define LO_FLAGS_PARTSCAN 8
-#endif
-
-// FIXME: Can't we find a way to do the logging in pure Go?
-extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str);
-
-static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...)
-{
-  char buffer[256];
-  va_list ap;
-
-  va_start(ap, f);
-  vsnprintf(buffer, 256, f, ap);
-  va_end(ap);
-
-  DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer);
-}
-
-static void log_with_errno_init()
-{
-  dm_log_with_errno_init(log_cb);
-}
-*/
-import "C"
-
-import (
-	"unsafe"
-)
-
-type (
-	CDmTask C.struct_dm_task
-
-	CLoopInfo64 C.struct_loop_info64
-	LoopInfo64  struct {
-		loDevice           uint64 /* ioctl r/o */
-		loInode            uint64 /* ioctl r/o */
-		loRdevice          uint64 /* ioctl r/o */
-		loOffset           uint64
-		loSizelimit        uint64 /* bytes, 0 == max available */
-		loNumber           uint32 /* ioctl r/o */
-		loEncrypt_type     uint32
-		loEncrypt_key_size uint32 /* ioctl w/o */
-		loFlags            uint32 /* ioctl r/o */
-		loFileName         [LoNameSize]uint8
-		loCryptName        [LoNameSize]uint8
-		loEncryptKey       [LoKeySize]uint8 /* ioctl w/o */
-		loInit             [2]uint64
-	}
-)
-
-// IOCTL consts
-const (
-	BlkGetSize64 = C.BLKGETSIZE64
-	BlkDiscard   = C.BLKDISCARD
-
-	LoopSetFd       = C.LOOP_SET_FD
-	LoopCtlGetFree  = C.LOOP_CTL_GET_FREE
-	LoopGetStatus64 = C.LOOP_GET_STATUS64
-	LoopSetStatus64 = C.LOOP_SET_STATUS64
-	LoopClrFd       = C.LOOP_CLR_FD
-	LoopSetCapacity = C.LOOP_SET_CAPACITY
-)
-
-const (
-	LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR
-	LoFlagsReadOnly  = C.LO_FLAGS_READ_ONLY
-	LoFlagsPartScan  = C.LO_FLAGS_PARTSCAN
-	LoKeySize        = C.LO_KEY_SIZE
-	LoNameSize       = C.LO_NAME_SIZE
-)
-
-var (
-	DmGetLibraryVersion = dmGetLibraryVersionFct
-	DmGetNextTarget     = dmGetNextTargetFct
-	DmLogInitVerbose    = dmLogInitVerboseFct
-	DmSetDevDir         = dmSetDevDirFct
-	DmTaskAddTarget     = dmTaskAddTargetFct
-	DmTaskCreate        = dmTaskCreateFct
-	DmTaskDestroy       = dmTaskDestroyFct
-	DmTaskGetInfo       = dmTaskGetInfoFct
-	DmTaskRun           = dmTaskRunFct
-	DmTaskSetAddNode    = dmTaskSetAddNodeFct
-	DmTaskSetCookie     = dmTaskSetCookieFct
-	DmTaskSetMessage    = dmTaskSetMessageFct
-	DmTaskSetName       = dmTaskSetNameFct
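-	// Each libdevmapper entry point is bound through one of these
-	// package-level function variables, so the unit tests can swap in
-	// failing stubs (see devmapper_test.go) without going through cgo.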
DmTaskSetRo = dmTaskSetRoFct - DmTaskSetSector = dmTaskSetSectorFct - DmUdevWait = dmUdevWaitFct - LogWithErrnoInit = logWithErrnoInitFct -) - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func dmTaskDestroyFct(task *CDmTask) { - C.dm_task_destroy((*C.struct_dm_task)(task)) -} - -func dmTaskCreateFct(taskType int) *CDmTask { - return (*CDmTask)(C.dm_task_create(C.int(taskType))) -} - -func dmTaskRunFct(task *CDmTask) int { - ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) - return int(ret) -} - -func dmTaskSetNameFct(task *CDmTask, name string) int { - Cname := C.CString(name) - defer free(Cname) - - return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) -} - -func dmTaskSetMessageFct(task *CDmTask, message string) int { - Cmessage := C.CString(message) - defer free(Cmessage) - - return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) -} - -func dmTaskSetSectorFct(task *CDmTask, sector uint64) int { - return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) -} - -func dmTaskSetCookieFct(task *CDmTask, cookie *uint, flags uint16) int { - cCookie := C.uint32_t(*cookie) - defer func() { - *cookie = uint(cCookie) - }() - return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) -} - -func dmTaskSetAddNodeFct(task *CDmTask, addNode AddNodeType) int { - return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) -} - -func dmTaskSetRoFct(task *CDmTask) int { - return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) -} - -func dmTaskAddTargetFct(task *CDmTask, - start, size uint64, ttype, params string) int { - - Cttype := C.CString(ttype) - defer free(Cttype) - - Cparams := C.CString(params) - defer free(Cparams) - - return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) -} - -func dmTaskGetInfoFct(task *CDmTask, info *Info) int { - Cinfo := C.struct_dm_info{} - defer func() { - info.Exists = int(Cinfo.exists) - info.Suspended = int(Cinfo.suspended) - info.LiveTable = int(Cinfo.live_table) - info.InactiveTable = int(Cinfo.inactive_table) - info.OpenCount = int32(Cinfo.open_count) - info.EventNr = uint32(Cinfo.event_nr) - info.Major = uint32(Cinfo.major) - info.Minor = uint32(Cinfo.minor) - info.ReadOnly = int(Cinfo.read_only) - info.TargetCount = int32(Cinfo.target_count) - }() - return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) -} - -func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr { - var ( - Cstart, Clength C.uint64_t - CtargetType, Cparams *C.char - ) - defer func() { - *start = uint64(Cstart) - *length = uint64(Clength) - *target = C.GoString(CtargetType) - *params = C.GoString(Cparams) - }() - - nextp := C.dm_get_next_target((*C.struct_dm_task)(task), unsafe.Pointer(next), &Cstart, &Clength, &CtargetType, &Cparams) - return uintptr(nextp) -} - -func dmUdevWaitFct(cookie uint) int { - return int(C.dm_udev_wait(C.uint32_t(cookie))) -} - -func dmLogInitVerboseFct(level int) { - C.dm_log_init_verbose(C.int(level)) -} - -func logWithErrnoInitFct() { - C.log_with_errno_init() -} - -func dmSetDevDirFct(dir string) int { - Cdir := C.CString(dir) - defer free(Cdir) - - return int(C.dm_set_dev_dir(Cdir)) -} - -func dmGetLibraryVersionFct(version *string) int { - buffer := C.CString(string(make([]byte, 128))) - defer free(buffer) - defer func() { - *version = C.GoString(buffer) - }() - return int(C.dm_get_library_version(buffer, 128)) -} diff -Nru 
docker.io-0.9.1~dfsg1/graphdriver/devmapper/driver.go docker.io-1.3.2~dfsg1/graphdriver/devmapper/driver.go
--- docker.io-0.9.1~dfsg1/graphdriver/devmapper/driver.go	2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/graphdriver/devmapper/driver.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,143 +0,0 @@
-// +build linux,amd64
-
-package devmapper
-
-import (
-	"fmt"
-	"github.com/dotcloud/docker/graphdriver"
-	"github.com/dotcloud/docker/utils"
-	"io/ioutil"
-	"os"
-	"path"
-)
-
-func init() {
-	graphdriver.Register("devicemapper", Init)
-}
-
-// Placeholder interfaces, to be replaced
-// at integration.
-
-// End of placeholder interfaces.
-
-type Driver struct {
-	*DeviceSet
-	home string
-}
-
-var Init = func(home string) (graphdriver.Driver, error) {
-	deviceSet, err := NewDeviceSet(home, true)
-	if err != nil {
-		return nil, err
-	}
-	d := &Driver{
-		DeviceSet: deviceSet,
-		home:      home,
-	}
-	return d, nil
-}
-
-func (d *Driver) String() string {
-	return "devicemapper"
-}
-
-func (d *Driver) Status() [][2]string {
-	s := d.DeviceSet.Status()
-
-	status := [][2]string{
-		{"Pool Name", s.PoolName},
-		{"Data file", s.DataLoopback},
-		{"Metadata file", s.MetadataLoopback},
-		{"Data Space Used", fmt.Sprintf("%.1f Mb", float64(s.Data.Used)/(1024*1024))},
-		{"Data Space Total", fmt.Sprintf("%.1f Mb", float64(s.Data.Total)/(1024*1024))},
-		{"Metadata Space Used", fmt.Sprintf("%.1f Mb", float64(s.Metadata.Used)/(1024*1024))},
-		{"Metadata Space Total", fmt.Sprintf("%.1f Mb", float64(s.Metadata.Total)/(1024*1024))},
-	}
-	return status
-}
-
-func (d *Driver) Cleanup() error {
-	return d.DeviceSet.Shutdown()
-}
-
-func (d *Driver) Create(id, parent string) error {
-	if err := d.DeviceSet.AddDevice(id, parent); err != nil {
-		return err
-	}
-
-	mp := path.Join(d.home, "mnt", id)
-	if err := d.mount(id, mp); err != nil {
-		return err
-	}
-
-	if err := osMkdirAll(path.Join(mp, "rootfs"), 0755); err != nil && !osIsExist(err) {
-		return err
-	}
-
-	// Create an "id" file with the container/image id in it to help reconstruct this in case
-	// of later problems
-	if err := ioutil.WriteFile(path.Join(mp, "id"), []byte(id), 0600); err != nil {
-		return err
-	}
-
-	// We float this reference so that the next Get call can
-	// steal it, so we don't have to unmount
-	if err := d.DeviceSet.UnmountDevice(id, UnmountFloat); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (d *Driver) Remove(id string) error {
-	if !d.DeviceSet.HasDevice(id) {
-		// Consider removing a non-existing device a no-op
-		// This is useful to be able to progress on container removal
-		// if the underlying device has gone away due to earlier errors
-		return nil
-	}
-
-	// Sink the float from create in case no Get() call was made
-	if err := d.DeviceSet.UnmountDevice(id, UnmountSink); err != nil {
-		return err
-	}
-	// This assumes the device has been properly paired with Get/Put calls and thus is unmounted
-	if err := d.DeviceSet.DeleteDevice(id); err != nil {
-		return err
-	}
-
-	mp := path.Join(d.home, "mnt", id)
-	if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) {
-		return err
-	}
-
-	return nil
-}
-
-func (d *Driver) Get(id string) (string, error) {
-	mp := path.Join(d.home, "mnt", id)
-	if err := d.mount(id, mp); err != nil {
-		return "", err
-	}
-
-	return path.Join(mp, "rootfs"), nil
-}
-
-func (d *Driver) Put(id string) {
-	if err := d.DeviceSet.UnmountDevice(id, UnmountRegular); err != nil {
-		utils.Errorf("Warning: error unmounting device %s: %s\n", id, err)
-	}
-}
-
-func (d *Driver) mount(id, mountPoint string) error {
error { - // Create the target directories if they don't exist - if err := osMkdirAll(mountPoint, 0755); err != nil && !osIsExist(err) { - return err - } - // Mount the device - return d.DeviceSet.MountDevice(id, mountPoint) -} - -func (d *Driver) Exists(id string) bool { - return d.Devices[id] != nil -} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/devmapper/driver_test.go docker.io-1.3.2~dfsg1/graphdriver/devmapper/driver_test.go --- docker.io-0.9.1~dfsg1/graphdriver/devmapper/driver_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/devmapper/driver_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,886 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "fmt" - "github.com/dotcloud/docker/graphdriver" - "io/ioutil" - "path" - "runtime" - "strings" - "syscall" - "testing" -) - -func init() { - // Reduce the size of the base fs and loopback for the tests - DefaultDataLoopbackSize = 300 * 1024 * 1024 - DefaultMetaDataLoopbackSize = 200 * 1024 * 1024 - DefaultBaseFsSize = 300 * 1024 * 1024 -} - -// denyAllDevmapper mocks all calls to libdevmapper in the unit tests, and denies them by default -func denyAllDevmapper() { - // Hijack all calls to libdevmapper with default panics. - // Authorized calls are selectively hijacked in each test. - DmTaskCreate = func(t int) *CDmTask { - panic("DmTaskCreate: this method should not be called here") - } - DmTaskRun = func(task *CDmTask) int { - panic("DmTaskRun: this method should not be called here") - } - DmTaskSetName = func(task *CDmTask, name string) int { - panic("DmTaskSetName: this method should not be called here") - } - DmTaskSetMessage = func(task *CDmTask, message string) int { - panic("DmTaskSetMessage: this method should not be called here") - } - DmTaskSetSector = func(task *CDmTask, sector uint64) int { - panic("DmTaskSetSector: this method should not be called here") - } - DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int { - panic("DmTaskSetCookie: this method should not be called here") - } - DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int { - panic("DmTaskSetAddNode: this method should not be called here") - } - DmTaskSetRo = func(task *CDmTask) int { - panic("DmTaskSetRo: this method should not be called here") - } - DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int { - panic("DmTaskAddTarget: this method should not be called here") - } - DmTaskGetInfo = func(task *CDmTask, info *Info) int { - panic("DmTaskGetInfo: this method should not be called here") - } - DmGetNextTarget = func(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr { - panic("DmGetNextTarget: this method should not be called here") - } - DmUdevWait = func(cookie uint) int { - panic("DmUdevWait: this method should not be called here") - } - DmSetDevDir = func(dir string) int { - panic("DmSetDevDir: this method should not be called here") - } - DmGetLibraryVersion = func(version *string) int { - panic("DmGetLibraryVersion: this method should not be called here") - } - DmLogInitVerbose = func(level int) { - panic("DmLogInitVerbose: this method should not be called here") - } - DmTaskDestroy = func(task *CDmTask) { - panic("DmTaskDestroy: this method should not be called here") - } - LogWithErrnoInit = func() { - panic("LogWithErrnoInit: this method should not be called here") - } -} - -func denyAllSyscall() { - sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) { - panic("sysMount: this
method should not be called here") - } - sysUnmount = func(target string, flags int) (err error) { - panic("sysUnmount: this method should not be called here") - } - sysCloseOnExec = func(fd int) { - panic("sysCloseOnExec: this method should not be called here") - } - sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { - panic("sysSyscall: this method should not be called here") - } - // Not a syscall, but forbidding it here anyway - Mounted = func(mnt string) (bool, error) { - panic("devmapper.Mounted: this method should not be called here") - } - // osOpenFile = os.OpenFile - // osNewFile = os.NewFile - // osCreate = os.Create - // osStat = os.Stat - // osIsNotExist = os.IsNotExist - // osIsExist = os.IsExist - // osMkdirAll = os.MkdirAll - // osRemoveAll = os.RemoveAll - // osRename = os.Rename - // osReadlink = os.Readlink - - // execRun = func(name string, args ...string) error { - // return exec.Command(name, args...).Run() - // } -} - -func mkTestDirectory(t *testing.T) string { - dir, err := ioutil.TempDir("", "docker-test-devmapper-") - if err != nil { - t.Fatal(err) - } - return dir -} - -func newDriver(t *testing.T) *Driver { - home := mkTestDirectory(t) - d, err := Init(home) - if err != nil { - t.Fatal(err) - } - return d.(*Driver) -} - -func cleanup(d *Driver) { - d.Cleanup() - osRemoveAll(d.home) -} - -type Set map[string]bool - -func (r Set) Assert(t *testing.T, names ...string) { - for _, key := range names { - required := true - if strings.HasPrefix(key, "?") { - key = key[1:] - required = false - } - if _, exists := r[key]; !exists && required { - t.Fatalf("Key not set: %s", key) - } - delete(r, key) - } - if len(r) != 0 { - t.Fatalf("Unexpected keys: %v", r) - } -} - -func TestInit(t *testing.T) { - var ( - calls = make(Set) - taskMessages = make(Set) - taskTypes = make(Set) - home = mkTestDirectory(t) - ) - defer osRemoveAll(home) - - func() { - denyAllDevmapper() - DmSetDevDir = func(dir string) int { - calls["DmSetDevDir"] = true - expectedDir := "/dev" - if dir != expectedDir { - t.Fatalf("Wrong libdevmapper call\nExpected: DmSetDevDir(%v)\nReceived: DmSetDevDir(%v)\n", expectedDir, dir) - } - return 0 - } - LogWithErrnoInit = func() { - calls["DmLogWithErrnoInit"] = true - } - var task1 CDmTask - DmTaskCreate = func(taskType int) *CDmTask { - calls["DmTaskCreate"] = true - taskTypes[fmt.Sprintf("%d", taskType)] = true - return &task1 - } - DmTaskSetName = func(task *CDmTask, name string) int { - calls["DmTaskSetName"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetName(%v)\nReceived: DmTaskSetName(%v)\n", expectedTask, task) - } - // FIXME: use Set.AssertRegexp() - if !strings.HasPrefix(name, "docker-") && !strings.HasPrefix(name, "/dev/mapper/docker-") || - !strings.HasSuffix(name, "-pool") && !strings.HasSuffix(name, "-base") { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetName(%v)\nReceived: DmTaskSetName(%v)\n", "docker-...-pool", name) - } - return 1 - } - DmTaskRun = func(task *CDmTask) int { - calls["DmTaskRun"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskRun(%v)\nReceived: DmTaskRun(%v)\n", expectedTask, task) - } - return 1 - } - DmTaskGetInfo = func(task *CDmTask, info *Info) int { - calls["DmTaskGetInfo"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskGetInfo(%v)\nReceived: DmTaskGetInfo(%v)\n", expectedTask, task) 
- } - // This will crash if info is not dereferenceable - info.Exists = 0 - return 1 - } - DmTaskSetSector = func(task *CDmTask, sector uint64) int { - calls["DmTaskSetSector"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetSector(%v)\nReceived: DmTaskSetSector(%v)\n", expectedTask, task) - } - if expectedSector := uint64(0); sector != expectedSector { - t.Fatalf("Wrong libdevmapper call to DmTaskSetSector\nExpected: %v\nReceived: %v\n", expectedSector, sector) - } - return 1 - } - DmTaskSetMessage = func(task *CDmTask, message string) int { - calls["DmTaskSetMessage"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetMessage(%v)\nReceived: DmTaskSetMessage(%v)\n", expectedTask, task) - } - taskMessages[message] = true - return 1 - } - DmTaskDestroy = func(task *CDmTask) { - calls["DmTaskDestroy"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task) - } - } - DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int { - calls["DmTaskSetTarget"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskAddTarget(%v)\nReceived: DmTaskAddTarget(%v)\n", expectedTask, task) - } - if start != 0 { - t.Fatalf("Wrong start: %d != %d", start, 0) - } - if ttype != "thin" && ttype != "thin-pool" { - t.Fatalf("Wrong ttype: %s", ttype) - } - // Quick smoke test - if params == "" { - t.Fatalf("Params should not be empty") - } - return 1 - } - fakeCookie := uint(4321) - DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int { - calls["DmTaskSetCookie"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetCookie(%v)\nReceived: DmTaskSetCookie(%v)\n", expectedTask, task) - } - if flags != 0 { - t.Fatalf("Cookie flags should be 0 (not %x)", flags) - } - *cookie = fakeCookie - return 1 - } - DmUdevWait = func(cookie uint) int { - calls["DmUdevWait"] = true - if cookie != fakeCookie { - t.Fatalf("Wrong cookie: %d != %d", cookie, fakeCookie) - } - return 1 - } - DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int { - if addNode != AddNodeOnCreate { - t.Fatalf("Wrong AddNodeType: %v (expected %v)", addNode, AddNodeOnCreate) - } - calls["DmTaskSetAddNode"] = true - return 1 - } - execRun = func(name string, args ...string) error { - calls["execRun"] = true - if name != "mkfs.ext4" { - t.Fatalf("Expected %s to be executed, not %s", "mkfs.ext4", name) - } - return nil - } - driver, err := Init(home) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := driver.Cleanup(); err != nil { - t.Fatal(err) - } - }() - }() - // Put all tests in a function to make sure the garbage collection will - // occur.
- - // Call GC to cleanup runtime.Finalizers - runtime.GC() - - calls.Assert(t, - "DmSetDevDir", - "DmLogWithErrnoInit", - "DmTaskSetName", - "DmTaskRun", - "DmTaskGetInfo", - "DmTaskDestroy", - "execRun", - "DmTaskCreate", - "DmTaskSetTarget", - "DmTaskSetCookie", - "DmUdevWait", - "DmTaskSetSector", - "DmTaskSetMessage", - "DmTaskSetAddNode", - ) - taskTypes.Assert(t, "0", "6", "17") - taskMessages.Assert(t, "create_thin 0", "set_transaction_id 0 1") -} - -func fakeInit() func(home string) (graphdriver.Driver, error) { - oldInit := Init - Init = func(home string) (graphdriver.Driver, error) { - return &Driver{ - home: home, - }, nil - } - return oldInit -} - -func restoreInit(init func(home string) (graphdriver.Driver, error)) { - Init = init -} - -func mockAllDevmapper(calls Set) { - DmSetDevDir = func(dir string) int { - calls["DmSetDevDir"] = true - return 0 - } - LogWithErrnoInit = func() { - calls["DmLogWithErrnoInit"] = true - } - DmTaskCreate = func(taskType int) *CDmTask { - calls["DmTaskCreate"] = true - return &CDmTask{} - } - DmTaskSetName = func(task *CDmTask, name string) int { - calls["DmTaskSetName"] = true - return 1 - } - DmTaskRun = func(task *CDmTask) int { - calls["DmTaskRun"] = true - return 1 - } - DmTaskGetInfo = func(task *CDmTask, info *Info) int { - calls["DmTaskGetInfo"] = true - return 1 - } - DmTaskSetSector = func(task *CDmTask, sector uint64) int { - calls["DmTaskSetSector"] = true - return 1 - } - DmTaskSetMessage = func(task *CDmTask, message string) int { - calls["DmTaskSetMessage"] = true - return 1 - } - DmTaskDestroy = func(task *CDmTask) { - calls["DmTaskDestroy"] = true - } - DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int { - calls["DmTaskSetTarget"] = true - return 1 - } - DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int { - calls["DmTaskSetCookie"] = true - return 1 - } - DmUdevWait = func(cookie uint) int { - calls["DmUdevWait"] = true - return 1 - } - DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int { - calls["DmTaskSetAddNode"] = true - return 1 - } - execRun = func(name string, args ...string) error { - calls["execRun"] = true - return nil - } -} - -func TestDriverName(t *testing.T) { - denyAllDevmapper() - defer denyAllDevmapper() - - oldInit := fakeInit() - defer restoreInit(oldInit) - - d := newDriver(t) - if d.String() != "devicemapper" { - t.Fatalf("Expected driver name to be devicemapper got %s", d.String()) - } -} - -func TestDriverCreate(t *testing.T) { - denyAllDevmapper() - denyAllSyscall() - defer denyAllSyscall() - defer denyAllDevmapper() - - calls := make(Set) - mockAllDevmapper(calls) - - sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) { - calls["sysMount"] = true - // FIXME: compare the exact source and target strings (inodes + devname) - if expectedSource := "/dev/mapper/docker-"; !strings.HasPrefix(source, expectedSource) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedSource, source) - } - if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) - } - if expectedFstype := "ext4"; fstype != expectedFstype { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFstype, fstype) - } - if expectedFlags := uintptr(3236757504); flags != expectedFlags { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: 
Mount(%v)\n", expectedFlags, flags) - } - return nil - } - - Mounted = func(mnt string) (bool, error) { - calls["Mounted"] = true - if !strings.HasPrefix(mnt, "/tmp/docker-test-devmapper-") || !strings.HasSuffix(mnt, "/mnt/1") { - t.Fatalf("Wrong mounted call\nExpected: Mounted(%v)\nReceived: Mounted(%v)\n", "/tmp/docker-test-devmapper-.../mnt/1", mnt) - } - return false, nil - } - - sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { - calls["sysSyscall"] = true - if trap != sysSysIoctl { - t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap) - } - switch a2 { - case LoopSetFd: - calls["ioctl.loopsetfd"] = true - case LoopCtlGetFree: - calls["ioctl.loopctlgetfree"] = true - case LoopGetStatus64: - calls["ioctl.loopgetstatus"] = true - case LoopSetStatus64: - calls["ioctl.loopsetstatus"] = true - case LoopClrFd: - calls["ioctl.loopclrfd"] = true - case LoopSetCapacity: - calls["ioctl.loopsetcapacity"] = true - case BlkGetSize64: - calls["ioctl.blkgetsize"] = true - default: - t.Fatalf("Unexpected IOCTL. Received %d", a2) - } - return 0, 0, 0 - } - - func() { - d := newDriver(t) - - calls.Assert(t, - "DmSetDevDir", - "DmLogWithErrnoInit", - "DmTaskSetName", - "DmTaskRun", - "DmTaskGetInfo", - "execRun", - "DmTaskCreate", - "DmTaskSetTarget", - "DmTaskSetCookie", - "DmUdevWait", - "DmTaskSetSector", - "DmTaskSetMessage", - "DmTaskSetAddNode", - "sysSyscall", - "ioctl.blkgetsize", - "ioctl.loopsetfd", - "ioctl.loopsetstatus", - "?ioctl.loopctlgetfree", - ) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - calls.Assert(t, - "DmTaskCreate", - "DmTaskGetInfo", - "sysMount", - "DmTaskRun", - "DmTaskSetTarget", - "DmTaskSetSector", - "DmTaskSetCookie", - "DmUdevWait", - "DmTaskSetName", - "DmTaskSetMessage", - "DmTaskSetAddNode", - ) - - }() - - runtime.GC() - - calls.Assert(t, - "DmTaskDestroy", - ) -} - -func TestDriverRemove(t *testing.T) { - denyAllDevmapper() - denyAllSyscall() - defer denyAllSyscall() - defer denyAllDevmapper() - - calls := make(Set) - mockAllDevmapper(calls) - - sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) { - calls["sysMount"] = true - // FIXME: compare the exact source and target strings (inodes + devname) - if expectedSource := "/dev/mapper/docker-"; !strings.HasPrefix(source, expectedSource) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedSource, source) - } - if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) - } - if expectedFstype := "ext4"; fstype != expectedFstype { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFstype, fstype) - } - if expectedFlags := uintptr(3236757504); flags != expectedFlags { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags) - } - return nil - } - sysUnmount = func(target string, flags int) (err error) { - calls["sysUnmount"] = true - // FIXME: compare the exact source and target strings (inodes + devname) - if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) - } - if expectedFlags := 0; flags != expectedFlags { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags) - } - return nil 
- } - Mounted = func(mnt string) (bool, error) { - calls["Mounted"] = true - return false, nil - } - - sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { - calls["sysSyscall"] = true - if trap != sysSysIoctl { - t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap) - } - switch a2 { - case LoopSetFd: - calls["ioctl.loopsetfd"] = true - case LoopCtlGetFree: - calls["ioctl.loopctlgetfree"] = true - case LoopGetStatus64: - calls["ioctl.loopgetstatus"] = true - case LoopSetStatus64: - calls["ioctl.loopsetstatus"] = true - case LoopClrFd: - calls["ioctl.loopclrfd"] = true - case LoopSetCapacity: - calls["ioctl.loopsetcapacity"] = true - case BlkGetSize64: - calls["ioctl.blkgetsize"] = true - default: - t.Fatalf("Unexpected IOCTL. Received %d", a2) - } - return 0, 0, 0 - } - - func() { - d := newDriver(t) - - calls.Assert(t, - "DmSetDevDir", - "DmLogWithErrnoInit", - "DmTaskSetName", - "DmTaskRun", - "DmTaskGetInfo", - "execRun", - "DmTaskCreate", - "DmTaskSetTarget", - "DmTaskSetCookie", - "DmUdevWait", - "DmTaskSetSector", - "DmTaskSetMessage", - "DmTaskSetAddNode", - "sysSyscall", - "ioctl.blkgetsize", - "ioctl.loopsetfd", - "ioctl.loopsetstatus", - "?ioctl.loopctlgetfree", - ) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - calls.Assert(t, - "DmTaskCreate", - "DmTaskGetInfo", - "sysMount", - "DmTaskRun", - "DmTaskSetTarget", - "DmTaskSetSector", - "DmTaskSetCookie", - "DmUdevWait", - "DmTaskSetName", - "DmTaskSetMessage", - "DmTaskSetAddNode", - ) - - Mounted = func(mnt string) (bool, error) { - calls["Mounted"] = true - return true, nil - } - - if err := d.Remove("1"); err != nil { - t.Fatal(err) - } - - calls.Assert(t, - "DmTaskRun", - "DmTaskSetSector", - "DmTaskSetName", - "DmTaskSetMessage", - "DmTaskCreate", - "DmTaskGetInfo", - "DmTaskSetCookie", - "DmTaskSetTarget", - "DmTaskSetAddNode", - "DmUdevWait", - "sysUnmount", - ) - }() - runtime.GC() - - calls.Assert(t, - "DmTaskDestroy", - ) -} - -func TestCleanup(t *testing.T) { - t.Skip("FIXME: not a unit test") - t.Skip("Unimplemented") - d := newDriver(t) - defer osRemoveAll(d.home) - - mountPoints := make([]string, 2) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - // Mount the id - p, err := d.Get("1") - if err != nil { - t.Fatal(err) - } - mountPoints[0] = p - - if err := d.Create("2", "1"); err != nil { - t.Fatal(err) - } - - p, err = d.Get("2") - if err != nil { - t.Fatal(err) - } - mountPoints[1] = p - - // Ensure that all the mount points are currently mounted - for _, p := range mountPoints { - if mounted, err := Mounted(p); err != nil { - t.Fatal(err) - } else if !mounted { - t.Fatalf("Expected %s to be mounted", p) - } - } - - // Ensure that devices are active - for _, p := range []string{"1", "2"} { - if !d.HasActivatedDevice(p) { - t.Fatalf("Expected %s to have an active device", p) - } - } - - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } - - // Ensure that all the mount points are no longer mounted - for _, p := range mountPoints { - if mounted, err := Mounted(p); err != nil { - t.Fatal(err) - } else if mounted { - t.Fatalf("Expected %s to not be mounted", p) - } - } - - // Ensure that devices are no longer activated - for _, p := range []string{"1", "2"} { - if d.HasActivatedDevice(p) { - t.Fatalf("Expected %s not to be an active device", p) - } - } -} - -func TestNotMounted(t *testing.T) { - t.Skip("FIXME: not a unit test") - t.Skip("Not implemented") - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err
!= nil { - t.Fatal(err) - } - - mounted, err := Mounted(path.Join(d.home, "mnt", "1")) - if err != nil { - t.Fatal(err) - } - if mounted { - t.Fatal("Id 1 should not be mounted") - } -} - -func TestMounted(t *testing.T) { - t.Skip("FIXME: not a unit test") - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - if _, err := d.Get("1"); err != nil { - t.Fatal(err) - } - - mounted, err := Mounted(path.Join(d.home, "mnt", "1")) - if err != nil { - t.Fatal(err) - } - if !mounted { - t.Fatal("Id 1 should be mounted") - } -} - -func TestInitCleanedDriver(t *testing.T) { - t.Skip("FIXME: not a unit test") - d := newDriver(t) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - if _, err := d.Get("1"); err != nil { - t.Fatal(err) - } - - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } - - driver, err := Init(d.home) - if err != nil { - t.Fatal(err) - } - d = driver.(*Driver) - defer cleanup(d) - - if _, err := d.Get("1"); err != nil { - t.Fatal(err) - } -} - -func TestMountMountedDriver(t *testing.T) { - t.Skip("FIXME: not a unit test") - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - // Perform get on same id to ensure that it will - // not be mounted twice - if _, err := d.Get("1"); err != nil { - t.Fatal(err) - } - if _, err := d.Get("1"); err != nil { - t.Fatal(err) - } -} - -func TestGetReturnsValidDevice(t *testing.T) { - t.Skip("FIXME: not a unit test") - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - if !d.HasDevice("1") { - t.Fatalf("Expected id 1 to be in device set") - } - - if _, err := d.Get("1"); err != nil { - t.Fatal(err) - } - - if !d.HasActivatedDevice("1") { - t.Fatalf("Expected id 1 to be activated") - } - - if !d.HasInitializedDevice("1") { - t.Fatalf("Expected id 1 to be initialized") - } -} - -func TestDriverGetSize(t *testing.T) { - t.Skip("FIXME: not a unit test") - t.Skipf("Size is currently not implemented") - - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - mountPoint, err := d.Get("1") - if err != nil { - t.Fatal(err) - } - - size := int64(1024) - - f, err := osCreate(path.Join(mountPoint, "test_file")) - if err != nil { - t.Fatal(err) - } - if err := f.Truncate(size); err != nil { - t.Fatal(err) - } - f.Close() - - // diffSize, err := d.DiffSize("1") - // if err != nil { - // t.Fatal(err) - // } - // if diffSize != size { - // t.Fatalf("Expected size %d got %d", size, diffSize) - // } -} - -func assertMap(t *testing.T, m map[string]bool, keys ...string) { - for _, key := range keys { - if _, exists := m[key]; !exists { - t.Fatalf("Key not set: %s", key) - } - delete(m, key) - } - if len(m) != 0 { - t.Fatalf("Unexpected keys: %v", m) - } -} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/devmapper/ioctl.go docker.io-1.3.2~dfsg1/graphdriver/devmapper/ioctl.go --- docker.io-0.9.1~dfsg1/graphdriver/devmapper/ioctl.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/devmapper/ioctl.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,71 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "unsafe" -) - -func ioctlLoopCtlGetFree(fd uintptr) (int, error) { - index, _, err := sysSyscall(sysSysIoctl, fd, LoopCtlGetFree, 0) - if err != 0 { - return 0, err - } - return int(index), nil -} - -func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetFd, 
sparseFd); err != 0 { - return err - } - return nil -} - -func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *LoopInfo64) error { - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { - return err - } - return nil -} - -func ioctlLoopClrFd(loopFd uintptr) error { - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopClrFd, 0); err != 0 { - return err - } - return nil -} - -func ioctlLoopGetStatus64(loopFd uintptr) (*LoopInfo64, error) { - loopInfo := &LoopInfo64{} - - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { - return nil, err - } - return loopInfo, nil -} - -func ioctlLoopSetCapacity(loopFd uintptr, value int) error { - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { - return err - } - return nil -} - -func ioctlBlkGetSize64(fd uintptr) (int64, error) { - var size int64 - if _, _, err := sysSyscall(sysSysIoctl, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { - return 0, err - } - return size, nil -} - -func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { - var r [2]uint64 - r[0] = offset - r[1] = length - - if _, _, err := sysSyscall(sysSysIoctl, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { - return err - } - return nil -} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/devmapper/mount.go docker.io-1.3.2~dfsg1/graphdriver/devmapper/mount.go --- docker.io-0.9.1~dfsg1/graphdriver/devmapper/mount.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/devmapper/mount.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "path/filepath" -) - -// FIXME: this is copy-pasted from the aufs driver. -// It should be moved into the core. 
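-// Mounted reports whether a filesystem is mounted at mountpoint: the device ID (st_dev) of a mountpoint differs from that of its parent directory exactly when another filesystem is mounted there.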
- -var Mounted = func(mountpoint string) (bool, error) { - mntpoint, err := osStat(mountpoint) - if err != nil { - if osIsNotExist(err) { - return false, nil - } - return false, err - } - parent, err := osStat(filepath.Join(mountpoint, "..")) - if err != nil { - return false, err - } - mntpointSt := toSysStatT(mntpoint.Sys()) - parentSt := toSysStatT(parent.Sys()) - return mntpointSt.Dev != parentSt.Dev, nil -} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/devmapper/sys.go docker.io-1.3.2~dfsg1/graphdriver/devmapper/sys.go --- docker.io-0.9.1~dfsg1/graphdriver/devmapper/sys.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/devmapper/sys.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "os" - "os/exec" - "syscall" -) - -type ( - sysStatT syscall.Stat_t - sysErrno syscall.Errno - - osFile struct{ *os.File } -) - -var ( - sysMount = syscall.Mount - sysUnmount = syscall.Unmount - sysCloseOnExec = syscall.CloseOnExec - sysSyscall = syscall.Syscall - - osOpenFile = func(name string, flag int, perm os.FileMode) (*osFile, error) { - f, err := os.OpenFile(name, flag, perm) - return &osFile{File: f}, err - } - osOpen = func(name string) (*osFile, error) { f, err := os.Open(name); return &osFile{File: f}, err } - osNewFile = os.NewFile - osCreate = os.Create - osStat = os.Stat - osIsNotExist = os.IsNotExist - osIsExist = os.IsExist - osMkdirAll = os.MkdirAll - osRemoveAll = os.RemoveAll - osRename = os.Rename - osReadlink = os.Readlink - - execRun = func(name string, args ...string) error { return exec.Command(name, args...).Run() } -) - -const ( - sysMsMgcVal = syscall.MS_MGC_VAL - sysMsRdOnly = syscall.MS_RDONLY - sysEInval = syscall.EINVAL - sysSysIoctl = syscall.SYS_IOCTL - sysEBusy = syscall.EBUSY - - osORdOnly = os.O_RDONLY - osORdWr = os.O_RDWR - osOCreate = os.O_CREATE - osModeDevice = os.ModeDevice -) - -func toSysStatT(i interface{}) *sysStatT { - return (*sysStatT)(i.(*syscall.Stat_t)) -} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/driver.go docker.io-1.3.2~dfsg1/graphdriver/driver.go --- docker.io-0.9.1~dfsg1/graphdriver/driver.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/driver.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ -package graphdriver - -import ( - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/utils" - "os" - "path" -) - -type InitFunc func(root string) (Driver, error) - -type Driver interface { - String() string - - Create(id, parent string) error - Remove(id string) error - - Get(id string) (dir string, err error) - Put(id string) - Exists(id string) bool - - Status() [][2]string - - Cleanup() error -} - -type Differ interface { - Diff(id string) (archive.Archive, error) - Changes(id string) ([]archive.Change, error) - ApplyDiff(id string, diff archive.ArchiveReader) error - DiffSize(id string) (bytes int64, err error) -} - -var ( - DefaultDriver string - // All registered drivers - drivers map[string]InitFunc - // Slice of drivers that should be used in order - priority = []string{ - "aufs", - "devicemapper", - "vfs", - // experimental, has to be enabled manually for now - "btrfs", - } -) - -func init() { - drivers = make(map[string]InitFunc) -} - -func Register(name string, initFunc InitFunc) error { - if _, exists := drivers[name]; exists { - return fmt.Errorf("Name already registered %s", name) - } - drivers[name] = initFunc - - return nil -} - -func GetDriver(name, home string) (Driver, error) { - if initFunc,
exists := drivers[name]; exists { - return initFunc(path.Join(home, name)) - } - return nil, fmt.Errorf("No such driver: %s", name) -} - -func New(root string) (driver Driver, err error) { - for _, name := range []string{os.Getenv("DOCKER_DRIVER"), DefaultDriver} { - if name != "" { - return GetDriver(name, root) - } - } - - // Check for priority drivers first - for _, name := range priority { - if driver, err = GetDriver(name, root); err != nil { - utils.Debugf("Error loading driver %s: %s", name, err) - continue - } - return driver, nil - } - - // Check all registered drivers if no priority driver is found - for _, initFunc := range drivers { - if driver, err = initFunc(root); err != nil { - continue - } - return driver, nil - } - return nil, err -} diff -Nru docker.io-0.9.1~dfsg1/graphdriver/vfs/driver.go docker.io-1.3.2~dfsg1/graphdriver/vfs/driver.go --- docker.io-0.9.1~dfsg1/graphdriver/vfs/driver.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graphdriver/vfs/driver.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,95 +0,0 @@ -package vfs - -import ( - "fmt" - "github.com/dotcloud/docker/graphdriver" - "os" - "os/exec" - "path" -) - -func init() { - graphdriver.Register("vfs", Init) -} - -func Init(home string) (graphdriver.Driver, error) { - d := &Driver{ - home: home, - } - return d, nil -} - -type Driver struct { - home string -} - -func (d *Driver) String() string { - return "vfs" -} - -func (d *Driver) Status() [][2]string { - return nil -} - -func (d *Driver) Cleanup() error { - return nil -} - -func copyDir(src, dst string) error { - if output, err := exec.Command("cp", "-aT", "--reflink=auto", src, dst).CombinedOutput(); err != nil { - return fmt.Errorf("Error VFS copying directory: %s (%s)", err, output) - } - return nil -} - -func (d *Driver) Create(id string, parent string) error { - dir := d.dir(id) - if err := os.MkdirAll(path.Dir(dir), 0700); err != nil { - return err - } - if err := os.Mkdir(dir, 0700); err != nil { - return err - } - if parent == "" { - return nil - } - parentDir, err := d.Get(parent) - if err != nil { - return fmt.Errorf("%s: %s", parent, err) - } - if err := copyDir(parentDir, dir); err != nil { - return err - } - return nil -} - -func (d *Driver) dir(id string) string { - return path.Join(d.home, "dir", path.Base(id)) -} - -func (d *Driver) Remove(id string) error { - if _, err := os.Stat(d.dir(id)); err != nil { - return err - } - return os.RemoveAll(d.dir(id)) -} - -func (d *Driver) Get(id string) (string, error) { - dir := d.dir(id) - if st, err := os.Stat(dir); err != nil { - return "", err - } else if !st.IsDir() { - return "", fmt.Errorf("%s: not a directory", dir) - } - return dir, nil -} - -func (d *Driver) Put(id string) { - // The vfs driver has no runtime resources (e.g. 
mounts) - // to clean up, so we don't need anything here -} - -func (d *Driver) Exists(id string) bool { - _, err := os.Stat(d.dir(id)) - return err == nil -} diff -Nru docker.io-0.9.1~dfsg1/graph.go docker.io-1.3.2~dfsg1/graph.go --- docker.io-0.9.1~dfsg1/graph.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/graph.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,407 +0,0 @@ -package docker - -import ( - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/graphdriver" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "runtime" - "strings" - "syscall" - "time" -) - -// A Graph is a store for versioned filesystem images and the relationship between them. -type Graph struct { - Root string - idIndex *utils.TruncIndex - driver graphdriver.Driver -} - -// NewGraph instantiates a new graph at the given root path in the filesystem. -// `root` will be created if it doesn't exist. -func NewGraph(root string, driver graphdriver.Driver) (*Graph, error) { - abspath, err := filepath.Abs(root) - if err != nil { - return nil, err - } - // Create the root directory if it doesn't exist - if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) { - return nil, err - } - - graph := &Graph{ - Root: abspath, - idIndex: utils.NewTruncIndex(), - driver: driver, - } - if err := graph.restore(); err != nil { - return nil, err - } - return graph, nil -} - -func (graph *Graph) restore() error { - dir, err := ioutil.ReadDir(graph.Root) - if err != nil { - return err - } - for _, v := range dir { - id := v.Name() - if graph.driver.Exists(id) { - graph.idIndex.Add(id) - } - } - utils.Debugf("Restored %d elements", len(dir)) - return nil -} - -// FIXME: Implement error subclass instead of looking at the error text -// Note: This is the way golang implements os.IsNotExist on Plan9 -func (graph *Graph) IsNotExist(err error) bool { - return err != nil && (strings.Contains(err.Error(), "does not exist") || strings.Contains(err.Error(), "No such")) -} - -// Exists returns true if an image is registered at the given id. -// If the image doesn't exist or if an error is encountered, false is returned. -func (graph *Graph) Exists(id string) bool { - if _, err := graph.Get(id); err != nil { - return false - } - return true -} - -// Get returns the image with the given id, or an error if the image doesn't exist.
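-// If the image's size is not cached yet (Size < 0), Get computes it from the driver's rootfs (diffing against the parent layer when one exists) and stores it with SaveSize.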
-func (graph *Graph) Get(name string) (*Image, error) { - id, err := graph.idIndex.Get(name) - if err != nil { - return nil, err - } - // FIXME: return nil when the image doesn't exist, instead of an error - img, err := LoadImage(graph.imageRoot(id)) - if err != nil { - return nil, err - } - if img.ID != id { - return nil, fmt.Errorf("Image stored at '%s' has wrong id '%s'", id, img.ID) - } - img.graph = graph - - if img.Size < 0 { - rootfs, err := graph.driver.Get(img.ID) - if err != nil { - return nil, fmt.Errorf("Driver %s failed to get image rootfs %s: %s", graph.driver, img.ID, err) - } - defer graph.driver.Put(img.ID) - - var size int64 - if img.Parent == "" { - if size, err = utils.TreeSize(rootfs); err != nil { - return nil, err - } - } else { - parentFs, err := graph.driver.Get(img.Parent) - if err != nil { - return nil, err - } - changes, err := archive.ChangesDirs(rootfs, parentFs) - if err != nil { - return nil, err - } - size = archive.ChangesSize(rootfs, changes) - } - - img.Size = size - if err := img.SaveSize(graph.imageRoot(id)); err != nil { - return nil, err - } - } - return img, nil -} - -// Create creates a new image and registers it in the graph. -func (graph *Graph) Create(layerData archive.ArchiveReader, container *Container, comment, author string, config *runconfig.Config) (*Image, error) { - img := &Image{ - ID: GenerateID(), - Comment: comment, - Created: time.Now().UTC(), - DockerVersion: dockerversion.VERSION, - Author: author, - Config: config, - Architecture: runtime.GOARCH, - OS: runtime.GOOS, - } - if container != nil { - img.Parent = container.Image - img.Container = container.ID - img.ContainerConfig = *container.Config - } - if err := graph.Register(nil, layerData, img); err != nil { - return nil, err - } - return img, nil -} - -// Register imports a pre-existing image into the graph. -// FIXME: pass img as first argument -func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, img *Image) (err error) { - defer func() { - // If any error occurs, remove the new dir from the driver. - // Don't check for errors since the dir might not have been created. - // FIXME: this leaves a possible race condition. - if err != nil { - graph.driver.Remove(img.ID) - } - }() - if err := ValidateID(img.ID); err != nil { - return err - } - // (This is a convenience to save time. Race conditions are taken care of by os.Rename) - if graph.Exists(img.ID) { - return fmt.Errorf("Image %s already exists", img.ID) - } - - // Ensure that the image root does not exist on the filesystem - // when it is not registered in the graph. - // This is common when you switch from one graph driver to another - if err := os.RemoveAll(graph.imageRoot(img.ID)); err != nil && !os.IsNotExist(err) { - return err - } - - // If the driver has this ID but the graph doesn't, remove it from the driver to start fresh. - // (the graph is the source of truth). - // Ignore errors, since we don't know if the driver correctly returns ErrNotExist. - // (FIXME: make that mandatory for drivers). 
- graph.driver.Remove(img.ID) - - tmp, err := graph.Mktemp("") - defer os.RemoveAll(tmp) - if err != nil { - return fmt.Errorf("Mktemp failed: %s", err) - } - - // Create root filesystem in the driver - if err := graph.driver.Create(img.ID, img.Parent); err != nil { - return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, img.ID, err) - } - // Mount the root filesystem so we can apply the diff/layer - rootfs, err := graph.driver.Get(img.ID) - if err != nil { - return fmt.Errorf("Driver %s failed to get image rootfs %s: %s", graph.driver, img.ID, err) - } - defer graph.driver.Put(img.ID) - img.graph = graph - if err := StoreImage(img, jsonData, layerData, tmp, rootfs); err != nil { - return err - } - // Commit - if err := os.Rename(tmp, graph.imageRoot(img.ID)); err != nil { - return err - } - graph.idIndex.Add(img.ID) - return nil -} - -// TempLayerArchive creates a temporary archive of the given image's filesystem layer. -// The archive is stored on disk and will be automatically deleted as soon as it has been read. -// If output is not nil, a human-readable progress bar will be written to it. -// FIXME: does this belong in Graph? How about MktempFile, let the caller use it for archives? -func (graph *Graph) TempLayerArchive(id string, compression archive.Compression, sf *utils.StreamFormatter, output io.Writer) (*archive.TempArchive, error) { - image, err := graph.Get(id) - if err != nil { - return nil, err - } - tmp, err := graph.Mktemp("") - if err != nil { - return nil, err - } - a, err := image.TarLayer() - if err != nil { - return nil, err - } - progress := utils.ProgressReader(a, 0, output, sf, false, utils.TruncateID(id), "Buffering to disk") - defer progress.Close() - return archive.NewTempArchive(progress, tmp) -} - -// Mktemp creates a temporary sub-directory inside the graph's filesystem. -func (graph *Graph) Mktemp(id string) (string, error) { - dir := path.Join(graph.Root, "_tmp", GenerateID()) - if err := os.MkdirAll(dir, 0700); err != nil { - return "", err - } - return dir, nil -} - -// setupInitLayer populates a directory with mountpoints suitable -// for bind-mounting dockerinit into the container. The mountpoint is simply an -// empty file at /.dockerinit -// -// This extra layer is used by all containers as the top-most ro layer. It protects -// the container from unwanted side-effects on the rw layer. -func setupInitLayer(initLayer string) error { - for pth, typ := range map[string]string{ - "/dev/pts": "dir", - "/dev/shm": "dir", - "/proc": "dir", - "/sys": "dir", - "/.dockerinit": "file", - "/.dockerenv": "file", - "/etc/resolv.conf": "file", - "/etc/hosts": "file", - "/etc/hostname": "file", - "/dev/console": "file", - // "var/run": "dir", - // "var/lock": "dir", - } { - parts := strings.Split(pth, "/") - prev := "/" - for _, p := range parts[1:] { - prev = path.Join(prev, p) - syscall.Unlink(path.Join(initLayer, prev)) - } - - if _, err := os.Stat(path.Join(initLayer, pth)); err != nil { - if os.IsNotExist(err) { - switch typ { - case "dir": - if err := os.MkdirAll(path.Join(initLayer, pth), 0755); err != nil { - return err - } - case "file": - if err := os.MkdirAll(path.Join(initLayer, path.Dir(pth)), 0755); err != nil { - return err - } - f, err := os.OpenFile(path.Join(initLayer, pth), os.O_CREATE, 0755) - if err != nil { - return err - } - f.Close() - } - } else { - return err - } - } - } - - // Layer is ready to use, if it wasn't before. - return nil -} - -// Check if given error is "not empty".
-// Note: this is the way golang does it internally with os.IsNotExist. -func isNotEmpty(err error) bool { - switch pe := err.(type) { - case nil: - return false - case *os.PathError: - err = pe.Err - case *os.LinkError: - err = pe.Err - } - return strings.Contains(err.Error(), " not empty") -} - -// Delete atomically removes an image from the graph. -func (graph *Graph) Delete(name string) error { - id, err := graph.idIndex.Get(name) - if err != nil { - return err - } - tmp, err := graph.Mktemp("") - if err != nil { - return err - } - graph.idIndex.Delete(id) - err = os.Rename(graph.imageRoot(id), tmp) - if err != nil { - return err - } - // Remove rootfs data from the driver - graph.driver.Remove(id) - // Remove the trashed image directory - return os.RemoveAll(tmp) -} - -// Map returns a list of all images in the graph, addressable by ID. -func (graph *Graph) Map() (map[string]*Image, error) { - images := make(map[string]*Image) - err := graph.walkAll(func(image *Image) { - images[image.ID] = image - }) - if err != nil { - return nil, err - } - return images, nil -} - -// walkAll iterates over each image in the graph, and passes it to a handler. -// The walking order is undetermined. -func (graph *Graph) walkAll(handler func(*Image)) error { - files, err := ioutil.ReadDir(graph.Root) - if err != nil { - return err - } - for _, st := range files { - if img, err := graph.Get(st.Name()); err != nil { - // Skip image - continue - } else if handler != nil { - handler(img) - } - } - return nil -} - -// ByParent returns a lookup table of images by their parent. -// If an image of id ID has 3 child images, then the value for key ID -// will be a list of 3 images. -// If an image has no children, it will not have an entry in the table. -func (graph *Graph) ByParent() (map[string][]*Image, error) { - byParent := make(map[string][]*Image) - err := graph.walkAll(func(image *Image) { - parent, err := graph.Get(image.Parent) - if err != nil { - return - } - if children, exists := byParent[parent.ID]; exists { - byParent[parent.ID] = append(children, image) - } else { - byParent[parent.ID] = []*Image{image} - } - }) - return byParent, err -} - -// Heads returns all heads in the graph, keyed by id. -// A head is an image which is not the parent of another image in the graph. -func (graph *Graph) Heads() (map[string]*Image, error) { - heads := make(map[string]*Image) - byParent, err := graph.ByParent() - if err != nil { - return nil, err - } - err = graph.walkAll(func(image *Image) { - // If it's not in the byParent lookup table, then - // it's not a parent -> so it's a head! - if _, exists := byParent[image.ID]; !exists { - heads[image.ID] = image - } - }) - return heads, err -} - -func (graph *Graph) imageRoot(id string) string { - return path.Join(graph.Root, id) -} - -func (graph *Graph) Driver() graphdriver.Driver { - return graph.driver -} diff -Nru docker.io-0.9.1~dfsg1/hack/bootcamp/README.md docker.io-1.3.2~dfsg1/hack/bootcamp/README.md --- docker.io-0.9.1~dfsg1/hack/bootcamp/README.md 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/bootcamp/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ -# Docker maintainer bootcamp - -## Introduction: we need more maintainers - -Docker is growing incredibly fast. At the time of writing, it has received over 200 contributions from 90 people, -and its API is used by dozens of 3rd-party tools. Over 1,000 issues have been opened. As the first production deployments -start going live, the growth will only accelerate.
- -Also at the time of writing, Docker has 3 full-time maintainers, and 7 part-time subsystem maintainers. If docker -is going to live up to expectations, we need more than that. - -This document describes a *bootcamp* to guide and train volunteers interested in helping the project, either with individual -contributions, maintainer work, or both. - -This bootcamp is an experiment. If you decide to go through it, consider yourself an alpha-tester. You should expect quirks, -and report them to us as you encounter them to help us smooth out the process. - - -## How it works - -The maintainer bootcamp is a 12-step program - one step for each of the maintainer's responsibilities. The aspiring maintainer must -validate each of the 12 steps by 1) studying it, 2) practicing it, and 3) getting endorsed for it. - -Steps are all equally important and can be validated in any order. Validating all 12 steps is a pre-requisite for becoming a core -maintainer, but even 1 step will make you a better contributor! - -### List of steps - -#### 1) Be a power user - -Use docker daily, build cool things with it, know its quirks inside and out. - - -#### 2) Help users - -Answer questions on IRC, Twitter, email, or in person. - - -#### 3) Manage the bug tracker - -Help triage tickets - ask the right questions, find duplicates, reference relevant resources, know when to close a ticket, take the time to go over older tickets. - - -#### 4) Improve the documentation - -Follow the documentation from scratch regularly and make sure it is still up-to-date. Find and fix inconsistencies. Remove stale information. Find a frequently asked question that is not documented. Simplify the content and the form. - - -#### 5) Evangelize the principles of docker - -Understand what the underlying goals and principles of docker are. Explain design decisions based on what docker is, and what it is not. When someone is not using docker, find how docker can be valuable to them. If they are using docker, find how they can use it better. - - -#### 6) Fix bugs - -Self-explanatory. Contribute improvements to docker which solve defects. Bugfixes should be well-tested, and prioritized by impact to the user. - - -#### 7) Improve the testing infrastructure - -Automated testing is complicated and should be perpetually improved. Invest time to improve the current tooling. Refactor existing tests, create new ones, make testing more accessible to developers, add new testing capabilities (integration tests, mocking, stress test...), improve integration between tests and documentation... - - -#### 8) Contribute features - -Improve docker to do more things, or get better at doing the same things. Features should be well-tested, not break existing APIs, respect the project goals. They should make the user's life measurably better. Features should be discussed ahead of time to avoid wasting time and duplicating effort. - - -#### 9) Refactor internals - -Improve docker to repay technical debt. Simplify code layout, improve performance, add missing comments, reduce the number of files and functions, rename functions and variables to be more readable, go over FIXMEs, etc. - -#### 10) Review and merge contributions - -Review pull requests in a timely manner, review code in detail and offer feedback. Keep a high bar without being pedantic. Share the load of testing and merging pull requests. - -#### 11) Release - -Manage a release of docker from beginning to end. Tests, final review, tags, builds, upload to mirrors, distro packaging, etc.
- -#### 12) Train other maintainers - -Contribute to training other maintainers. Give advice, delegate work, help organize the bootcamp. This also means contribute to the maintainer's manual, look for ways to improve the project organization etc. - -### How to study a step - -### How to practice a step - -### How to get endorsed for a step - - diff -Nru docker.io-0.9.1~dfsg1/hack/CONTRIBUTORS.md docker.io-1.3.2~dfsg1/hack/CONTRIBUTORS.md --- docker.io-0.9.1~dfsg1/hack/CONTRIBUTORS.md 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/CONTRIBUTORS.md 2014-11-24 17:38:01.000000000 +0000 @@ -4,20 +4,58 @@ started. They are probably not perfect, please let us know if anything feels wrong or incomplete. +## Topics + +* [Security Reports](#security-reports) +* [Design and Cleanup Proposals](#design-and-cleanup-proposals) +* [Reporting Issues](#reporting-issues) +* [Build Environment](#build-environment) +* [Contribution Guidelines](#contribution-guidelines) +* [Community Guidelines](#docker-community-guidelines) + +## Security Reports + +Please **DO NOT** file an issue for security related issues. Please send your +reports to [security@docker.com](mailto:security@docker.com) instead. + +## Design and Cleanup Proposals + +When considering a design proposal, we are looking for: + +* A description of the problem this design proposal solves +* An issue -- not a pull request -- that describes what you will take action on + * Please prefix your issue with `Proposal:` in the title +* Please review [the existing Proposals](https://github.com/dotcloud/docker/issues?direction=asc&labels=Proposal&page=1&sort=created&state=open) + before reporting a new issue. You can always pair with someone if you both + have the same idea. + +When considering a cleanup task, we are looking for: + +* A description of the refactors made + * Please note any logic changes if necessary +* A pull request with the code + * Please prefix your PR's title with `Cleanup:` so we can quickly address it. + * Your pull request must remain up to date with master, so rebase as necessary. + ## Reporting Issues -When reporting [issues](https://github.com/dotcloud/docker/issues) -on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc), -the output of `uname -a` and the output of `docker version` along with -the output of `docker info`. Please include the steps required to reproduce -the problem if possible and applicable. -This information will help us review and fix your issue faster. +When reporting [issues](https://github.com/docker/docker/issues) on +GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc). +Please include: + +* The output of `uname -a`. +* The output of `docker version`. +* The output of `docker -D info`. + +Please also include the steps required to reproduce the problem if +possible and applicable. This information will help us review and fix +your issue faster. ## Build Environment For instructions on setting up your development environment, please see our dedicated [dev environment setup -docs](http://docs.docker.io/en/latest/contributing/devenvironment/). +docs](http://docs.docker.com/contributing/devenvironment/). ## Contribution guidelines @@ -34,7 +72,7 @@ We're trying very hard to keep Docker lean and focused. We don't want it to do everything for everybody. This means that we might decide against incorporating a new feature. However, there might be a way to implement -that feature *on top of* docker. +that feature *on top of* Docker. 
### Discuss your design on the mailing list @@ -48,7 +86,7 @@ ### Create issues... Any significant improvement should be documented as [a GitHub -issue](https://github.com/dotcloud/docker/issues) before anybody +issue](https://github.com/docker/docker/issues) before anybody starts working on it. ### ...but check for existing issues first! @@ -60,12 +98,12 @@ ### Conventions -Fork the repo and make changes on your fork in a feature branch: +Fork the repository and make changes on your fork in a feature branch: -- If it's a bugfix branch, name it XXX-something where XXX is the number of the - issue +- If it's a bug fix branch, name it XXXX-something where XXXX is the number of the + issue. - If it's a feature branch, create an enhancement issue to announce your - intentions, and name it XXX-something where XXX is the number of the issue. + intentions, and name it XXXX-something where XXXX is the number of the issue. Submit unit tests for your changes. Go has a great test framework built in; use it! Take a look at existing tests for inspiration. Run the full test suite on @@ -73,22 +111,19 @@ Update the documentation when creating or modifying features. Test your documentation changes for clarity, concision, and correctness, as -well as a clean documentation build. See ``docs/README.md`` for more -information on building the docs and how docs get released. +well as a clean documentation build. See `docs/README.md` for more +information on building the docs and how they get released. Write clean code. Universally formatted code promotes ease of writing, reading, -and maintenance. Always run `go fmt` before committing your changes. Most -editors have plugins that do this automatically, and there's also a git -pre-commit hook: - -``` -curl -o .git/hooks/pre-commit https://raw.github.com/edsrzf/gofmt-git-hook/master/fmt-check && chmod +x .git/hooks/pre-commit -``` +and maintenance. Always run `gofmt -s -w file.go` on each changed file before +committing your changes. Most editors have plug-ins that do this automatically. Pull request descriptions should be as clear as possible and include a reference to all the issues that they address. -Pull requests must not contain commits from other users or branches. +Commit messages must start with a capitalized and short summary (max. 50 +chars) written in the imperative, followed by an optional, more detailed +explanatory text which is separated from the summary by an empty line. Code review comments may be added to your pull request. Discuss, then make the suggested modifications and push additional commits to your feature branch. Be @@ -96,28 +131,33 @@ request automatically, but the reviewers will not be notified unless you comment. +Pull requests must be cleanly rebased on top of master without multiple branches +mixed into the PR. + +**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your +feature branch to update your pull request rather than `merge master`. + Before the pull request is merged, make sure that you squash your commits into logical units of work using `git rebase -i` and `git push -f`. After every commit the test suite should be passing. Include documentation changes in the same commit so that a revert would remove all traces of the feature or fix. -Commits that fix or close an issue should include a reference like `Closes #XXX` -or `Fixes #XXX`, which will automatically close the issue when merged.
+Commits that fix or close an issue should include a reference like +`Closes #XXXX` or `Fixes #XXXX`, which will automatically close the +issue when merged. -Add your name to the AUTHORS file, but make sure the list is sorted and your -name and email address match your git configuration. The AUTHORS file is -regenerated occasionally from the git commit history, so a mismatch may result -in your changes being overwritten. +Please do not add yourself to the `AUTHORS` file, as it is regenerated +regularly from the Git history. ### Merge approval -Docker maintainers use LGTM (looks good to me) in comments on the code review +Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to indicate acceptance. A change requires LGTMs from an absolute majority of the maintainers of each -component affected. For example, if a change affects docs/ and registry/, it -needs an absolute majority from the maintainers of docs/ AND, separately, an -absolute majority of the maintainers of registry. +component affected. For example, if a change affects `docs/` and `registry/`, it +needs an absolute majority from the maintainers of `docs/` AND, separately, an +absolute majority of the maintainers of `registry/`. For more details see [MAINTAINERS.md](hack/MAINTAINERS.md) @@ -126,66 +166,151 @@ The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch. The rules are pretty simple: if you -can certify the below: +can certify the below (from +[developercertificate.org](http://developercertificate.org/)): ``` -Docker Developer Certificate of Origin 1.1 +Developer Certificate of Origin +Version 1.1 -By making a contribution to the Docker Project ("Project"), I represent and -warrant that: - -a. The contribution was created in whole or in part by me and I have the right -to submit the contribution on my own behalf or on behalf of a third party who -has authorized me to submit this contribution to the Project; or - -b. The contribution is based upon previous work that, to the best of my -knowledge, is covered under an appropriate open source license and I have the -right and authorization to submit that work with modifications, whether -created in whole or in part by me, under the same open source license (unless -I am permitted to submit under a different license) that I have identified in -the contribution; or - -c. The contribution was provided directly to me by some other person who -represented and warranted (a) or (b) and I have not modified it. - -d. I understand and agree that this Project and the contribution are publicly -known and that a record of the contribution (including all personal -information I submit with it, including my sign-off record) is maintained -indefinitely and may be redistributed consistent with this Project or the open -source license(s) involved. +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
```
-then you just add a line to every git commit message:
+Then you just add a line to every git commit message:
-    Docker-DCO-1.1-Signed-off-by: Joe Smith <joe.smith@email.com> (github: github_handle)
+    Signed-off-by: Joe Smith <joe.smith@email.com>
-using your real name (sorry, no pseudonyms or anonymous contributions.)
+Using your real name (sorry, no pseudonyms or anonymous contributions.)
-One way to automate this, is customise your get ``commit.template`` by adding
-a ``prepare-commit-msg`` hook to your docker checkout:
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
-```
-curl -o .git/hooks/prepare-commit-msg https://raw.github.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg
-```
-
-* Note: the above script expects to find your GitHub user name in ``git config --get github.user``
+Note that the old-style `Docker-DCO-1.1-Signed-off-by: ...` format is still
+accepted, so there is no need to update outstanding pull requests to the new
+format right away, but please do adjust your processes for future contributions.
#### Small patch exception There are several exceptions to the signing requirement. Currently these are: * Your patch fixes spelling or grammar errors.
-* Your patch is a single line change to documentation.
+* Your patch is a single line change to documentation contained in the
+  `docs` directory.
+* Your patch fixes Markdown formatting or syntax errors in the
+  documentation contained in the `docs` directory.
-If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.io)
+If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.com)
### How can I become a maintainer?
-* Step 1: learn the component inside out
-* Step 2: make yourself useful by contributing code, bugfixes, support etc.
-* Step 3: volunteer on the irc channel (#docker@freenode)
-* Step 4: propose yourself at a scheduled docker meeting in #docker-dev
+* Step 1: Learn the component inside out
+* Step 2: Make yourself useful by contributing code, bug fixes, support etc.
+* Step 3: Volunteer on the IRC channel (#docker at Freenode)
+* Step 4: Propose yourself at a scheduled docker meeting in #docker-dev
+
+Don't forget: being a maintainer is a time investment. Make sure you
+will have time to make yourself available.
You don't have to be a
+maintainer to make a difference on the project!
+
+### IRC Meetings
+
+There are two monthly meetings taking place on #docker-dev IRC to accommodate all timezones.
+Anybody can ask for a topic to be discussed prior to the meeting.
+
+If you feel the conversation is going off-topic, feel free to point it out.
+
+For the exact dates and times, have a look at [the irc-minutes repo](https://github.com/docker/irc-minutes).
+The repo also contains all the notes from previous meetings.
+
+## Docker Community Guidelines
+
+We want to keep the Docker community awesome, growing and collaborative. We
+need your help to keep it that way. To help with this we've come up with some
+general guidelines for the community as a whole:
+
+* Be nice: Be courteous, respectful and polite to fellow community members: no
+  regional, racial, gender, or other abuse will be tolerated. We like nice people
+  way better than mean ones!
+
+* Encourage diversity and participation: Make everyone in our community
+  feel welcome, regardless of their background and the extent of their
+  contributions, and do everything possible to encourage participation in
+  our community.
+
+* Keep it legal: Basically, don't get us in trouble. Share only content that
+  you own, do not share private or sensitive information, and don't break the
+  law.
+
+* Stay on topic: Make sure that you are posting to the correct channel
+  and avoid off-topic discussions. Remember when you update an issue or
+  respond to an email you are potentially sending to a large number of
+  people. Please consider this before you update. Also remember that
+  nobody likes spam.
+
+### Guideline Violations — 3 Strikes Method
+
+The point of this section is not to find opportunities to punish people, but we
+do need a fair way to deal with people who are making our community suck.
+
+1. First occurrence: We'll give you a friendly but public reminder that the
+   behavior is inappropriate according to our guidelines.
+
+2. Second occurrence: We will send you a private message with a warning that
+   any additional violations will result in removal from the community.
+
+3. Third occurrence: Depending on the violation, we may need to delete or ban
+   your account.
+
+**Notes:**
+
+* Obvious spammers are banned on first occurrence. If we don't do this, we'll
+  have spam all over the place.
+
+* Violations are forgiven after 6 months of good behavior, and we won't
+  hold a grudge.
+
+* People who commit minor infractions will get some education,
+  rather than hammering them in the 3 strikes process.
+
+* The rules apply equally to everyone in the community, no matter how
+  much you've contributed.
+
+* Extreme violations of a threatening, abusive, destructive or illegal nature
+  will be addressed immediately and are not subject to 3 strikes or
+  forgiveness.
-Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available.
-You don't have to be a maintainer to make a difference on the project!
+* Contact james@docker.com to report abuse or appeal violations. In the case of
+  appeals, we know that mistakes happen, and we'll work with you to come up with
+  a fair solution if there has been a misunderstanding.
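
The formatting rule referenced earlier is easy to script. A minimal sketch, assuming a feature branch cut from `master` and a GNU userland (`xargs -r`); the pipeline itself is illustrative and not part of the patch above:

```
# Format only the Go files touched on this branch, relative to master.
git diff --name-only master... -- '*.go' | xargs -r gofmt -s -w
```

This rewrites only the files your branch changed, so unrelated formatting churn stays out of your pull request.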
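
Putting the summary, body, issue reference, and sign-off rules together, a finished commit message might look like the sketch below; the summary text, author, and email are placeholders, and `#XXXX` stands for a real issue number as elsewhere in this guide:

```
Fix cgroup hierarchy detection in hack/dind

Mount each cgroup hierarchy exactly as it appears in the parent
system, and warn when the "devices" cgroup is not in its own
hierarchy.

Fixes #XXXX

Signed-off-by: Joe Smith <joe.smith@email.com>
```

The empty line after the summary is what separates it from the body for tools like `git log --oneline`.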
diff -Nru docker.io-0.9.1~dfsg1/hack/dind docker.io-1.3.2~dfsg1/hack/dind --- docker.io-0.9.1~dfsg1/hack/dind 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/dind 2014-11-24 17:38:01.000000000 +0000 @@ -1,40 +1,43 @@ #!/bin/bash
+set -e
# DinD: a wrapper script which allows docker to be run inside a docker container.
-# Original version by Jerome Petazzoni
-# See the blog post: http://blog.docker.io/2013/09/docker-can-now-run-within-docker/
+# Original version by Jerome Petazzoni
+# See the blog post: http://blog.docker.com/2013/09/docker-can-now-run-within-docker/
#
# This script should be executed inside a docker container in privileged mode
-# ('docker run -privileged', introduced in docker 0.6).
+# ('docker run --privileged', introduced in docker 0.6).
# Usage: dind CMD [ARG...]
+# apparmor sucks and Docker needs to know that it's in a container (c) @tianon
+export container=docker
+
# First, make sure that cgroups are mounted correctly.
-CGROUP=/sys/fs/cgroup
+CGROUP=/cgroup
-[ -d $CGROUP ] ||
-	mkdir $CGROUP
+mkdir -p "$CGROUP"
-mountpoint -q $CGROUP ||
+if ! mountpoint -q "$CGROUP"; then
	mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {
-		echo "Could not make a tmpfs mount. Did you use -privileged?"
+		echo >&2 'Could not make a tmpfs mount. Did you use --privileged?'
		exit 1
	}
+fi
-if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
-then
-	mount -t securityfs none /sys/kernel/security || {
-		echo "Could not mount /sys/kernel/security."
-		echo "AppArmor detection and -privileged mode might break."
-	}
+if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
+	mount -t securityfs none /sys/kernel/security || {
+		echo >&2 'Could not mount /sys/kernel/security.'
+		echo >&2 'AppArmor detection and -privileged mode might break.'
+	}
fi
# Mount the cgroup hierarchies exactly as they are in the parent system.
-for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)
-do
-	[ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS
-	mountpoint -q $CGROUP/$SUBSYS ||
-		mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS
+for SUBSYS in $(cut -d: -f2 /proc/1/cgroup); do
+	mkdir -p "$CGROUP/$SUBSYS"
+	if ! mountpoint -q $CGROUP/$SUBSYS; then
+		mount -n -t cgroup -o "$SUBSYS" cgroup "$CGROUP/$SUBSYS"
+	fi
	# The two following sections address a bug which manifests itself
	# by a cryptic "lxc-start: no ns_cgroup option specified" when
@@ -49,45 +52,37 @@
	# Systemd and OpenRC (and possibly others) both create such a
	# cgroup. To avoid the aforementioned bug, we symlink "foo" to
	# "name=foo". This shouldn't have any adverse effect.
-	echo $SUBSYS | grep -q ^name= && {
-		NAME=$(echo $SUBSYS | sed s/^name=//)
-		ln -s $SUBSYS $CGROUP/$NAME
-	}
+	name="${SUBSYS#name=}"
+	if [ "$name" != "$SUBSYS" ]; then
+		ln -s "$SUBSYS" "$CGROUP/$name"
+	fi
	# Likewise, on at least one system, it has been reported that
	# systemd would mount the CPU and CPU accounting controllers
	# (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
	# but on a directory called "cpu,cpuacct" (note the inversion
	# in the order of the groups). This tries to work around it.
-	[ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct
+	if [ "$SUBSYS" = 'cpuacct,cpu' ]; then
+		ln -s "$SUBSYS" "$CGROUP/cpu,cpuacct"
+	fi
done
# Note: as I write those lines, the LXC userland tools cannot setup
# a "sub-container" properly if the "devices" cgroup is not in its
# own hierarchy. Let's detect this and issue a warning.
-grep -q :devices: /proc/1/cgroup ||
-	echo "WARNING: the 'devices' cgroup should be in its own hierarchy."
-grep -qw devices /proc/1/cgroup ||
-	echo "WARNING: it looks like the 'devices' cgroup is not mounted."
-
-# Now, close extraneous file descriptors.
-pushd /proc/self/fd >/dev/null
-for FD in *
-do
-	case "$FD" in
-	# Keep stdin/stdout/stderr
-	[012])
-		;;
-	# Nuke everything else
-	*)
-		eval exec "$FD>&-"
-		;;
-	esac
-done
-popd >/dev/null
+if ! grep -q :devices: /proc/1/cgroup; then
+	echo >&2 'WARNING: the "devices" cgroup should be in its own hierarchy.'
+fi
+if ! grep -qw devices /proc/1/cgroup; then
+	echo >&2 'WARNING: it looks like the "devices" cgroup is not mounted.'
+fi
# Mount /tmp
mount -t tmpfs none /tmp
-[ "$1" ] && exec "$@"
-echo "You probably want to run hack/make.sh, or maybe a shell?"
+if [ $# -gt 0 ]; then
+	exec "$@"
+fi
+
+echo >&2 'ERROR: No command specified.'
+echo >&2 'You probably want to run hack/make.sh, or maybe a shell?'
diff -Nru docker.io-0.9.1~dfsg1/hack/generate-authors.sh docker.io-1.3.2~dfsg1/hack/generate-authors.sh --- docker.io-0.9.1~dfsg1/hack/generate-authors.sh 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/generate-authors.sh 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,15 @@
+#!/bin/bash
+set -e
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.."
+
+# see also ".mailmap" for how email addresses and names are deduplicated
+
+{
+	cat <<-'EOH'
+	# This file lists all individuals having contributed content to the repository.
+	# For how it is generated, see `hack/generate-authors.sh`.
+	EOH
+	echo
+	git log --format='%aN <%aE>' | sort -uf
+} > AUTHORS
diff -Nru docker.io-0.9.1~dfsg1/hack/getmaintainer.sh docker.io-1.3.2~dfsg1/hack/getmaintainer.sh --- docker.io-0.9.1~dfsg1/hack/getmaintainer.sh 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/getmaintainer.sh 2014-11-24 17:38:01.000000000 +0000 @@ -1,4 +1,5 @@
-#!/bin/sh
+#!/usr/bin/env bash
+set -e
if [ $# -ne 1 ]; then
	echo >&2 "Usage: $0 PATH"
@@ -34,6 +35,7 @@
			fi
		done;
	} < MAINTAINERS
+	break
	fi
	if [ -d .git ]; then
		break
@@ -46,13 +48,15 @@
PRIMARY="${MAINTAINERS[0]}"
PRIMARY_FIRSTNAME=$(echo $PRIMARY | cut -d' ' -f1)
+LGTM_COUNT=${#MAINTAINERS[@]}
+LGTM_COUNT=$((LGTM_COUNT/2 +1))
firstname() {
	echo $1 | cut -d' ' -f1
}
-echo "--- $PRIMARY is the PRIMARY MAINTAINER of $1. Assign pull requests to him."
-echo "$(firstname $PRIMARY) may assign pull requests to the following secondary maintainers:"
+echo "A pull request in $1 will need $LGTM_COUNT LGTMs to be merged."
+echo "--- $PRIMARY is the PRIMARY MAINTAINER of $1."
for SECONDARY in "${MAINTAINERS[@]:1}"; do
	echo "--- $SECONDARY"
done
diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/buildbot/github.py docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/buildbot/github.py --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/buildbot/github.py 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/buildbot/github.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,176 +0,0 @@ -# This file is part of Buildbot. Buildbot is free software: you can -# redistribute it and/or modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation, version 2. -# -# This program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details.
-# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., 51 -# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Copyright Buildbot Team Members - -#!/usr/bin/env python -""" -github_buildbot.py is based on git_buildbot.py - -github_buildbot.py will determine the repository information from the JSON -HTTP POST it receives from github.com and build the appropriate repository. -If your github repository is private, you must add a ssh key to the github -repository for the user who initiated the build on the buildslave. - -""" - -import re -import datetime -from twisted.python import log -import calendar - -try: - import json - assert json -except ImportError: - import simplejson as json - -# python is silly about how it handles timezones -class fixedOffset(datetime.tzinfo): - """ - fixed offset timezone - """ - def __init__(self, minutes, hours, offsetSign = 1): - self.minutes = int(minutes) * offsetSign - self.hours = int(hours) * offsetSign - self.offset = datetime.timedelta(minutes = self.minutes, - hours = self.hours) - - def utcoffset(self, dt): - return self.offset - - def dst(self, dt): - return datetime.timedelta(0) - -def convertTime(myTestTimestamp): - #"1970-01-01T00:00:00+00:00" - # Normalize myTestTimestamp - if myTestTimestamp[-1] == 'Z': - myTestTimestamp = myTestTimestamp[:-1] + '-00:00' - matcher = re.compile(r'(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)([-+])(\d\d):(\d\d)') - result = matcher.match(myTestTimestamp) - (year, month, day, hour, minute, second, offsetsign, houroffset, minoffset) = \ - result.groups() - if offsetsign == '+': - offsetsign = 1 - else: - offsetsign = -1 - - offsetTimezone = fixedOffset( minoffset, houroffset, offsetsign ) - myDatetime = datetime.datetime( int(year), - int(month), - int(day), - int(hour), - int(minute), - int(second), - 0, - offsetTimezone) - return calendar.timegm( myDatetime.utctimetuple() ) - -def getChanges(request, options = None): - """ - Reponds only to POST events and starts the build process - - :arguments: - request - the http request object - """ - payload = json.loads(request.args['payload'][0]) - import urllib,datetime - fname = str(datetime.datetime.now()).replace(' ','_').replace(':','-')[:19] - # Github event debug - # open('github_{0}.json'.format(fname),'w').write(json.dumps(json.loads(urllib.unquote(request.args['payload'][0])), sort_keys = True, indent = 2)) - - if 'pull_request' in payload: - user = payload['pull_request']['user']['login'] - repo = payload['pull_request']['head']['repo']['name'] - repo_url = payload['pull_request']['head']['repo']['html_url'] - else: - user = payload['repository']['owner']['name'] - repo = payload['repository']['name'] - repo_url = payload['repository']['url'] - project = request.args.get('project', None) - if project: - project = project[0] - elif project is None: - project = '' - # This field is unused: - #private = payload['repository']['private'] - changes = process_change(payload, user, repo, repo_url, project) - log.msg("Received %s changes from github" % len(changes)) - return (changes, 'git') - -def process_change(payload, user, repo, repo_url, project): - """ - Consumes the JSON as a python object and actually starts the build. - - :arguments: - payload - Python Object that represents the JSON sent by GitHub Service - Hook. 
- """ - changes = [] - - newrev = payload['after'] if 'after' in payload else payload['pull_request']['head']['sha'] - refname = payload['ref'] if 'ref' in payload else payload['pull_request']['head']['ref'] - - # We only care about regular heads, i.e. branches - match = re.match(r"^(refs\/heads\/|)([^/]+)$", refname) - if not match: - log.msg("Ignoring refname `%s': Not a branch" % refname) - return [] - - branch = match.groups()[1] - if re.match(r"^0*$", newrev): - log.msg("Branch `%s' deleted, ignoring" % branch) - return [] - else: - if 'pull_request' in payload: - if payload['action'] == 'closed': - log.msg("PR#{} closed, ignoring".format(payload['number'])) - return [] - changes = [{ - 'category' : 'github_pullrequest', - 'who' : '{0} - PR#{1}'.format(user,payload['number']), - 'files' : [], - 'comments' : payload['pull_request']['title'], - 'revision' : newrev, - 'when' : convertTime(payload['pull_request']['updated_at']), - 'branch' : branch, - 'revlink' : '{0}/commit/{1}'.format(repo_url,newrev), - 'repository' : repo_url, - 'project' : project }] - return changes - for commit in payload['commits']: - files = [] - if 'added' in commit: - files.extend(commit['added']) - if 'modified' in commit: - files.extend(commit['modified']) - if 'removed' in commit: - files.extend(commit['removed']) - when = convertTime( commit['timestamp']) - log.msg("New revision: %s" % commit['id'][:8]) - chdict = dict( - who = commit['author']['name'] - + " <" + commit['author']['email'] + ">", - files = files, - comments = commit['message'], - revision = commit['id'], - when = when, - branch = branch, - revlink = commit['url'], - repository = repo_url, - project = project) - changes.append(chdict) - return changes diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/buildbot/master.cfg docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/buildbot/master.cfg --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/buildbot/master.cfg 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/buildbot/master.cfg 1970-01-01 00:00:00.000000000 +0000 @@ -1,161 +0,0 @@ -import os, re -from buildbot.buildslave import BuildSlave -from buildbot.schedulers.forcesched import ForceScheduler -from buildbot.schedulers.basic import SingleBranchScheduler -from buildbot.schedulers.timed import Nightly -from buildbot.changes import filter -from buildbot.config import BuilderConfig -from buildbot.process.factory import BuildFactory -from buildbot.process.properties import Property -from buildbot.steps.shell import ShellCommand -from buildbot.status import html, words -from buildbot.status.web import authz, auth -from buildbot.status.mail import MailNotifier - - -def ENV(x): - '''Promote an environment variable for global use returning its value''' - retval = os.environ.get(x, '') - globals()[x] = retval - return retval - - -class TestCommand(ShellCommand): - '''Extend ShellCommand with optional summary logs''' - def __init__(self, *args, **kwargs): - super(TestCommand, self).__init__(*args, **kwargs) - - def createSummary(self, log): - exit_status = re.sub(r'.+\n\+ exit (\d+).+', - r'\1', log.getText()[-100:], flags=re.DOTALL) - if exit_status != '0': - return - # Infer coverage path from log - if '+ COVERAGE_PATH' in log.getText(): - path = re.sub(r'.+\+ COVERAGE_PATH=((.+?)-\d+).+', - r'\2/\1', log.getText(), flags=re.DOTALL) - url = '{}coverage/{}/index.html'.format(c['buildbotURL'], path) - self.addURL('coverage', url) - elif 'COVERAGE_FILE' in log.getText(): - path = 
re.sub(r'.+\+ COVERAGE_FILE=((.+?)-\d+).+', - r'\2/\1', log.getText(), flags=re.DOTALL) - url = '{}coverage/{}/index.html'.format(c['buildbotURL'], path) - self.addURL('coverage', url) - - -PORT_WEB = 8000 # Buildbot webserver port -PORT_GITHUB = 8011 # Buildbot github hook port -PORT_MASTER = 9989 # Port where buildbot master listen buildworkers - -BUILDBOT_URL = '//localhost:{}/'.format(PORT_WEB) -DOCKER_REPO = 'https://github.com/docker-test/docker' -DOCKER_TEST_ARGV = 'HEAD {}'.format(DOCKER_REPO) -REGISTRY_REPO = 'https://github.com/docker-test/docker-registry' -REGISTRY_TEST_ARGV = 'HEAD {}'.format(REGISTRY_REPO) -if ENV('DEPLOYMENT') == 'staging': - BUILDBOT_URL = "//docker-ci-stage.docker.io/" -if ENV('DEPLOYMENT') == 'production': - BUILDBOT_URL = '//docker-ci.docker.io/' - DOCKER_REPO = 'https://github.com/dotcloud/docker' - DOCKER_TEST_ARGV = '' - REGISTRY_REPO = 'https://github.com/dotcloud/docker-registry' - REGISTRY_TEST_ARGV = '' - -# Credentials set by setup.sh from deployment.py -ENV('WEB_USER') -ENV('WEB_IRC_PWD') -ENV('BUILDBOT_PWD') -ENV('SMTP_USER') -ENV('SMTP_PWD') -ENV('EMAIL_RCP') -ENV('IRC_CHANNEL') - - -c = BuildmasterConfig = {} - -c['title'] = "docker-ci" -c['titleURL'] = "waterfall" -c['buildbotURL'] = BUILDBOT_URL -c['db'] = {'db_url':"sqlite:///state.sqlite"} -c['slaves'] = [BuildSlave('buildworker', BUILDBOT_PWD)] -c['slavePortnum'] = PORT_MASTER - - -# Schedulers -c['schedulers'] = [ForceScheduler(name='trigger', builderNames=[ - 'docker', 'docker-registry', 'nightlyrelease', 'backup'])] -c['schedulers'] += [SingleBranchScheduler(name="docker", treeStableTimer=None, - change_filter=filter.ChangeFilter(branch='master', - repository=DOCKER_REPO), builderNames=['docker'])] -c['schedulers'] += [SingleBranchScheduler(name="registry", treeStableTimer=None, - change_filter=filter.ChangeFilter(branch='master', - repository=REGISTRY_REPO), builderNames=['docker-registry'])] -c['schedulers'] += [SingleBranchScheduler(name='docker-pr', treeStableTimer=None, - change_filter=filter.ChangeFilter(category='github_pullrequest', - project='docker'), builderNames=['docker-pr'])] -c['schedulers'] += [SingleBranchScheduler(name='docker-registry-pr', treeStableTimer=None, - change_filter=filter.ChangeFilter(category='github_pullrequest', - project='docker-registry'), builderNames=['docker-registry-pr'])] -c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=[ - 'nightlyrelease', 'backup'], hour=7, minute=00)] - - -# Builders - -# Backup -factory = BuildFactory() -factory.addStep(TestCommand(description='backup', logEnviron=False, - usePTY=True, command='/docker-ci/tool/backup.py')) -c['builders'] = [BuilderConfig(name='backup',slavenames=['buildworker'], - factory=factory)] - -# Docker test -factory = BuildFactory() -factory.addStep(TestCommand(description='docker', logEnviron=False, - usePTY=True, command='/docker-ci/dockertest/docker {}'.format(DOCKER_TEST_ARGV))) -c['builders'] += [BuilderConfig(name='docker',slavenames=['buildworker'], - factory=factory)] - -# Docker pull request test -factory = BuildFactory() -factory.addStep(TestCommand(description='docker-pr', logEnviron=False, - usePTY=True, command=['/docker-ci/dockertest/docker', - Property('revision'), Property('repository'), Property('branch')])) -c['builders'] += [BuilderConfig(name='docker-pr',slavenames=['buildworker'], - factory=factory)] - -# docker-registry test -factory = BuildFactory() -factory.addStep(TestCommand(description='docker-registry', logEnviron=False, - usePTY=True, 
command='/docker-ci/dockertest/docker-registry {}'.format(REGISTRY_TEST_ARGV))) -c['builders'] += [BuilderConfig(name='docker-registry',slavenames=['buildworker'], - factory=factory)] - -# Docker registry pull request test -factory = BuildFactory() -factory.addStep(TestCommand(description='docker-registry-pr', logEnviron=False, - usePTY=True, command=['/docker-ci/dockertest/docker-registry', - Property('revision'), Property('repository'), Property('branch')])) -c['builders'] += [BuilderConfig(name='docker-registry-pr',slavenames=['buildworker'], - factory=factory)] - -# Docker nightly release -factory = BuildFactory() -factory.addStep(ShellCommand(description='NightlyRelease',logEnviron=False, - usePTY=True, command=['/docker-ci/dockertest/nightlyrelease'])) -c['builders'] += [BuilderConfig(name='nightlyrelease',slavenames=['buildworker'], - factory=factory)] - -# Status -authz_cfg = authz.Authz(auth=auth.BasicAuth([(WEB_USER, WEB_IRC_PWD)]), - forceBuild='auth') -c['status'] = [html.WebStatus(http_port=PORT_WEB, authz=authz_cfg)] -c['status'].append(html.WebStatus(http_port=PORT_GITHUB, allowForce=True, - change_hook_dialects={ 'github': True })) -c['status'].append(MailNotifier(fromaddr='docker-test@docker.io', - sendToInterestedUsers=False, extraRecipients=[EMAIL_RCP], - mode='failing', relayhost='smtp.mailgun.org', smtpPort=587, useTls=True, - smtpUser=SMTP_USER, smtpPassword=SMTP_PWD)) -c['status'].append(words.IRC("irc.freenode.net", "dockerqabot", - channels=[IRC_CHANNEL], password=WEB_IRC_PWD, allowForce=True, - notify_events={'exception':1, 'successToFailure':1, 'failureToSuccess':1})) diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -docker-ci: - image: "docker-ci/docker-ci" - release_name: "docker-ci-0.5.6" - ports: ["80","2222:22","8011:8011"] - register: "80" - volumes: ["/run:/var/socket","/home/docker-ci:/data/docker-ci"] - command: [] - env: - - "DEPLOYMENT=production" - - "IRC_CHANNEL=docker-testing" - - "BACKUP_BUCKET=backup-ci" - - "$WEB_USER" - - "$WEB_IRC_PWD" - - "$BUILDBOT_PWD" - - "$AWS_ACCESS_KEY" - - "$AWS_SECRET_KEY" - - "$GPG_PASSPHRASE" - - "$BACKUP_AWS_ID" - - "$BACKUP_AWS_SECRET" - - "$SMTP_USER" - - "$SMTP_PWD" - - "$EMAIL_RCP" diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/dcr/prod/settings.yml docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/dcr/prod/settings.yml --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/dcr/prod/settings.yml 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/dcr/prod/settings.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -default: - hipaches: ['192.168.100.67:6379'] - daemons: ['192.168.100.67:4243'] - use_ssh: False - diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -docker-ci: - image: "docker-ci/docker-ci" - release_name: "docker-ci-stage" - ports: 
["80","2222:22","8011:8011"] - register: "80" - volumes: ["/run:/var/socket","/home/docker-ci:/data/docker-ci"] - command: [] - env: - - "DEPLOYMENT=staging" - - "IRC_CHANNEL=docker-testing-staging" - - "BACKUP_BUCKET=ci-backup-stage" - - "$BACKUP_AWS_ID" - - "$BACKUP_AWS_SECRET" - - "$WEB_USER" - - "$WEB_IRC_PWD" - - "$BUILDBOT_PWD" - - "$AWS_ACCESS_KEY" - - "$AWS_SECRET_KEY" - - "$GPG_PASSPHRASE" - - "$SMTP_USER" - - "$SMTP_PWD" - - "$EMAIL_RCP" diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/dcr/stage/settings.yml docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/dcr/stage/settings.yml --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/dcr/stage/settings.yml 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/dcr/stage/settings.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -default: - hipaches: ['192.168.100.65:6379'] - daemons: ['192.168.100.65:4243'] - use_ssh: False - diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,52 +0,0 @@ -#!/bin/bash - -export PATH='/go/bin':$PATH -export DOCKER_PATH='/go/src/github.com/dotcloud/docker' - -# Signal coverage report name, parsed by docker-ci -set -x -COVERAGE_PATH=$(date +"docker-%Y%m%d%H%M%S") -set +x - -REPORTS="/data/$COVERAGE_PATH" -INDEX="$REPORTS/index.html" - -# Test docker -cd $DOCKER_PATH -./hack/make.sh test; exit_status=$? -PROFILE_PATH="$(ls -d $DOCKER_PATH/bundles/* | sed -n '$ p')/test/coverprofiles" - -if [ "$exit_status" -eq "0" ]; then - # Download coverage dependencies - go get github.com/axw/gocov/gocov - go get -u github.com/matm/gocov-html - - # Create coverage report - mkdir -p $REPORTS - cd $PROFILE_PATH - cat > $INDEX << "EOF" - - - - - -Docker Coverage Report - -

    Docker Coverage Report

    - - -EOF - for profile in *; do - gocov convert $profile | gocov-html >$REPORTS/$profile.html - echo "" >> $INDEX - done - echo "
    packagepct
    $profile" >> $INDEX - go tool cover -func=$profile | sed -En '$ s/.+\t(.+)/\1/p' >> $INDEX - echo "
    " >> $INDEX -fi - -# Signal test and coverage result, parsed by docker-ci -set -x -exit $exit_status - diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/Dockerfile docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/Dockerfile --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/Dockerfile 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -# DOCKER-VERSION: 0.7.6 -# AUTHOR: Daniel Mizyrycki -# DESCRIPTION: docker-ci continuous integration service -# TO_BUILD: docker build -rm -t docker-ci/docker-ci . -# TO_RUN: docker run -rm -i -t -p 8000:80 -p 2222:22 -v /run:/var/socket \ -# -v /data/docker-ci:/data/docker-ci docker-ci/docker-ci - -from ubuntu:12.04 -maintainer Daniel Mizyrycki - -ENV DEBIAN_FRONTEND noninteractive -RUN echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > \ - /etc/apt/sources.list; apt-get update -RUN apt-get install -y --no-install-recommends python2.7 python-dev \ - libevent-dev git supervisor ssh rsync less vim sudo gcc wget nginx -RUN cd /tmp; wget http://python-distribute.org/distribute_setup.py -RUN cd /tmp; python distribute_setup.py; easy_install pip; rm distribute_setup.py - -RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 -RUN echo 'deb http://get.docker.io/ubuntu docker main' > \ - /etc/apt/sources.list.d/docker.list; apt-get update -RUN apt-get install -y lxc-docker-0.8.0 -RUN pip install SQLAlchemy==0.7.10 buildbot buildbot-slave pyopenssl boto -RUN ln -s /var/socket/docker.sock /run/docker.sock - -ADD . /docker-ci -RUN /docker-ci/setup.sh - -ENTRYPOINT ["supervisord", "-n"] diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/dockertest/docker docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/dockertest/docker --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/dockertest/docker 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/dockertest/docker 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -#!/usr/bin/env bash -set -x - -PROJECT_NAME=$(basename $0) - -docker run -rm -u sysadmin -e DEPLOYMENT=$DEPLOYMENT -v /run:/var/socket \ - -v /home/docker-ci/coverage/$PROJECT_NAME:/data docker-ci/testbuilder $PROJECT_NAME $1 $2 $3 - diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/dockertest/docker-registry docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/dockertest/docker-registry --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/dockertest/docker-registry 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/dockertest/docker-registry 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -#!/usr/bin/env bash -set -x - -PROJECT_NAME=$(basename $0) - -docker run -rm -u sysadmin -e DEPLOYMENT=$DEPLOYMENT -v /run:/var/socket \ - -v /home/docker-ci/coverage/$PROJECT_NAME:/data docker-ci/testbuilder $PROJECT_NAME $1 $2 $3 - diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/dockertest/nightlyrelease docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/dockertest/nightlyrelease --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/dockertest/nightlyrelease 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/dockertest/nightlyrelease 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -if [ "$DEPLOYMENT" == "production" ]; then - AWS_S3_BUCKET='test.docker.io' -else - 
AWS_S3_BUCKET='get-staging.docker.io' -fi - -docker run -rm -privileged -v /run:/var/socket \ - -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY \ - -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE \ - -e DOCKER_RELEASE=1 -e DEPLOYMENT=$DEPLOYMENT docker-ci/testbuilder docker - diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/dockertest/project docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/dockertest/project --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/dockertest/project 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/dockertest/project 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -#!/usr/bin/env bash -set -x - -PROJECT_NAME=$(basename $0) - -docker run -rm -u sysadmin -e DEPLOYMENT=$DEPLOYMENT -v /run:/var/socket \ - -v /home/docker-ci/coverage/$PROJECT_NAME:/data docker-ci/testbuilder $PROJECT_NAME $1 $2 $3 - diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/functionaltests/test_index.py docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/functionaltests/test_index.py --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/functionaltests/test_index.py 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/functionaltests/test_index.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,61 +0,0 @@ -#!/usr/bin/python - -import os -username, password = os.environ['DOCKER_CREDS'].split(':') - -from selenium import webdriver -from selenium.webdriver.common.by import By -from selenium.webdriver.common.keys import Keys -from selenium.webdriver.support.ui import Select -from selenium.common.exceptions import NoSuchElementException -import unittest, time, re - -class Docker(unittest.TestCase): - def setUp(self): - self.driver = webdriver.PhantomJS() - self.driver.implicitly_wait(30) - self.base_url = "http://www.docker.io/" - self.verificationErrors = [] - self.accept_next_alert = True - - def test_docker(self): - driver = self.driver - print "Login into {0} as login user {1} ...".format(self.base_url,username) - driver.get(self.base_url + "/") - driver.find_element_by_link_text("INDEX").click() - driver.find_element_by_link_text("login").click() - driver.find_element_by_id("id_username").send_keys(username) - driver.find_element_by_id("id_password").send_keys(password) - print "Checking login user ..." 
- driver.find_element_by_css_selector("input[type=\"submit\"]").click() - try: self.assertEqual("test", driver.find_element_by_css_selector("h3").text) - except AssertionError as e: self.verificationErrors.append(str(e)) - print "Login user {0} found".format(username) - - def is_element_present(self, how, what): - try: self.driver.find_element(by=how, value=what) - except NoSuchElementException, e: return False - return True - - def is_alert_present(self): - try: self.driver.switch_to_alert() - except NoAlertPresentException, e: return False - return True - - def close_alert_and_get_its_text(self): - try: - alert = self.driver.switch_to_alert() - alert_text = alert.text - if self.accept_next_alert: - alert.accept() - else: - alert.dismiss() - return alert_text - finally: self.accept_next_alert = True - - def tearDown(self): - self.driver.quit() - self.assertEqual([], self.verificationErrors) - -if __name__ == "__main__": - unittest.main() diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/functionaltests/test_registry.sh docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/functionaltests/test_registry.sh --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/functionaltests/test_registry.sh 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/functionaltests/test_registry.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -#!/bin/sh - -set -x - -# Cleanup -rm -rf docker-registry - -# Setup the environment -export SETTINGS_FLAVOR=test -export DOCKER_REGISTRY_CONFIG=config_test.yml -export PYTHONPATH=$(pwd)/docker-registry/test - -# Get latest docker registry -git clone -q https://github.com/dotcloud/docker-registry.git -cd docker-registry -sed -Ei "s#(boto_bucket: ).+#\1_env:S3_BUCKET#" config_test.yml - -# Get dependencies -pip install -q -r requirements.txt -pip install -q -r test-requirements.txt -pip install -q tox - -# Run registry tests -tox || exit 1 -python -m unittest discover -p s3.py -s test || exit 1 -python -m unittest discover -p workflow.py -s test - diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/MAINTAINERS docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/MAINTAINERS --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/MAINTAINERS 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -Daniel Mizyrycki (@mzdaniel) diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/nginx/nginx.conf docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/nginx/nginx.conf --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/nginx/nginx.conf 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/nginx/nginx.conf 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -server { - listen 80; - root /data/docker-ci; - - location / { - proxy_pass http://localhost:8000/; - } - - location /coverage { - root /data/docker-ci; - } -} diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/README.rst docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/README.rst --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/README.rst 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/README.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,65 +0,0 @@ -========= -docker-ci -========= - -This directory contains docker-ci continuous integration system. -As expected, it is a fully dockerized and deployed using -docker-container-runner. 
-docker-ci is based on Buildbot, a continuous integration system designed -to automate the build/test cycle. By automatically rebuilding and testing -the tree each time something has changed, build problems are pinpointed -quickly, before other developers are inconvenienced by the failure. -We are running buildbot at Rackspace to verify docker and docker-registry -pass tests, and check for coverage code details. - -docker-ci instance is at https://docker-ci.docker.io/waterfall - -Inside docker-ci container we have the following directory structure: - -/docker-ci source code of docker-ci -/data/backup/docker-ci/ daily backup (replicated over S3) -/data/docker-ci/coverage/{docker,docker-registry}/ mapped to host volumes -/data/buildbot/{master,slave}/ main docker-ci buildbot config and database -/var/socket/{docker.sock} host volume access to docker socket - - -Production deployment -===================== - -:: - - # Clone docker-ci repository - git clone https://github.com/dotcloud/docker - cd docker/hack/infrastructure/docker-ci - - export DOCKER_PROD=[PRODUCTION_SERVER_IP] - - # Create data host volume. (only once) - docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \ - mkdir -p /data/docker-ci/coverage/docker - docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \ - mkdir -p /data/docker-ci/coverage/docker-registry - docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \ - chown -R 1000.1000 /data/docker-ci - - # dcr deployment. Define credentials and special environment dcr variables - # ( retrieved at /hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml ) - export WEB_USER=[DOCKER-CI-WEBSITE-USERNAME] - export WEB_IRC_PWD=[DOCKER-CI-WEBSITE-PASSWORD] - export BUILDBOT_PWD=[BUILDSLAVE_PASSWORD] - export AWS_ACCESS_KEY=[DOCKER_RELEASE_S3_ACCESS] - export AWS_SECRET_KEY=[DOCKER_RELEASE_S3_SECRET] - export GPG_PASSPHRASE=[DOCKER_RELEASE_PASSPHRASE] - export BACKUP_AWS_ID=[S3_BUCKET_CREDENTIAL_ACCESS] - export BACKUP_AWS_SECRET=[S3_BUCKET_CREDENTIAL_SECRET] - export SMTP_USER=[MAILGUN_SMTP_USERNAME] - export SMTP_PWD=[MAILGUN_SMTP_PASSWORD] - export EMAIL_RCP=[EMAIL_FOR_BUILD_ERRORS] - - # Build docker-ci and testbuilder docker images - docker -H $DOCKER_PROD build -rm -t docker-ci/docker-ci . - (cd testbuilder; docker -H $DOCKER_PROD build -rm -t docker-ci/testbuilder .) - - # Run docker-ci container ( assuming no previous container running ) - (cd dcr/prod; dcr docker-ci.yml start) - (cd dcr/prod; dcr docker-ci.yml register docker-ci.docker.io) diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/report/deployment.py docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/report/deployment.py --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/report/deployment.py 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/report/deployment.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,130 +0,0 @@ -#!/usr/bin/env python - -'''Deploy docker-ci report container on Digital Ocean. 
-Usage: - export CONFIG_JSON=' - { "DROPLET_NAME": "Digital_Ocean_dropplet_name", - "DO_CLIENT_ID": "Digital_Ocean_client_id", - "DO_API_KEY": "Digital_Ocean_api_key", - "DOCKER_KEY_ID": "Digital_Ocean_ssh_key_id", - "DOCKER_CI_KEY_PATH": "docker-ci_private_key_path", - "DOCKER_CI_PUB": "$(cat docker-ci_ssh_public_key.pub)", - "DOCKER_CI_ADDRESS" "user@docker-ci_fqdn_server", - "SMTP_USER": "SMTP_server_user", - "SMTP_PWD": "SMTP_server_password", - "EMAIL_SENDER": "Buildbot_mailing_sender", - "EMAIL_RCP": "Buildbot_mailing_receipient" }' - python deployment.py -''' - -import re, json, requests, base64 -from fabric import api -from fabric.api import cd, run, put, sudo -from os import environ as env -from time import sleep -from datetime import datetime - -# Populate environment variables -CONFIG = json.loads(env['CONFIG_JSON']) -for key in CONFIG: - env[key] = CONFIG[key] - -# Load DOCKER_CI_KEY -env['DOCKER_CI_KEY'] = open(env['DOCKER_CI_KEY_PATH']).read() - -DROPLET_NAME = env.get('DROPLET_NAME','report') -TIMEOUT = 120 # Seconds before timeout droplet creation -IMAGE_ID = 1004145 # Docker on Ubuntu 13.04 -REGION_ID = 4 # New York 2 -SIZE_ID = 66 # memory 512MB -DO_IMAGE_USER = 'root' # Image user on Digital Ocean -API_URL = 'https://api.digitalocean.com/' - - -class digital_ocean(): - - def __init__(self, key, client): - '''Set default API parameters''' - self.key = key - self.client = client - self.api_url = API_URL - - def api(self, cmd_path, api_arg={}): - '''Make api call''' - api_arg.update({'api_key':self.key, 'client_id':self.client}) - resp = requests.get(self.api_url + cmd_path, params=api_arg).text - resp = json.loads(resp) - if resp['status'] != 'OK': - raise Exception(resp['error_message']) - return resp - - def droplet_data(self, name): - '''Get droplet data''' - data = self.api('droplets') - data = [droplet for droplet in data['droplets'] - if droplet['name'] == name] - return data[0] if data else {} - -def json_fmt(data): - '''Format json output''' - return json.dumps(data, sort_keys = True, indent = 2) - - -do = digital_ocean(env['DO_API_KEY'], env['DO_CLIENT_ID']) - -# Get DROPLET_NAME data -data = do.droplet_data(DROPLET_NAME) - -# Stop processing if DROPLET_NAME exists on Digital Ocean -if data: - print ('Droplet: {} already deployed. Not further processing.' - .format(DROPLET_NAME)) - exit(1) - -# Create droplet -do.api('droplets/new', {'name':DROPLET_NAME, 'region_id':REGION_ID, - 'image_id':IMAGE_ID, 'size_id':SIZE_ID, - 'ssh_key_ids':[env['DOCKER_KEY_ID']]}) - -# Wait for droplet to be created. 
-start_time = datetime.now() -while (data.get('status','') != 'active' and ( - datetime.now()-start_time).seconds < TIMEOUT): - data = do.droplet_data(DROPLET_NAME) - print data['status'] - sleep(3) - -# Wait for the machine to boot -sleep(15) - -# Get droplet IP -ip = str(data['ip_address']) -print 'droplet: {} ip: {}'.format(DROPLET_NAME, ip) - -api.env.host_string = ip -api.env.user = DO_IMAGE_USER -api.env.key_filename = env['DOCKER_CI_KEY_PATH'] - -# Correct timezone -sudo('echo "America/Los_Angeles" >/etc/timezone') -sudo('dpkg-reconfigure --frontend noninteractive tzdata') - -# Load JSON_CONFIG environment for Dockerfile -CONFIG_JSON= base64.b64encode( - '{{"DOCKER_CI_PUB": "{DOCKER_CI_PUB}",' - ' "DOCKER_CI_KEY": "{DOCKER_CI_KEY}",' - ' "DOCKER_CI_ADDRESS": "{DOCKER_CI_ADDRESS}",' - ' "SMTP_USER": "{SMTP_USER}",' - ' "SMTP_PWD": "{SMTP_PWD}",' - ' "EMAIL_SENDER": "{EMAIL_SENDER}",' - ' "EMAIL_RCP": "{EMAIL_RCP}"}}'.format(**env)) - -run('mkdir -p /data/report') -put('./', '/data/report') -with cd('/data/report'): - run('chmod 700 report.py') - run('echo "{}" > credentials.json'.format(CONFIG_JSON)) - run('docker build -t report .') - run('rm credentials.json') - run("echo -e '30 09 * * * /usr/bin/docker run report\n' |" - " /usr/bin/crontab -") diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/report/Dockerfile docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/report/Dockerfile --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/report/Dockerfile 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/report/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -# VERSION: 0.22 -# DOCKER-VERSION 0.6.3 -# AUTHOR: Daniel Mizyrycki -# DESCRIPTION: Generate docker-ci daily report -# COMMENTS: The build process is initiated by deployment.py - Report configuration is passed through ./credentials.json at -# deployment time. -# TO_BUILD: docker build -t report . -# TO_DEPLOY: docker run report - -from ubuntu:12.04 -maintainer Daniel Mizyrycki - -env PYTHONPATH /report - - -# Add report dependencies -run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > \ - /etc/apt/sources.list -run apt-get update; apt-get install -y python2.7 python-pip ssh rsync - -# Set San Francisco timezone -run echo "America/Los_Angeles" >/etc/timezone -run dpkg-reconfigure --frontend noninteractive tzdata - -# Add report code and set default container command -add . /report -cmd "/report/report.py" diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/report/report.py docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/report/report.py --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/report/report.py 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/report/report.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,145 +0,0 @@ -#!/usr/bin/python - -'''CONFIG_JSON is a json encoded string base64 environment variable. It is used -to clone docker-ci database, generate docker-ci report and submit it by email. 
-CONFIG_JSON data comes from the file /report/credentials.json inserted in this -container by deployment.py: - -{ "DOCKER_CI_PUB": "$(cat docker-ci_ssh_public_key.pub)", - "DOCKER_CI_KEY": "$(cat docker-ci_ssh_private_key.key)", - "DOCKER_CI_ADDRESS": "user@docker-ci_fqdn_server", - "SMTP_USER": "SMTP_server_user", - "SMTP_PWD": "SMTP_server_password", - "EMAIL_SENDER": "Buildbot_mailing_sender", - "EMAIL_RCP": "Buildbot_mailing_receipient" } ''' - -import os, re, json, sqlite3, datetime, base64 -import smtplib -from datetime import timedelta -from subprocess import call -from os import environ as env - -TODAY = datetime.date.today() - -# Load credentials to the environment -env['CONFIG_JSON'] = base64.b64decode(open('/report/credentials.json').read()) - -# Remove SSH private key as it needs more processing -CONFIG = json.loads(re.sub(r'("DOCKER_CI_KEY".+?"(.+?)",)','', - env['CONFIG_JSON'], flags=re.DOTALL)) - -# Populate environment variables -for key in CONFIG: - env[key] = CONFIG[key] - -# Load SSH private key -env['DOCKER_CI_KEY'] = re.sub('^.+"DOCKER_CI_KEY".+?"(.+?)".+','\\1', - env['CONFIG_JSON'],flags=re.DOTALL) - -# Prevent rsync to validate host on first connection to docker-ci -os.makedirs('/root/.ssh') -open('/root/.ssh/id_rsa','w').write(env['DOCKER_CI_KEY']) -os.chmod('/root/.ssh/id_rsa',0600) -open('/root/.ssh/config','w').write('StrictHostKeyChecking no\n') - - -# Sync buildbot database from docker-ci -call('rsync {}:/data/buildbot/master/state.sqlite .'.format( - env['DOCKER_CI_ADDRESS']), shell=True) - -class SQL: - def __init__(self, database_name): - sql = sqlite3.connect(database_name) - # Use column names as keys for fetchall rows - sql.row_factory = sqlite3.Row - sql = sql.cursor() - self.sql = sql - - def query(self,query_statement): - return self.sql.execute(query_statement).fetchall() - -sql = SQL("state.sqlite") - - -class Report(): - - def __init__(self,period='',date=''): - self.data = [] - self.period = 'date' if not period else period - self.date = str(TODAY) if not date else date - self.compute() - - def compute(self): - '''Compute report''' - if self.period == 'week': - self.week_report(self.date) - else: - self.date_report(self.date) - - - def date_report(self,date): - '''Create a date test report''' - builds = [] - # Get a queryset with all builds from date - rows = sql.query('SELECT * FROM builds JOIN buildrequests' - ' WHERE builds.brid=buildrequests.id and' - ' date(start_time, "unixepoch", "localtime") = "{0}"' - ' GROUP BY number'.format(date)) - build_names = sorted(set([row['buildername'] for row in rows])) - # Create a report build line for a given build - for build_name in build_names: - tried = len([row['buildername'] - for row in rows if row['buildername'] == build_name]) - fail_tests = [row['buildername'] for row in rows if ( - row['buildername'] == build_name and row['results'] != 0)] - fail = len(fail_tests) - fail_details = '' - fail_pct = int(100.0*fail/tried) if tried != 0 else 100 - builds.append({'name': build_name, 'tried': tried, 'fail': fail, - 'fail_pct': fail_pct, 'fail_details':fail_details}) - if builds: - self.data.append({'date': date, 'builds': builds}) - - - def week_report(self,date): - '''Add the week's date test reports to report.data''' - date = datetime.datetime.strptime(date,'%Y-%m-%d').date() - last_monday = date - datetime.timedelta(days=date.weekday()) - week_dates = [last_monday + timedelta(days=x) for x in range(7,-1,-1)] - for date in week_dates: - self.date_report(str(date)) - - def render_text(self): - 
'''Return rendered report in text format''' - retval = '' - fail_tests = {} - for builds in self.data: - retval += 'Test date: {0}\n'.format(builds['date'],retval) - table = '' - for build in builds['builds']: - table += ('Build {name:15} Tried: {tried:4} ' - ' Failures: {fail:4} ({fail_pct}%)\n'.format(**build)) - if build['name'] in fail_tests: - fail_tests[build['name']] += build['fail_details'] - else: - fail_tests[build['name']] = build['fail_details'] - retval += '{0}\n'.format(table) - retval += '\n Builds failing' - for fail_name in fail_tests: - retval += '\n' + fail_name + '\n' - for (fail_id,fail_url,rn_tests,nr_errors,log_errors, - tracelog_errors) in fail_tests[fail_name]: - retval += fail_url + '\n' - retval += '\n\n' - return retval - - -# Send email -smtp_from = env['EMAIL_SENDER'] -subject = '[docker-ci] Daily report for {}'.format(str(TODAY)) -msg = "From: {}\r\nTo: {}\r\nSubject: {}\r\n\r\n".format( - smtp_from, env['EMAIL_RCP'], subject) -msg = msg + Report('week').render_text() -server = smtplib.SMTP_SSL('smtp.mailgun.org') -server.login(env['SMTP_USER'], env['SMTP_PWD']) -server.sendmail(smtp_from, env['EMAIL_RCP'], msg) diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/setup.sh docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/setup.sh --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/setup.sh 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/setup.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,54 +0,0 @@ -#!/usr/bin/env bash - -# Set timezone -echo "GMT" >/etc/timezone -dpkg-reconfigure --frontend noninteractive tzdata - -# Set ssh superuser -mkdir -p /data/buildbot /var/run/sshd /run -useradd -m -d /home/sysadmin -s /bin/bash -G sudo,docker -p '*' sysadmin -sed -Ei 's/(\%sudo.*) ALL/\1 NOPASSWD:ALL/' /etc/sudoers -cd /home/sysadmin -mkdir .ssh -chmod 700 .ssh -cat > .ssh/authorized_keys << 'EOF' -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7ALVhwQ68q1SjrKaAduOuOEAcWmb8kDZf5qA7T1fM8AP07EDC7nSKRJ8PXUBGTOQfxm89coJDuSJsTAZ+1PvglXhA0Mq6+knc6ZrZY+SuZlDIDAk4TOdVPoDZnmR1YW2McxHkhcGIOKeC8MMig5NeEjtgQwXzauUSPqeh8HMlLZRMooFYyyluIpn7NaCLzyWjwAQz2s3KyI7VE7hl+ncCrW86v+dciEdwqtzNoUMFb3iDpPxaiCl3rv+SB7co/5eUDTs1FZvUcYMXKQuf8R+2ZKzXOpwr0Zs8sKQXvXavCeWykwGgXLBjVkvrDcHuDD6UXCW63UKgmRECpLZaMBVIIRWLEEgTS5OSQTcxpMVe5zUW6sDvXHTcdPwWrcn1dE9F/0vLC0HJ4ADKelLX5zyTpmXGbuZuntIf1JO67D/K/P++uV1rmVIH+zgtOf23w5rX2zKb4BSTqP0sv61pmWV7MEVoEz6yXswcTjS92tb775v7XLU9vKAkt042ORFdE4/++hejhL/Lj52IRgjt1CJZHZsR9JywJZrz3kYuf8eU2J2FYh0Cpz5gmf0f+12Rt4HztnZxGPP4KuMa66e4+hpx1jynjMZ7D5QUnNYEmuvJByopn8HSluuY/kS5MMyZCZtJLEPGX4+yECX0Di/S0vCRl2NyqfCBqS+yXXT5SA1nFw== docker-test@docker.io -EOF -chmod 600 .ssh/authorized_keys -chown -R sysadmin .ssh - -# Fix docker group id for use of host dockerd by sysadmin -sed -Ei 's/(docker:x:)[^:]+/\1999/' /etc/group - -# Create buildbot configuration -cd /data/buildbot; buildbot create-master master -cp -a /data/buildbot/master/master.cfg.sample \ - /data/buildbot/master/master.cfg -cd /data/buildbot; \ - buildslave create-slave slave localhost:9989 buildworker pass -cp /docker-ci/buildbot/master.cfg /data/buildbot/master - -# Patch github webstatus to capture pull requests -cp /docker-ci/buildbot/github.py /usr/local/lib/python2.7/dist-packages/buildbot/status/web/hooks -chown -R sysadmin.sysadmin /data - -# Create nginx configuration -rm /etc/nginx/sites-enabled/default -cp /docker-ci/nginx/nginx.conf /etc/nginx/conf.d/buildbot.conf -/bin/echo -e '\ndaemon off;\n' >> /etc/nginx/nginx.conf - -# Set supervisord buildbot, nginx 
and sshd processes -/bin/echo -e "\ -[program:buildmaster]\n\ -command=twistd --nodaemon --no_save -y buildbot.tac\n\ -directory=/data/buildbot/master\n\ -user=sysadmin\n\n\ -[program:buildworker]\n\ -command=twistd --nodaemon --no_save -y buildbot.tac\n\ -directory=/data/buildbot/slave\n\ -user=sysadmin\n" > \ - /etc/supervisor/conf.d/buildbot.conf -/bin/echo -e "[program:nginx]\ncommand=/usr/sbin/nginx\n" > \ - /etc/supervisor/conf.d/nginx.conf -/bin/echo -e "[program:sshd]\ncommand=/usr/sbin/sshd -D\n" > \ - /etc/supervisor/conf.d/sshd.conf diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/testbuilder/Dockerfile docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/testbuilder/Dockerfile --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/testbuilder/Dockerfile 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/testbuilder/Dockerfile 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -# TO_BUILD: docker build -rm -no-cache -t docker-ci/testbuilder . -# TO_RUN: docker run -rm -u sysadmin \ -# -v /run:/var/socket docker-ci/testbuilder docker-registry -# - -FROM docker-ci/docker-ci -ENV HOME /home/sysadmin - -RUN mkdir /testbuilder -ADD . /testbuilder - -ENTRYPOINT ["/testbuilder/testbuilder.sh"] diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -#!/usr/bin/env bash -set -x -set -e -PROJECT_PATH=$1 - -# Build the docker project -cd /data/$PROJECT_PATH -sg docker -c "docker build -q -rm -t registry ." -cd test; sg docker -c "docker build -q -rm -t docker-registry-test ." - -# Run the tests -sg docker -c "docker run -rm -v /home/docker-ci/coverage/docker-registry:/data docker-registry-test" diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/testbuilder/docker.sh docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/testbuilder/docker.sh --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/testbuilder/docker.sh 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/testbuilder/docker.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -set -x -set -e -PROJECT_PATH=$1 - -# Build the docker project -cd /data/$PROJECT_PATH -sg docker -c "docker build -q -rm -t docker ." 
- -if [ "$DOCKER_RELEASE" == "1" ]; then - # Do nightly release - echo sg docker -c "docker run -rm -privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY= -e AWS_SECRET_KEY= -e GPG_PASSPHRASE= docker hack/release.sh" - set +x - sg docker -c "docker run -rm -privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh" -else - # Run the tests - sg docker -c "docker run -rm -privileged -v /home/docker-ci/coverage/docker:/data docker ./hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh" -fi diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ -#!/usr/bin/env bash -# Download, build and run a docker project tests -# Environment variables: DEPLOYMENT - -cat $0 -set -e -set -x - -PROJECT=$1 -COMMIT=${2-HEAD} -REPO=${3-https://github.com/dotcloud/$PROJECT} -BRANCH=${4-master} -REPO_PROJ="https://github.com/docker-test/$PROJECT" -if [ "$DEPLOYMENT" == "production" ]; then - REPO_PROJ="https://github.com/dotcloud/$PROJECT" -fi -set +x - -# Generate a random string of $1 characters -function random { - cat /dev/urandom | tr -cd 'a-f0-9' | head -c $1 -} - -PROJECT_PATH="$PROJECT-tmp-$(random 12)" - -# Set docker-test git user -set -x -git config --global user.email "docker-test@docker.io" -git config --global user.name "docker-test" - -# Fetch project -git clone -q $REPO_PROJ -b master /data/$PROJECT_PATH -cd /data/$PROJECT_PATH -echo "Git commit: $(git rev-parse HEAD)" -git fetch -q $REPO $BRANCH -git merge --no-edit $COMMIT - -# Build the project dockertest -/testbuilder/$PROJECT.sh $PROJECT_PATH -rm -rf /data/$PROJECT_PATH diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/tool/backup.py docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/tool/backup.py --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/tool/backup.py 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/tool/backup.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,47 +0,0 @@ -#!/usr/bin/env python - -import os,sys,json -from datetime import datetime -from filecmp import cmp -from subprocess import check_call -from boto.s3.key import Key -from boto.s3.connection import S3Connection - -def ENV(x): - '''Promote an environment variable for global use returning its value''' - retval = os.environ.get(x, '') - globals()[x] = retval - return retval - -ROOT_PATH = '/data/backup/docker-ci' -TODAY = str(datetime.today())[:10] -BACKUP_FILE = '{}/docker-ci_{}.tgz'.format(ROOT_PATH, TODAY) -BACKUP_LINK = '{}/docker-ci.tgz'.format(ROOT_PATH) -ENV('BACKUP_BUCKET') -ENV('BACKUP_AWS_ID') -ENV('BACKUP_AWS_SECRET') - -'''Create full master buildbot backup, avoiding duplicates''' -# Ensure backup path exist -if not os.path.exists(ROOT_PATH): - os.makedirs(ROOT_PATH) -# Make actual backups -check_call('/bin/tar czf {} -C /data --exclude=backup --exclude=buildbot/slave' - ' . 
1>/dev/null 2>&1'.format(BACKUP_FILE),shell=True) -# remove previous dump if it is the same as the latest -if (os.path.exists(BACKUP_LINK) and cmp(BACKUP_FILE, BACKUP_LINK) and - os.path._resolve_link(BACKUP_LINK) != BACKUP_FILE): - os.unlink(os.path._resolve_link(BACKUP_LINK)) -# Recreate backup link pointing to latest backup -try: - os.unlink(BACKUP_LINK) -except: - pass -os.symlink(BACKUP_FILE, BACKUP_LINK) - -# Make backup on S3 -bucket = S3Connection(BACKUP_AWS_ID,BACKUP_AWS_SECRET).get_bucket(BACKUP_BUCKET) -k = Key(bucket) -k.key = BACKUP_FILE -k.set_contents_from_filename(BACKUP_FILE) -bucket.copy_key(os.path.basename(BACKUP_LINK),BACKUP_BUCKET,BACKUP_FILE[1:]) diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/VERSION docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/VERSION --- docker.io-0.9.1~dfsg1/hack/infrastructure/docker-ci/VERSION 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/docker-ci/VERSION 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -0.5.6 diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/MAINTAINERS docker.io-1.3.2~dfsg1/hack/infrastructure/MAINTAINERS --- docker.io-0.9.1~dfsg1/hack/infrastructure/MAINTAINERS 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -Ken Cochrane (@kencochrane) -Jerome Petazzoni (@jpetazzo) diff -Nru docker.io-0.9.1~dfsg1/hack/infrastructure/README.md docker.io-1.3.2~dfsg1/hack/infrastructure/README.md --- docker.io-0.9.1~dfsg1/hack/infrastructure/README.md 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/infrastructure/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ -# Docker project infrastructure - -This is an overview of the Docker infrastructure. - -**Note: obviously, credentials should not be stored in this repository.** -However, when there are credentials, we should list how to obtain them -(e.g. who has them). - - -## Providers - -This should be the list of all the entities providing some kind of -infrastructure service to the Docker project (either for free, -or paid by dotCloud). - - -Provider | Service ---------------|------------------------------------------------- -AWS | packages (S3 bucket), dotCloud PAAS, dev-env, ci -CloudFlare | cdn -Digital Ocean | ci -dotCloud PAAS | website, index, registry, ssl, blog -DynECT | dns (docker.io) -GitHub | repository -Linode | stackbrew -Mailgun | outgoing e-mail -ReadTheDocs | docs - -*Ordered-by: lexicographic* - - -## URLs - -This should be the list of all the infrastructure-related URLs -and which service is handling them. - -URL | Service ----------------------------------------------|--------------------------------- - http://blog.docker.io/ | blog -*http://cdn-registry-1.docker.io/ | registry (pull) - http://debug.docker.io/ | debug tool - http://docs.docker.io/ | docsproxy (proxy to readthedocs) - http://docker-ci.dotcloud.com/ | ci - http://docker.io/ | redirect to www.docker.io (dynect) - http://docker.readthedocs.org/ | docs -*http://get.docker.io/ | packages - https://github.com/dotcloud/docker | repository -*https://index.docker.io/ | index - http://registry-1.docker.io/ | registry (push) - http://staging-docker-ci.dotcloud.com/ | ci -*http://test.docker.io/ | packages -*http://www.docker.io/ | website - http://? (internal URL, not for public use) | stackbrew - -*Ordered-by: lexicographic* - -**Note:** an asterisk in front of the URL means that it is cached by CloudFlare. 
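For the starred (CloudFlare-cached) URLs above, a quick header probe is enough to confirm the CDN is actually in front, since CloudFlare stamps every response with a `CF-RAY` header. A rough sketch, using one hostname from the table:

```bash
# CloudFlare-fronted hosts answer with a CF-RAY header; direct origins do not.
curl -sI http://get.docker.io/ | grep -iE '^(cf-ray|server):'
```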
- - -## Services - -This should be the list of all services referenced above. - -Service | Maintainer(s) | How to update | Source ---------------------|----------------------------|------------------|------- -blog | [@jbarbier] | dotcloud push | https://github.com/dotcloud/blog.docker.io -cdn | [@jpetazzo][] [@samalba][] | cloudflare panel | N/A -ci | [@mzdaniel] | See [docker-ci] | See [docker-ci] -docs | [@metalivedev] | github webhook | docker repo -docsproxy | [@dhrp] | dotcloud push | https://github.com/dotcloud/docker-docs-dotcloud-proxy -index | [@kencochrane] | dotcloud push | private -packages | [@jpetazzo] | hack/release | docker repo -registry | [@samalba] | dotcloud push | https://github.com/dotcloud/docker-registry -repository (github) | N/A | N/A | N/A -ssl (dotcloud) | [@jpetazzo] | dotcloud ops | N/A -ssl (cloudflare) | [@jpetazzo] | cloudflare panel | N/A -stackbrew | [@shin-] | manual | https://github.com/dotcloud/stackbrew/stackbrew -website | [@dhrp] | dotcloud push | https://github.com/dotcloud/www.docker.io - -*Ordered-by: lexicographic* - - -[docker-ci]: docker-ci.rst -[@dhrp]: https://github.com/dhrp -[@jbarbier]: https://github.com/jbarbier -[@jpetazzo]: https://github.com/jpetazzo -[@kencochrane]: https://github.com/kencochrane -[@metalivedev]: https://github.com/metalivedev -[@mzdaniel]: https://github.com/mzdaniel -[@samalba]: https://github.com/samalba -[@shin-]: https://github.com/shin- diff -Nru docker.io-0.9.1~dfsg1/hack/install.sh docker.io-1.3.2~dfsg1/hack/install.sh --- docker.io-0.9.1~dfsg1/hack/install.sh 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/install.sh 2014-11-24 17:38:01.000000000 +0000 @@ -2,19 +2,19 @@ set -e # # This script is meant for quick & easy install via: -# 'curl -sL https://get.docker.io/ | sh' +# 'curl -sSL https://get.docker.com/ | sh' # or: -# 'wget -qO- https://get.docker.io/ | sh' +# 'wget -qO- https://get.docker.com/ | sh' # # # Docker Maintainers: -# To update this script on https://get.docker.io, +# To update this script on https://get.docker.com, # use hack/release.sh during a normal release, # or the following one-liner for script hotfixes: -# s3cmd put --acl-public -P hack/install.sh s3://get.docker.io/index +# s3cmd put --acl-public -P hack/install.sh s3://get.docker.com/index # -url='https://get.docker.io/' +url='https://get.docker.com/' command_exists() { command -v "$@" > /dev/null 2>&1 @@ -42,7 +42,7 @@ sh_c='sh -c' if [ "$user" != 'root' ]; then if command_exists sudo; then - sh_c='sudo sh -c' + sh_c='sudo -E sh -c' elif command_exists su; then sh_c='su -c' else @@ -54,7 +54,7 @@ curl='' if command_exists curl; then - curl='curl -sL' + curl='curl -sSL' elif command_exists wget; then curl='wget -qO-' elif command_exists busybox && busybox --list-modules | grep -q wget; then @@ -72,11 +72,38 @@ if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then lsb_dist='Debian' fi +if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then + lsb_dist='Fedora' +fi case "$lsb_dist" in - Ubuntu|Debian) + Fedora) + ( + set -x + $sh_c 'sleep 3; yum -y -q install docker-io' + ) + if command_exists docker && [ -e /var/run/docker.sock ]; then + ( + set -x + $sh_c 'docker run --rm hello-world' + ) || true + fi + your_user=your-user + [ "$user" != 'root' ] && your_user="$user" + echo + echo 'If you would like to use Docker as a non-root user, you should now consider' + echo 'adding your user to the "docker" group with something like:' + echo + echo ' sudo usermod -aG docker' $your_user + echo + echo 'Remember that 
you will have to log out and back in for this to take effect!' + echo + exit 0 + ;; + + Ubuntu|Debian|LinuxMint) export DEBIAN_FRONTEND=noninteractive - + did_apt_get_update= apt_get_update() { if [ -z "$did_apt_get_update" ]; then @@ -84,21 +111,33 @@ did_apt_get_update=1 fi } - - # TODO remove this section once device-mapper lands + + # aufs is preferred over devicemapper; try to ensure the driver is available. if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then kern_extras="linux-image-extra-$(uname -r)" - + apt_get_update ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true - + if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)' echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!' ( set -x; sleep 10 ) fi fi - + + # install apparmor utils if they're missing and apparmor is enabled in the kernel + # otherwise Docker will fail to start + if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then + if command -v apparmor_parser &> /dev/null; then + echo 'apparmor is enabled in the kernel and apparmor utils were already installed' + else + echo 'apparmor is enabled in the kernel, but apparmor_parser missing' + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' ) + fi + fi + if [ ! -e /usr/lib/apt/methods/https ]; then apt_get_update ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https' ) @@ -106,14 +145,14 @@ if [ -z "$curl" ]; then apt_get_update ( set -x; $sh_c 'sleep 3; apt-get install -y -q curl' ) - curl='curl -sL' + curl='curl -sSL' fi ( set -x - if [ "https://get.docker.io/" = "$url" ]; then - $sh_c "apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9" - elif [ "https://test.docker.io/" = "$url" ]; then - $sh_c "apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 740B314AE3941731B942C66ADF4FD13717AAD7D6" + if [ "https://get.docker.com/" = "$url" ]; then + $sh_c "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9" + elif [ "https://test.docker.com/" = "$url" ]; then + $sh_c "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 740B314AE3941731B942C66ADF4FD13717AAD7D6" else $sh_c "$curl ${url}gpg | apt-key add -" fi @@ -123,7 +162,7 @@ if command_exists docker && [ -e /var/run/docker.sock ]; then ( set -x - $sh_c 'docker run busybox echo "Docker has been successfully installed!"' + $sh_c 'docker run --rm hello-world' ) || true fi your_user=your-user @@ -138,9 +177,9 @@ echo exit 0 ;; - + Gentoo) - if [ "$url" = "https://test.docker.io/" ]; then + if [ "$url" = "https://test.docker.com/" ]; then echo >&2 echo >&2 ' You appear to be trying to install the latest nightly build in Gentoo.' echo >&2 ' The portage tree should contain the latest stable release of Docker, but' @@ -153,7 +192,7 @@ echo >&2 exit 1 fi - + ( set -x $sh_c 'sleep 3; emerge app-emulation/docker' @@ -162,12 +201,14 @@ ;; esac -echo >&2 -echo >&2 ' Either your platform is not easily detectable, is not supported by this' -echo >&2 ' installer script (yet - PRs welcome!), or does not yet have a package for' -echo >&2 ' Docker. 
Please visit the following URL for more detailed installation' -echo >&2 ' instructions:' -echo >&2 -echo >&2 ' http://docs.docker.io/en/latest/installation/' -echo >&2 +cat >&2 <<'EOF' + + Either your platform is not easily detectable, is not supported by this + installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have + a package for Docker. Please visit the following URL for more detailed + installation instructions: + + https://docs.docker.com/en/latest/installation/ + +EOF exit 1 diff -Nru docker.io-0.9.1~dfsg1/hack/MAINTAINERS docker.io-1.3.2~dfsg1/hack/MAINTAINERS --- docker.io-0.9.1~dfsg1/hack/MAINTAINERS 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -1 +1,4 @@ Tianon Gravi (@tianon) +Cristian Staretu (@unclejack) +Tibor Vass (@tiborvass) +dind: Jerome Petazzoni (@jpetazzo) diff -Nru docker.io-0.9.1~dfsg1/hack/MAINTAINERS.md docker.io-1.3.2~dfsg1/hack/MAINTAINERS.md --- docker.io-0.9.1~dfsg1/hack/MAINTAINERS.md 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/MAINTAINERS.md 2014-11-24 17:38:01.000000000 +0000 @@ -4,12 +4,12 @@ Dear maintainer. Thank you for investing the time and energy to help make Docker as useful as possible. Maintaining a project is difficult, -sometimes unrewarding work. Sure, you will get to contribute cool +sometimes unrewarding work. Sure, you will get to contribute cool features to the project. But most of your time will be spent reviewing, -cleaning up, documenting, answering questions, justifying design +cleaning up, documenting, answering questions, and justifying design decisions - while everyone has all the fun! But remember - the quality -of the maintainers work is what distinguishes the good projects from the -great. So please be proud of your work, even the unglamourous parts, +of the maintainers' work is what distinguishes the good projects from +the great. So please be proud of your work, even the unglamourous parts, and encourage a culture of appreciation and respect for *every* aspect of improving the project - not just the hot new features. @@ -20,78 +20,110 @@ This is a living document - if you see something out of date or missing, speak up! -## What are a maintainer's responsibility? +## What is a maintainer's responsibility? It is every maintainer's responsibility to: -* 1) Expose a clear roadmap for improving their component. -* 2) Deliver prompt feedback and decisions on pull requests. -* 3) Be available to anyone with questions, bug reports, criticism etc. +1. Expose a clear road map for improving their component. +2. Deliver prompt feedback and decisions on pull requests. +3. Be available to anyone with questions, bug reports, criticism etc. on their component. This includes IRC, GitHub requests and the mailing list. -* 4) Make sure their component respects the philosophy, design and - roadmap of the project. +4. Make sure their component respects the philosophy, design and + road map of the project. ## How are decisions made? -Short answer: with pull requests to the docker repository. +Short answer: with pull requests to the Docker repository. Docker is an open-source project with an open design philosophy. This means that the repository is the source of truth for EVERY aspect of the -project, including its philosophy, design, roadmap and APIs. *If it's -part of the project, it's in the repo. It's in the repo, it's part of +project, including its philosophy, design, road map, and APIs. 
*If it's +part of the project, it's in the repo. If it's in the repo, it's part of the project.* As a result, all decisions can be expressed as changes to the repository. An implementation change is a change to the source code. An API change is a change to the API specification. A philosophy change is -a change to the philosophy manifesto. And so on. +a change to the philosophy manifesto, and so on. -All decisions affecting docker, big and small, follow the same 3 steps: +All decisions affecting Docker, big and small, follow the same 3 steps: * Step 1: Open a pull request. Anyone can do this. * Step 2: Discuss the pull request. Anyone can do this. -* Step 3: Accept or refuse a pull request. The relevant maintainer does this (see below "Who decides what?") - +* Step 3: Accept (`LGTM`) or refuse a pull request. The relevant maintainers do +this (see below "Who decides what?") + + Accepting pull requests + - If the pull request appears to be ready to merge, give it a `LGTM`, which + stands for "Looks Good To Me". + - If the pull request has some small problems that need to be changed, make + a comment addressing the issues. + - If the changes needed to a PR are small, you can add a "LGTM once the + following comments are addressed...", which will reduce needless back and + forth. + - If the PR only needs a few changes before being merged, any MAINTAINER can + make a replacement PR that incorporates the existing commits and fixes the + problems before a fast track merge. + + Closing pull requests + - If a PR appears to be abandoned, after having attempted to contact the + original contributor, then a replacement PR may be made. Once the + replacement PR is made, any contributor may close the original one. + - If you are not sure if the pull request implements a good feature or you + do not understand the purpose of the PR, ask the contributor to provide + more documentation. If the contributor is not able to adequately explain + the purpose of the PR, the PR may be closed by any MAINTAINER. + - If a MAINTAINER feels that the pull request is sufficiently architecturally + flawed, or if the pull request needs significantly more design discussion + before being considered, the MAINTAINER should close the pull request with + a short explanation of what discussion still needs to be had. It is + important not to leave such pull requests open, as this will waste both the + MAINTAINER's time and the contributor's time. It is not good to string a + contributor along for weeks or months, having them make many changes to a PR + that will eventually be rejected. ## Who decides what? -So all decisions are pull requests, and the relevant maintainer makes -the decision by accepting or refusing the pull request. But how do we -identify the relevant maintainer for a given pull request? +All decisions are pull requests, and the relevant maintainers make +decisions by accepting or refusing pull requests. Review and acceptance +by anyone is denoted by adding a comment in the pull request: `LGTM`. +However, only currently listed `MAINTAINERS` are counted towards the +required majority. Docker follows the timeless, highly efficient and totally unfair system known as [Benevolent dictator for life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with yours truly, Solomon Hykes, in the role of BDFL. This means that all -decisions are made by default by Solomon. Since making every decision +decisions are made, by default, by Solomon. 
Since making every decision myself would be highly un-scalable, in practice decisions are spread across multiple maintainers. -The relevant maintainer for a pull request is assigned in 3 steps: +The relevant maintainers for a pull request can be worked out in 2 steps: -* Step 1: Determine the subdirectory affected by the pull request. This +* Step 1: Determine the subdirectories affected by the pull request. This might be `src/registry`, `docs/source/api`, or any other part of the repo. * Step 2: Find the `MAINTAINERS` file which affects this directory. If the directory itself does not have a `MAINTAINERS` file, work your way up the repo hierarchy until you find one. -* Step 3: The first maintainer listed is the primary maintainer. The - pull request is assigned to him. He may assign it to other listed - maintainers, at his discretion. +There is also a `hacks/getmaintainers.sh` script that will print out the +maintainers for a specified directory. + +### I'm a maintainer, and I'm going on holiday +Please let your co-maintainers and other contributors know by raising a pull +request that comments out your `MAINTAINERS` file entry using a `#`. -### I'm a maintainer, should I make pull requests too? +### I'm a maintainer. Should I make pull requests too? Yes. Nobody should ever push to master directly. All changes should be made through a pull request. ### Who assigns maintainers? -Solomon. +Solomon has final `LGTM` approval for all pull requests to `MAINTAINERS` files. ### How is this process changed? diff -Nru docker.io-0.9.1~dfsg1/hack/make/binary docker.io-1.3.2~dfsg1/hack/make/binary --- docker.io-0.9.1~dfsg1/hack/make/binary 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make/binary 2014-11-24 17:38:01.000000000 +0000 @@ -1,4 +1,5 @@ #!/bin/bash +set -e DEST=$1 @@ -11,5 +12,6 @@ " \ ./docker echo "Created binary: $DEST/docker-$VERSION" +ln -sf "docker-$VERSION" "$DEST/docker" hash_files "$DEST/docker-$VERSION" diff -Nru docker.io-0.9.1~dfsg1/hack/make/cover docker.io-1.3.2~dfsg1/hack/make/cover --- docker.io-0.9.1~dfsg1/hack/make/cover 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make/cover 2014-11-24 17:38:01.000000000 +0000 @@ -1,4 +1,5 @@ #!/bin/bash +set -e DEST="$1" diff -Nru docker.io-0.9.1~dfsg1/hack/make/cross docker.io-1.3.2~dfsg1/hack/make/cross --- docker.io-0.9.1~dfsg1/hack/make/cross 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make/cross 2014-11-24 17:38:01.000000000 +0000 @@ -1,7 +1,14 @@ #!/bin/bash +set -e DEST=$1 +# explicit list of os/arch combos that support being a daemon +declare -A daemonSupporting +daemonSupporting=( + [linux/amd64]=1 +) + # if we have our linux/amd64 version compiled, let's symlink it in if [ -x "$DEST/../binary/docker-$VERSION" ]; then mkdir -p "$DEST/linux/amd64" @@ -17,7 +24,10 @@ mkdir -p "$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION export GOOS=${platform%/*} export GOARCH=${platform##*/} - export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms (TODO this might change someday) + if [ -z "${daemonSupporting[$platform]}" ]; then + export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms + export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported + fi source "$(dirname "$BASH_SOURCE")/binary" "$DEST/$platform" ) done diff -Nru docker.io-0.9.1~dfsg1/hack/make/dynbinary docker.io-1.3.2~dfsg1/hack/make/dynbinary --- 
docker.io-0.9.1~dfsg1/hack/make/dynbinary 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make/dynbinary 2014-11-24 17:38:01.000000000 +0000 @@ -1,4 +1,5 @@ #!/bin/bash +set -e DEST=$1 @@ -38,6 +39,7 @@ # exported so that "dyntest" can easily access it later without recalculating it ( - export LDFLAGS_STATIC_DOCKER="-X github.com/dotcloud/docker/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" -X github.com/dotcloud/docker/dockerversion.INITPATH \"$DOCKER_INITPATH\"" + export LDFLAGS_STATIC_DOCKER="-X $DOCKER_PKG/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" -X $DOCKER_PKG/dockerversion.INITPATH \"$DOCKER_INITPATH\"" + export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary source "$(dirname "$BASH_SOURCE")/binary" ) diff -Nru docker.io-0.9.1~dfsg1/hack/make/dyntest docker.io-1.3.2~dfsg1/hack/make/dyntest --- docker.io-0.9.1~dfsg1/hack/make/dyntest 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make/dyntest 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -#!/bin/bash - -DEST=$1 -INIT=$DEST/../dynbinary/dockerinit-$VERSION - -set -e - -if [ ! -x "$INIT" ]; then - echo >&2 'error: dynbinary must be run before dyntest' - false -fi - -( - export TEST_DOCKERINIT_PATH="$INIT" - export LDFLAGS_STATIC_DOCKER=" - -X github.com/dotcloud/docker/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" - " - source "$(dirname "$BASH_SOURCE")/test" -) diff -Nru docker.io-0.9.1~dfsg1/hack/make/dyntest-integration docker.io-1.3.2~dfsg1/hack/make/dyntest-integration --- docker.io-0.9.1~dfsg1/hack/make/dyntest-integration 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make/dyntest-integration 2014-11-24 17:38:01.000000000 +0000 @@ -1,10 +1,9 @@ #!/bin/bash +set -e DEST=$1 INIT=$DEST/../dynbinary/dockerinit-$VERSION -set -e - if [ ! -x "$INIT" ]; then echo >&2 'error: dynbinary must be run before dyntest-integration' false @@ -13,7 +12,7 @@ ( export TEST_DOCKERINIT_PATH="$INIT" export LDFLAGS_STATIC_DOCKER=" - -X github.com/dotcloud/docker/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" + -X $DOCKER_PKG/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" " source "$(dirname "$BASH_SOURCE")/test-integration" ) diff -Nru docker.io-0.9.1~dfsg1/hack/make/dyntest-unit docker.io-1.3.2~dfsg1/hack/make/dyntest-unit --- docker.io-0.9.1~dfsg1/hack/make/dyntest-unit 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make/dyntest-unit 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,18 @@ +#!/bin/bash +set -e + +DEST=$1 +INIT=$DEST/../dynbinary/dockerinit-$VERSION + +if [ ! -x "$INIT" ]; then + echo >&2 'error: dynbinary must be run before dyntest-unit' + false +fi + +( + export TEST_DOCKERINIT_PATH="$INIT" + export LDFLAGS_STATIC_DOCKER=" + -X $DOCKER_PKG/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" + " + source "$(dirname "$BASH_SOURCE")/test-unit" +) diff -Nru docker.io-0.9.1~dfsg1/hack/make/.ensure-busybox docker.io-1.3.2~dfsg1/hack/make/.ensure-busybox --- docker.io-0.9.1~dfsg1/hack/make/.ensure-busybox 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make/.ensure-busybox 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,10 @@ +#!/bin/bash + +if ! 
docker inspect busybox &> /dev/null; then + if [ -d /docker-busybox ]; then + source "$(dirname "$BASH_SOURCE")/.ensure-scratch" + ( set -x; docker build -t busybox /docker-busybox ) + else + ( set -x; docker pull busybox ) + fi +fi diff -Nru docker.io-0.9.1~dfsg1/hack/make/.ensure-scratch docker.io-1.3.2~dfsg1/hack/make/.ensure-scratch --- docker.io-0.9.1~dfsg1/hack/make/.ensure-scratch 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make/.ensure-scratch 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,21 @@ +#!/bin/bash + +if ! docker inspect scratch &> /dev/null; then + # let's build a "docker save" tarball for "scratch" + # see https://github.com/docker/docker/pull/5262 + # and also https://github.com/docker/docker/issues/4242 + mkdir -p /docker-scratch + ( + cd /docker-scratch + echo '{"scratch":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > repositories + mkdir -p 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + ( + cd 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json + echo '1.0' > VERSION + tar -cf layer.tar --files-from /dev/null + ) + ) + ( set -x; tar -cf /docker-scratch.tar -C /docker-scratch . ) + ( set -x; docker load --input /docker-scratch.tar ) +fi diff -Nru docker.io-0.9.1~dfsg1/hack/make/.go-compile-test-dir docker.io-1.3.2~dfsg1/hack/make/.go-compile-test-dir --- docker.io-0.9.1~dfsg1/hack/make/.go-compile-test-dir 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make/.go-compile-test-dir 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,26 @@ +#!/bin/bash +set -e + +# Compile phase run by parallel in test-unit. No support for coverpkg + +dir=$1 +out_file="$DEST/precompiled/$dir.test" +testcover=() +if [ "$HAVE_GO_TEST_COVER" ]; then + # if our current go install has -cover, we want to use it :) + mkdir -p "$DEST/coverprofiles" + coverprofile="docker${dir#.}" + coverprofile="$DEST/coverprofiles/${coverprofile//\//-}" + testcover=( -cover -coverprofile "$coverprofile" ) # missing $coverpkg +fi +if [ "$BUILDFLAGS_FILE" ]; then + readarray -t BUILDFLAGS < "$BUILDFLAGS_FILE" +fi +( + cd "$dir" + go test "${testcover[@]}" -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS -c +) +[ $? -ne 0 ] && return 1 +mkdir -p "$(dirname "$out_file")" +mv "$dir/$(basename "$dir").test" "$out_file" +echo "Precompiled: ${DOCKER_PKG}${dir#.}" diff -Nru docker.io-0.9.1~dfsg1/hack/make/test docker.io-1.3.2~dfsg1/hack/make/test --- docker.io-0.9.1~dfsg1/hack/make/test 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make/test 1970-01-01 00:00:00.000000000 +0000 @@ -1,51 +0,0 @@ -#!/bin/bash - -DEST=$1 - -set -e - -RED=$'\033[31m' -GREEN=$'\033[32m' -TEXTRESET=$'\033[0m' # reset the foreground colour - -# Run Docker's test suite, including sub-packages, and store their output as a bundle -# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. 
-# You can use this to select certain tests to run, eg. -# -# TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test -# -bundle_test() { - { - date - - TESTS_FAILED=() - for test_dir in $(find_dirs '*_test.go'); do - echo - - if ! LDFLAGS="$LDFLAGS $LDFLAGS_STATIC_DOCKER" go_test_dir "$test_dir"; then - TESTS_FAILED+=("$test_dir") - echo - echo "${RED}Tests failed: $test_dir${TEXTRESET}" - sleep 1 # give it a second, so observers watching can take note - fi - done - - echo - echo - echo - - # if some tests fail, we want the bundlescript to fail, but we want to - # try running ALL the tests first, hence TESTS_FAILED - if [ "${#TESTS_FAILED[@]}" -gt 0 ]; then - echo "${RED}Test failures in: ${TESTS_FAILED[@]}${TEXTRESET}" - echo - false - else - echo "${GREEN}Test success${TEXTRESET}" - echo - true - fi - } 2>&1 | tee $DEST/test.log -} - -bundle_test diff -Nru docker.io-0.9.1~dfsg1/hack/make/test-integration docker.io-1.3.2~dfsg1/hack/make/test-integration --- docker.io-0.9.1~dfsg1/hack/make/test-integration 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make/test-integration 2014-11-24 17:38:01.000000000 +0000 @@ -1,16 +1,15 @@ #!/bin/bash +set -e DEST=$1 -set -e - bundle_test_integration() { LDFLAGS="$LDFLAGS $LDFLAGS_STATIC_DOCKER" go_test_dir ./integration \ - "-coverpkg $(find_dirs '*.go' | sed 's,^\.,github.com/dotcloud/docker,g' | paste -d, -s)" + "-coverpkg $(find_dirs '*.go' | sed 's,^\.,'$DOCKER_PKG',g' | paste -d, -s)" } # this "grep" hides some really irritating warnings that "go test -coverpkg" # spews when it is given packages that aren't used +exec > >(tee -a $DEST/test.log) 2>&1 bundle_test_integration 2>&1 \ - | grep --line-buffered -v '^warning: no packages being tested depend on ' \ - | tee $DEST/test.log + | grep --line-buffered -v '^warning: no packages being tested depend on ' diff -Nru docker.io-0.9.1~dfsg1/hack/make/test-integration-cli docker.io-1.3.2~dfsg1/hack/make/test-integration-cli --- docker.io-0.9.1~dfsg1/hack/make/test-integration-cli 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make/test-integration-cli 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,46 @@ +#!/bin/bash +set -e + +DEST=$1 + +DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} +DOCKER_EXECDRIVER=${DOCKER_EXECDRIVER:-native} + +bundle_test_integration_cli() { + go_test_dir ./integration-cli +} + +# subshell so that we can export PATH without breaking other things +exec > >(tee -a $DEST/test.log) 2>&1 +( + export PATH="$DEST/../binary:$DEST/../dynbinary:$PATH" + + if ! 
command -v docker &> /dev/null; then + echo >&2 'error: binary or dynbinary must be run before test-integration-cli' + false + fi + + # intentionally open a couple bogus file descriptors to help test that they get scrubbed in containers + exec 41>&1 42>&2 + + ( set -x; exec \ + docker --daemon --debug \ + --storage-driver "$DOCKER_GRAPHDRIVER" \ + --exec-driver "$DOCKER_EXECDRIVER" \ + --pidfile "$DEST/docker.pid" \ + &> "$DEST/docker.log" + ) & + + # give the daemon a moment to start, then make sure the busybox image is available before running the tests + sleep 2 + + source "$(dirname "$BASH_SOURCE")/.ensure-busybox" + + bundle_test_integration_cli + + for pid in $(find "$DEST" -name docker.pid); do + DOCKER_PID=$(set -x; cat "$pid") + ( set -x; kill $DOCKER_PID ) + wait $DOCKER_PID || true + done +) diff -Nru docker.io-0.9.1~dfsg1/hack/make/test-unit docker.io-1.3.2~dfsg1/hack/make/test-unit --- docker.io-0.9.1~dfsg1/hack/make/test-unit 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make/test-unit 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,84 @@ +#!/bin/bash +set -e + +DEST=$1 +: ${PARALLEL_JOBS:=$(nproc)} + +RED=$'\033[31m' +GREEN=$'\033[32m' +TEXTRESET=$'\033[0m' # reset the foreground colour + +# Run Docker's test suite, including sub-packages, and store their output as a bundle +# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. +# You can use this to select certain tests to run, eg. +# +# TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test-unit +# +bundle_test_unit() { + { + date + + # Run all the tests if no TESTDIRS were specified. + if [ -z "$TESTDIRS" ]; then + TESTDIRS=$(find_dirs '*_test.go') + fi + + if command -v parallel &> /dev/null; then ( + # accommodate parallel to be able to access variables + export SHELL="$BASH" + export HOME="$(mktemp -d)" + mkdir -p "$HOME/.parallel" + touch "$HOME/.parallel/ignored_vars" + export LDFLAGS="$LDFLAGS $LDFLAGS_STATIC_DOCKER" + export TESTFLAGS + export HAVE_GO_TEST_COVER + export DEST + # some hack to export array variables + export BUILDFLAGS_FILE="$HOME/buildflags_file" + ( IFS=$'\n'; echo "${BUILDFLAGS[*]}" ) > "$BUILDFLAGS_FILE" + + echo "$TESTDIRS" | parallel --jobs "$PARALLEL_JOBS" --halt 2 --env _ "$(dirname "$BASH_SOURCE")/.go-compile-test-dir" + rm -rf "$HOME" + ) else + # aww, no "parallel" available - fall back to boring + for test_dir in $TESTDIRS; do + "$(dirname "$BASH_SOURCE")/.go-compile-test-dir" "$test_dir" + done + fi + echo "$TESTDIRS" | go_run_test_dir + } +} + +go_run_test_dir() { + TESTS_FAILED=() + while read dir; do + echo + echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}" + precompiled="$DEST/precompiled/$dir.test" + if ! 
( cd "$dir" && "$precompiled" $TESTFLAGS ); then + TESTS_FAILED+=("$dir") + echo + echo "${RED}Tests failed: $dir${TEXTRESET}" + sleep 1 # give it a second, so observers watching can take note + fi + done + + echo + echo + echo + + # if some tests fail, we want the bundlescript to fail, but we want to + # try running ALL the tests first, hence TESTS_FAILED + if [ "${#TESTS_FAILED[@]}" -gt 0 ]; then + echo "${RED}Test failures in: ${TESTS_FAILED[@]}${TEXTRESET}" + echo + false + else + echo "${GREEN}Test success${TEXTRESET}" + echo + true + fi +} + +exec > >(tee -a $DEST/test.log) 2>&1 +bundle_test_unit diff -Nru docker.io-0.9.1~dfsg1/hack/make/ubuntu docker.io-1.3.2~dfsg1/hack/make/ubuntu --- docker.io-0.9.1~dfsg1/hack/make/ubuntu 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make/ubuntu 2014-11-24 17:38:01.000000000 +0000 @@ -8,8 +8,8 @@ fi PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)" -PACKAGE_URL="http://www.docker.io/" -PACKAGE_MAINTAINER="docker@dotcloud.com" +PACKAGE_URL="http://www.docker.com/" +PACKAGE_MAINTAINER="support@docker.com" PACKAGE_DESCRIPTION="Linux container runtime Docker complements LXC with a high-level API which operates at the process level. It runs unix processes with strong guarantees of isolation and @@ -36,7 +36,28 @@ mkdir -p $DIR/etc/default cp contrib/init/sysvinit-debian/docker.default $DIR/etc/default/docker mkdir -p $DIR/lib/systemd/system - cp contrib/init/systemd/docker.service $DIR/lib/systemd/system/ + cp contrib/init/systemd/docker.{service,socket} $DIR/lib/systemd/system/ + + # Include contributed completions + mkdir -p $DIR/etc/bash_completion.d + cp contrib/completion/bash/docker $DIR/etc/bash_completion.d/ + mkdir -p $DIR/usr/share/zsh/vendor-completions + cp contrib/completion/zsh/_docker $DIR/usr/share/zsh/vendor-completions/ + mkdir -p $DIR/etc/fish/completions + cp contrib/completion/fish/docker.fish $DIR/etc/fish/completions/ + + # Include contributed man pages + docs/man/md2man-all.sh -q + manRoot="$DIR/usr/share/man" + mkdir -p "$manRoot" + for manDir in docs/man/man?; do + manBase="$(basename "$manDir")" # "man1" + for manFile in "$manDir"/*; do + manName="$(basename "$manFile")" # "docker-build.1" + mkdir -p "$manRoot/$manBase" + gzip -c "$manFile" > "$manRoot/$manBase/$manName.gz" + done + done # Copy the binary # This will fail if the binary bundle hasn't been built @@ -114,7 +135,7 @@ --deb-recommends ca-certificates \ --deb-recommends git \ --deb-recommends xz-utils \ - --deb-suggests cgroup-lite \ + --deb-recommends 'cgroupfs-mount | cgroup-lite' \ --description "$PACKAGE_DESCRIPTION" \ --maintainer "$PACKAGE_MAINTAINER" \ --conflicts docker \ diff -Nru docker.io-0.9.1~dfsg1/hack/make/.validate docker.io-1.3.2~dfsg1/hack/make/.validate --- docker.io-0.9.1~dfsg1/hack/make/.validate 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make/.validate 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,33 @@ +#!/bin/bash + +if [ -z "$VALIDATE_UPSTREAM" ]; then + # this is kind of an expensive check, so let's not do this twice if we + # are running more than one validate bundlescript + + VALIDATE_REPO='https://github.com/docker/docker.git' + VALIDATE_BRANCH='master' + + if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then + VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git" + VALIDATE_BRANCH="${TRAVIS_BRANCH}" + fi + + VALIDATE_HEAD="$(git rev-parse --verify HEAD)" + + git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH" + VALIDATE_UPSTREAM="$(git rev-parse 
--verify FETCH_HEAD)" + + VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD" + VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD" + + validate_diff() { + if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then + git diff "$VALIDATE_COMMIT_DIFF" "$@" + fi + } + validate_log() { + if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then + git log "$VALIDATE_COMMIT_LOG" "$@" + fi + } +fi diff -Nru docker.io-0.9.1~dfsg1/hack/make/validate-dco docker.io-1.3.2~dfsg1/hack/make/validate-dco --- docker.io-0.9.1~dfsg1/hack/make/validate-dco 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make/validate-dco 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,56 @@ +#!/bin/bash + +source "$(dirname "$BASH_SOURCE")/.validate" + +adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }') +dels=$(validate_diff --numstat | awk '{ s += $2 } END { print s }') +notDocs="$(validate_diff --numstat | awk '$3 !~ /^docs\// { print $3 }')" + +: ${adds:=0} +: ${dels:=0} + +# "Username may only contain alphanumeric characters or dashes and cannot begin with a dash" +githubUsernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+' + +# https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work +dcoPrefix='Signed-off-by:' +dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($githubUsernameRegex)\\))?$" + +check_dco() { + grep -qE "$dcoRegex" +} + +if [ $adds -eq 0 -a $dels -eq 0 ]; then + echo '0 adds, 0 deletions; nothing to validate! :)' +elif [ -z "$notDocs" -a $adds -le 1 -a $dels -le 1 ]; then + echo 'Congratulations! DCO small-patch-exception material!' +else + commits=( $(validate_log --format='format:%H%n') ) + badCommits=() + for commit in "${commits[@]}"; do + if [ -z "$(git log -1 --format='format:' --name-status "$commit")" ]; then + # no content (ie, Merge commit, etc) + continue + fi + if ! git log -1 --format='format:%B' "$commit" | check_dco; then + badCommits+=( "$commit" ) + fi + done + if [ ${#badCommits[@]} -eq 0 ]; then + echo "Congratulations! All commits are properly signed with the DCO!" + else + { + echo "These commits do not have a proper '$dcoPrefix' marker:" + for commit in "${badCommits[@]}"; do + echo " - $commit" + done + echo + echo 'Please amend each commit to include a properly formatted DCO marker.' + echo + echo 'Visit the following URL for information about the Docker DCO:' + echo ' https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work' + echo + } >&2 + false + fi +fi diff -Nru docker.io-0.9.1~dfsg1/hack/make/validate-gofmt docker.io-1.3.2~dfsg1/hack/make/validate-gofmt --- docker.io-0.9.1~dfsg1/hack/make/validate-gofmt 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make/validate-gofmt 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,30 @@ +#!/bin/bash + +source "$(dirname "$BASH_SOURCE")/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # we use "git show" here to validate that what's committed is formatted + if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then + badFiles+=( "$f" ) + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files are properly formatted.' +else + { + echo "These files are not properly gofmt'd:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + echo 'Please reformat the above files using "gofmt -s -w" and commit the result.' 
+ echo + } >&2 + false +fi diff -Nru docker.io-0.9.1~dfsg1/hack/make.sh docker.io-1.3.2~dfsg1/hack/make.sh --- docker.io-0.9.1~dfsg1/hack/make.sh 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/make.sh 2014-11-24 17:38:01.000000000 +0000 @@ -6,7 +6,7 @@ # # Requirements: # - The current directory should be a checkout of the docker source code -# (http://github.com/dotcloud/docker). Whatever version is checked out +# (http://github.com/docker/docker). Whatever version is checked out # will be built. # - The VERSION file, at the root of the repository, should exist, and # will be used as Docker binary version and package version. @@ -18,14 +18,16 @@ # - The right way to call this script is to invoke "make" from # your checkout of the Docker repository. # the Makefile will do a "docker build -t docker ." and then -# "docker run hack/make.sh" in the resulting container image. +# "docker run hack/make.sh" in the resulting image. # set -o pipefail +export DOCKER_PKG='github.com/docker/docker' + # We're a nice, sexy, little shell script, and people might try to run us; # but really, they shouldn't. We want to be in a container! -if [ "$(pwd)" != '/go/src/github.com/dotcloud/docker' ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then +if [ "$(pwd)" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then { echo "# WARNING! I don't seem to be running in the Docker container." echo "# The result of this command might be an incorrect build, and will not be" @@ -40,12 +42,19 @@ # List of bundles to create when no argument is passed DEFAULT_BUNDLES=( + validate-dco + validate-gofmt + binary - test + + test-unit test-integration + test-integration-cli + dynbinary - dyntest + dyntest-unit dyntest-integration + cover cross tgz @@ -70,8 +79,8 @@ if [ "$AUTO_GOPATH" ]; then rm -rf .gopath - mkdir -p .gopath/src/github.com/dotcloud - ln -sf ../../../.. .gopath/src/github.com/dotcloud/docker + mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")" + ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}" export GOPATH="$(pwd)/.gopath:$(pwd)/vendor" fi @@ -81,25 +90,53 @@ exit 1 fi +if [ -z "$DOCKER_CLIENTONLY" ]; then + DOCKER_BUILDTAGS+=" daemon" +fi + # Use these flags when compiling the tests and final binary LDFLAGS=' -w - -X github.com/dotcloud/docker/dockerversion.GITCOMMIT "'$GITCOMMIT'" - -X github.com/dotcloud/docker/dockerversion.VERSION "'$VERSION'" + -X '$DOCKER_PKG'/dockerversion.GITCOMMIT "'$GITCOMMIT'" + -X '$DOCKER_PKG'/dockerversion.VERSION "'$VERSION'" ' LDFLAGS_STATIC='-linkmode external' EXTLDFLAGS_STATIC='-static' -BUILDFLAGS=( -a -tags "netgo $DOCKER_BUILDTAGS" ) +# ORIG_BUILDFLAGS is necessary for the cross target which cannot always build +# with options like -race. +ORIG_BUILDFLAGS=( -a -tags "netgo static_build $DOCKER_BUILDTAGS" ) +BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" ) +# Test timeout. +: ${TIMEOUT:=30m} +TESTFLAGS+=" -test.timeout=${TIMEOUT}" # A few more flags that are specific just to building a completely-static binary (see hack/make/binary) # PLEASE do not use these anywhere else. 
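The static-only flags that follow feed hack/make/binary, which is what produces the fully static `binary` bundle. One cheap way to confirm a build really came out static (a sketch; the path follows the bundles/VERSION/binary convention used by these scripts):

```bash
# ldd reports "not a dynamic executable" for a static binary and exits
# non-zero in that case, hence the || true.
VERSION=$(cat VERSION)
ldd "bundles/$VERSION/binary/docker-$VERSION" || true
file "bundles/$VERSION/binary/docker-$VERSION"   # expect "statically linked"
```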
EXTLDFLAGS_STATIC_DOCKER="$EXTLDFLAGS_STATIC -lpthread -Wl,--unresolved-symbols=ignore-in-object-files" LDFLAGS_STATIC_DOCKER=" $LDFLAGS_STATIC - -X github.com/dotcloud/docker/dockerversion.IAMSTATIC true + -X $DOCKER_PKG/dockerversion.IAMSTATIC true -extldflags \"$EXTLDFLAGS_STATIC_DOCKER\" " +if [ "$(uname -s)" = 'FreeBSD' ]; then + # Tell cgo the compiler is Clang, not GCC + # https://code.google.com/p/go/source/browse/src/cmd/cgo/gcc.go?spec=svne77e74371f2340ee08622ce602e9f7b15f29d8d3&r=e6794866ebeba2bf8818b9261b54e2eef1c9e588#752 + export CC=clang + + # "-extld clang" is a workaround for + # https://code.google.com/p/go/issues/detail?id=6845 + LDFLAGS="$LDFLAGS -extld clang" +fi + +# If sqlite3.h doesn't exist under /usr/include, +# check /usr/local/include also just in case +# (e.g. FreeBSD Ports installs it under the directory) +if [ ! -e /usr/include/sqlite3.h ] && [ -e /usr/local/include/sqlite3.h ]; then + export CGO_CFLAGS='-I/usr/local/include' + export CGO_LDFLAGS='-L/usr/local/lib' +fi + HAVE_GO_TEST_COVER= if \ go help testflag | grep -- -cover > /dev/null \ @@ -125,7 +162,8 @@ testcover=( -cover -coverprofile "$coverprofile" $coverpkg ) fi ( - set -x + export DEST + echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}" cd "$dir" go test ${testcover[@]} -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS ) @@ -135,8 +173,18 @@ # holding certain files ($1 parameter), and prints their paths on standard # output, one per line. find_dirs() { - find -not \( \ - \( -wholename './vendor' -o -wholename './integration' -o -wholename './contrib' -o -wholename './pkg/mflag/example' \) \ + find . -not \( \ + \( \ + -wholename './vendor' \ + -o -wholename './integration' \ + -o -wholename './integration-cli' \ + -o -wholename './contrib' \ + -o -wholename './pkg/mflag/example' \ + -o -wholename './.git' \ + -o -wholename './bundles' \ + -o -wholename './docs' \ + -o -wholename './pkg/libcontainer/nsinit' \ + \) \ -prune \ \) -name "$1" -print0 | xargs -0n1 dirname | sort -u } diff -Nru docker.io-0.9.1~dfsg1/hack/PACKAGERS.md docker.io-1.3.2~dfsg1/hack/PACKAGERS.md --- docker.io-0.9.1~dfsg1/hack/PACKAGERS.md 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/PACKAGERS.md 2014-11-24 17:38:01.000000000 +0000 @@ -45,10 +45,10 @@ To build Docker, you will need the following: * A recent version of git and mercurial -* Go version 1.2 or later +* Go version 1.3 or later * A clean checkout of the source added to a valid [Go workspace](http://golang.org/doc/code.html#Workspaces) under the path - *src/github.com/dotcloud/docker* (unless you plan to use `AUTO_GOPATH`, + *src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`, explained in more detail below). To build the Docker daemon, you will additionally need: @@ -145,18 +145,46 @@ ``` This will cause the build scripts to set up a reasonable `GOPATH` that -automatically and properly includes both dotcloud/docker from the local +automatically and properly includes both docker/docker from the local directory, and the local "./vendor" directory as necessary. ### `DOCKER_BUILDTAGS` If you're building a binary that may need to be used on platforms that include AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows: - ```bash export DOCKER_BUILDTAGS='apparmor' ``` +If you're building a binary that may need to be used on platforms that include +SELinux, you will need to use the `selinux` build tag: +```bash +export DOCKER_BUILDTAGS='selinux' +``` + +There are build tags for disabling graphdrivers as well. 
By default, support +for all graphdrivers is built in. + +To disable btrfs: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs' +``` + +To disable devicemapper: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper' +``` + +To disable aufs: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_aufs' +``` + +NOTE: if you need to set more than one build tag, space-separate them: +```bash +export DOCKER_BUILDTAGS='apparmor selinux exclude_graphdriver_aufs' +``` + ### Static Daemon If it is feasible within the constraints of your distribution, you should @@ -238,7 +266,14 @@ installed and available at runtime: * iptables version 1.4 or later +* procps (or similar provider of a "ps" executable) * XZ Utils version 4.9 or later +* a [properly + mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) + cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point + [is](https://github.com/docker/docker/issues/2683) + [not](https://github.com/docker/docker/issues/3485) + [sufficient](https://github.com/docker/docker/issues/4568)) Additionally, the Docker client needs the following software to be installed and available at runtime: @@ -263,10 +298,10 @@ Some of Docker's features are activated by using optional command-line flags or by having support for them in the kernel or userspace. A few examples include: -* LXC execution driver (requires version 0.8 or later of the LXC utility scripts) +* LXC execution driver (requires version 1.0 or later of the LXC utility scripts) * AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at least the "auplink" utility from aufs-tools) -* experimental BTRFS graph driver (requires BTRFS support enabled in the kernel) +* BTRFS graph driver (requires BTRFS support enabled in the kernel) ## Daemon Init Script diff -Nru docker.io-0.9.1~dfsg1/hack/RELEASE-CHECKLIST.md docker.io-1.3.2~dfsg1/hack/RELEASE-CHECKLIST.md --- docker.io-0.9.1~dfsg1/hack/RELEASE-CHECKLIST.md 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/RELEASE-CHECKLIST.md 2014-11-24 17:38:01.000000000 +0000 @@ -6,26 +6,76 @@ If your experience deviates from this document, please document the changes to keep it up-to-date. +It is important to note that this document assumes that the git remote in your +repository that corresponds to "https://github.com/docker/docker" is named +"origin". If yours is not (for example, if you've chosen to name it "upstream" +or something similar instead), be sure to adjust the listed snippets for your +local environment accordingly. If you are not sure what your upstream remote is +named, use a command like `git remote -v` to find out. + +If you don't have an upstream remote, you can add one easily using something +like: + +```bash +export GITHUBUSER="YOUR_GITHUB_USER" +git remote add origin https://github.com/docker/docker.git +git remote add $GITHUBUSER git@github.com:$GITHUBUSER/docker.git +``` + ### 1. Pull from master and create a release branch +Note: Even for major releases, all of X, Y and Z in vX.Y.Z must be specified (e.g. v1.0.0). + ```bash export VERSION=vX.Y.Z -git checkout release -git fetch -git reset --hard origin/release +git fetch origin +git branch -D release || true +git checkout --track origin/release git checkout -b bump_$VERSION +``` + +If it's a regular release, we usually merge master. +```bash git merge origin/master ``` +Otherwise, if it is a hotfix release, we cherry-pick only the commits we want. 
+```bash +# get the commit IDs we want to cherry-pick +git log +# cherry-pick the commits starting from the oldest one, without including merge commits +git cherry-pick +git cherry-pick +... +``` + ### 2. Update CHANGELOG.md -You can run this command for reference: +You can run this command for reference with git 2.0: ```bash -LAST_VERSION=$(git tag | grep -E 'v[0-9\.]+$' | sort -nr | head -n 1) -git log --stat $LAST_VERSION..HEAD +git fetch --tags +LAST_VERSION=$(git tag -l --sort=-version:refname "v*" | grep -E 'v[0-9\.]+$' | head -1) +git log --stat $LAST_VERSION..bump_$VERSION ``` +If you don't have git 2.0 but have a sort command that supports `-V`: +```bash +git fetch --tags +LAST_VERSION=$(git tag -l | grep -E 'v[0-9\.]+$' | sort -rV | head -1) +git log --stat $LAST_VERSION..bump_$VERSION +``` + +If releasing a major version (X or Y increased in vX.Y.Z), simply listing notable user-facing features is sufficient. +```markdown +#### Notable features since +* New docker command to do something useful +* Remote API change (deprecating old version) +* Performance improvements in some use cases +* ... +``` + +For minor releases (only Z increases in vX.Y.Z), provide a list of user-facing changes. Each change should be listed under a category heading formatted as `#### CATEGORY`. `CATEGORY` should describe which part of the project is affected. @@ -55,7 +105,7 @@ #### Builder -+ 'docker build -t FOO .' applies the tag FOO to the newly built container ++ 'docker build -t FOO .' applies the tag FOO to the newly built image #### Remote API @@ -80,61 +130,64 @@ echo ${VERSION#v} > VERSION ``` -### 4. Run all tests +### 4. Test the docs + +Make sure that your tree includes documentation for any modified or +new features, syntax or semantic changes. + +To test locally: ```bash -make test +make docs ``` -### 5. Test the docs +To make a shared test at http://beta-docs.docker.io: -Make sure that your tree includes documentation for any modified or -new features, syntax or semantic changes. Instructions for building -the docs are in `docs/README.md`. +(You will need the `awsconfig` file added to the `docs/` dir) + +```bash +make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release +``` -### 6. Commit and create a pull request to the "release" branch +### 5. Commit and create a pull request to the "release" branch ```bash git add VERSION CHANGELOG.md git commit -m "Bump version to $VERSION" -git push origin bump_$VERSION -echo "https://github.com/dotcloud/docker/compare/release...bump_$VERSION" +git push $GITHUBUSER bump_$VERSION +echo "https://github.com/$GITHUBUSER/docker/compare/docker:release...$GITHUBUSER:bump_$VERSION?expand=1" ``` That last command will give you the proper link to visit to ensure that you open the PR against the "release" branch instead of accidentally against "master" (like so many brave souls before you already have). -### 7. Get 2 other maintainers to validate the pull request +### 6. Get 2 other maintainers to validate the pull request + +### 7. Publish binaries -### 8. Publish binaries +To run this you will need access to the release credentials. Get them from the Core maintainers. -To run this you will need access to the release credentials. -Get them from [the infrastructure maintainers]( -https://github.com/dotcloud/docker/blob/master/hack/infrastructure/MAINTAINERS). +Replace "..." with the respective credentials: ```bash docker build -t docker . 
-export AWS_S3_BUCKET="test.docker.io" -export AWS_ACCESS_KEY="$(cat ~/.aws/access_key)" -export AWS_SECRET_KEY="$(cat ~/.aws/secret_key)" -export GPG_PASSPHRASE=supersecretsesame docker run \ - -e AWS_S3_BUCKET=test.docker.io \ - -e AWS_ACCESS_KEY \ - -e AWS_SECRET_KEY \ - -e GPG_PASSPHRASE \ - -i -t -privileged \ + -e AWS_S3_BUCKET=test.docker.com \ + -e AWS_ACCESS_KEY="..." \ + -e AWS_SECRET_KEY="..." \ + -e GPG_PASSPHRASE="..." \ + -i -t --privileged \ docker \ hack/release.sh ``` -It will run the test suite one more time, build the binaries and packages, -and upload to the specified bucket (you should use test.docker.io for -general testing, and once everything is fine, switch to get.docker.io as +It will run the test suite, build the binaries and packages, +and upload to the specified bucket (you should use test.docker.com for +general testing, and once everything is fine, switch to get.docker.com as noted below). -After the binaries and packages are uploaded to test.docker.io, make sure +After the binaries and packages are uploaded to test.docker.com, make sure they get tested in both Ubuntu and Debian for any obvious installation issues or runtime issues. @@ -142,51 +195,86 @@ help testing! An easy way to get some useful links for sharing: ```bash -echo "Ubuntu/Debian install script: curl -sLS https://test.docker.io/ | sh" -echo "Linux 64bit binary: https://test.docker.io/builds/Linux/x86_64/docker-${VERSION#v}" -echo "Darwin/OSX 64bit client binary: https://test.docker.io/builds/Darwin/x86_64/docker-${VERSION#v}" -echo "Darwin/OSX 32bit client binary: https://test.docker.io/builds/Darwin/i386/docker-${VERSION#v}" -echo "Linux 64bit tgz: https://test.docker.io/builds/Linux/x86_64/docker-${VERSION#v}.tgz" +echo "Ubuntu/Debian: https://test.docker.com/ubuntu or curl -sSL https://test.docker.com/ | sh" +echo "Linux 64bit binary: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}" +echo "Darwin/OSX 64bit client binary: https://test.docker.com/builds/Darwin/x86_64/docker-${VERSION#v}" +echo "Darwin/OSX 32bit client binary: https://test.docker.com/builds/Darwin/i386/docker-${VERSION#v}" +echo "Linux 64bit tgz: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}.tgz" ``` Once they're tested and reasonably believed to be working, run against -get.docker.io: +get.docker.com: ```bash docker run \ - -e AWS_S3_BUCKET=get.docker.io \ - -e AWS_ACCESS_KEY \ - -e AWS_SECRET_KEY \ - -e GPG_PASSPHRASE \ - -i -t -privileged \ + -e AWS_S3_BUCKET=get.docker.com \ + -e AWS_ACCESS_KEY="..." \ + -e AWS_SECRET_KEY="..." \ + -e GPG_PASSPHRASE="..." \ + -i -t --privileged \ docker \ hack/release.sh ``` +### 8. Breakathon + +Spend several days along with the community explicitly investing time and +resources to try and break Docker in every possible way, documenting any +findings pertinent to the release. This time should be spent testing and +finding ways in which the release might have caused various features or upgrade +environments to have issues, not coding. During this time, the release is in +code freeze, and any additional code changes will be pushed out to the next +release. + +It should include various levels of breaking Docker, beyond just using Docker +by the book. + +Any issues found may still remain issues for this release, but they should be +documented and give appropriate warnings. + ### 9. Apply tag +It's very important that we don't make the tag until after the official +release is uploaded to get.docker.com! 
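Before tagging, it is cheap to verify that the upload actually landed; a sketch reusing the URL patterns from step 7 (`curl -f` turns HTTP errors into a non-zero exit status):

```bash
# Fail loudly if the published binary or the install script is missing.
curl -fsSL -o /dev/null "https://get.docker.com/builds/Linux/x86_64/docker-${VERSION#v}" \
    && echo "release binary is live"
curl -fsSL https://get.docker.com/ | head -1   # install script should be served
```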
+
 ```bash
 git tag -a $VERSION -m $VERSION bump_$VERSION
 git push origin $VERSION
 ```
 
-It's very important that we don't make the tag until after the official
-release is uploaded to get.docker.io!
-
 ### 10. Go to github to merge the `bump_$VERSION` branch into release
 
-Don't delete the leftover branch just yet, as we will need it for the next step.
+Don't forget to push that pretty blue button to delete the leftover
+branch afterwards!
 
-### 11. Go to github to merge the `bump_$VERSION` branch into docs
+### 11. Update the docs branch
 
-Merging the pull request to the docs branch will automatically
-update the documentation on the "latest" revision of the docs. You
-should see the updated docs 5-10 minutes after the merge. The docs
-will appear on http://docs.docker.io/. For more information about
-documentation releases, see `docs/README.md`.
+If this is a MAJOR.MINOR.0 release, you need to make a branch for the previous release's
+documentation:
 
-Don't forget to push that pretty blue button to delete the leftover
-branch afterwards!
+```bash
+git checkout -b docs-$PREVIOUS_MAJOR_MINOR docs
+git fetch
+git reset --hard origin/docs
+git push -f origin docs-$PREVIOUS_MAJOR_MINOR
+```
+
+You will need the `awsconfig` file added to the `docs/` directory to contain the
+s3 credentials for the bucket you are deploying to.
+
+```bash
+git checkout -b docs release || git checkout docs
+git fetch
+git reset --hard origin/release
+git push -f origin docs
+make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes docs-release
+```
+
+The docs will appear on http://docs.docker.com/ (though there may be cached
+versions, so it's worth checking http://docs.docker.com.s3-website-us-east-1.amazonaws.com/).
+For more information about documentation releases, see `docs/README.md`.
+
+Ask Sven or JohnC to invalidate the CloudFront cache using the CDN Planet Chrome applet.
 
 ### 12. Create a new pull request to merge release back into master
 
@@ -199,8 +287,8 @@
 echo ${VERSION#v}-dev > VERSION
 git add VERSION
 git commit -m "Change version to $(cat VERSION)"
-git push origin merge_release_$VERSION
-echo "https://github.com/dotcloud/docker/compare/master...merge_release_$VERSION"
+git push $GITHUBUSER merge_release_$VERSION
+echo "https://github.com/$GITHUBUSER/docker/compare/docker:master...$GITHUBUSER:merge_release_$VERSION?expand=1"
 ```
 
 Again, get two maintainers to validate, then merge, then push that pretty
diff -Nru docker.io-0.9.1~dfsg1/hack/release.sh docker.io-1.3.2~dfsg1/hack/release.sh
--- docker.io-0.9.1~dfsg1/hack/release.sh	2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/hack/release.sh	2014-11-24 17:38:01.000000000 +0000
@@ -27,11 +27,11 @@
 - a generous amount of good will and nice manners.
 The canonical way to run me is to run the image produced by the Dockerfile: e.g.:"
 
-docker run -e AWS_S3_BUCKET=get-staging.docker.io \
-	-e AWS_ACCESS_KEY=AKI1234... \
-	-e AWS_SECRET_KEY=sEs4mE... \
-	-e GPG_PASSPHRASE=m0resEs4mE... \
-	-i -t -privileged \
+docker run -e AWS_S3_BUCKET=test.docker.com \
+	-e AWS_ACCESS_KEY=... \
+	-e AWS_SECRET_KEY=... \
+	-e GPG_PASSPHRASE=...
\ + -i -t --privileged \ docker ./hack/release.sh EOF exit 1 @@ -41,8 +41,8 @@ [ "$AWS_ACCESS_KEY" ] || usage [ "$AWS_SECRET_KEY" ] || usage [ "$GPG_PASSPHRASE" ] || usage -[ -d /go/src/github.com/dotcloud/docker ] || usage -cd /go/src/github.com/dotcloud/docker +[ -d /go/src/github.com/docker/docker ] || usage +cd /go/src/github.com/docker/docker [ -x hack/make.sh ] || usage RELEASE_BUNDLES=( @@ -53,16 +53,20 @@ ) if [ "$1" != '--release-regardless-of-test-failure' ]; then - RELEASE_BUNDLES=( test "${RELEASE_BUNDLES[@]}" ) + RELEASE_BUNDLES=( + test-unit test-integration + "${RELEASE_BUNDLES[@]}" + test-integration-cli + ) fi - + VERSION=$(cat VERSION) BUCKET=$AWS_S3_BUCKET # These are the 2 keys we've used to sign the deb's -# release (get.docker.io) +# release (get.docker.com) # GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9" -# test (test.docker.io) +# test (test.docker.com) # GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6" setup_s3() { @@ -88,7 +92,7 @@ s3_url() { case "$BUCKET" in - get.docker.io|test.docker.io) + get.docker.com|test.docker.com) echo "https://$BUCKET" ;; *) @@ -266,19 +270,33 @@ done # Upload keys - s3cmd sync /.gnupg/ s3://$BUCKET/ubuntu/.gnupg/ + s3cmd sync $HOME/.gnupg/ s3://$BUCKET/ubuntu/.gnupg/ gpg --armor --export releasedocker > bundles/$VERSION/ubuntu/gpg s3cmd --acl-public put bundles/$VERSION/ubuntu/gpg s3://$BUCKET/gpg + local gpgFingerprint=36A1D7869245C8950F966E92D8576A8BA88D21E9 + if [[ $BUCKET == test* ]]; then + gpgFingerprint=740B314AE3941731B942C66ADF4FD13717AAD7D6 + fi + # Upload repo s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/ cat < /etc/apt/sources.list.d/docker.list + # Then import the repository key -apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 +apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys $gpgFingerprint + # Install docker -apt-get update ; apt-get install -y lxc-docker +apt-get update +apt-get install -y lxc-docker # # Alternatively, just use the curl-able install.sh script provided at $(s3_url) @@ -309,7 +327,7 @@ cat </dev/null || { gpg --gen-key --batch <@]+@[^<>]+)> \(github: (\S+)\)$'.format(re.escape(DCO)), re.MULTILINE|re.UNICODE) - -failed_commits = 0 - -for commit in commits: - commit['message'] = commit['message'][1:] - # trim off our '.' 
that exists just to prevent fun YAML parsing issues - # see https://github.com/dotcloud/docker/pull/3836#issuecomment-33723094 - # and https://travis-ci.org/dotcloud/docker/builds/17926783 - - commit['stat'] = subprocess.check_output([ - 'git', 'log', '--format=format:', '--max-count=1', - '--name-status', commit['hash'], '--', - ]) - if commit['stat'] == '': - print 'Commit {0} has no actual changed content, skipping.'.format(commit['hash']) - continue - - m = p.search(commit['message']) - if not m: - print 'Commit {1} does not have a properly formatted "{0}" marker.'.format(DCO, commit['hash']) - failed_commits += 1 - continue # print ALL the commits that don't have a proper DCO - - (name, email, github) = m.groups() - - # TODO verify that "github" is the person who actually made this commit via the GitHub API - -if failed_commits > 0: - exit(failed_commits) - -print 'All commits have a valid "{0}" marker.'.format(DCO) -exit(0) diff -Nru docker.io-0.9.1~dfsg1/hack/travis/env.py docker.io-1.3.2~dfsg1/hack/travis/env.py --- docker.io-0.9.1~dfsg1/hack/travis/env.py 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/travis/env.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -import os -import subprocess - -if 'TRAVIS' not in os.environ: - print 'TRAVIS is not defined; this should run in TRAVIS. Sorry.' - exit(127) - -if os.environ['TRAVIS_PULL_REQUEST'] != 'false': - commit_range = ['upstream/' + os.environ['TRAVIS_BRANCH'], 'FETCH_HEAD'] -else: - try: - subprocess.check_call([ - 'git', 'log', '-1', '--format=format:', - os.environ['TRAVIS_COMMIT_RANGE'], '--', - ]) - commit_range = os.environ['TRAVIS_COMMIT_RANGE'].split('...') - if len(commit_range) == 1: # if it didn't split, it must have been separated by '..' instead - commit_range = commit_range[0].split('..') - except subprocess.CalledProcessError: - print 'TRAVIS_COMMIT_RANGE is invalid. This seems to be a force push. We will just assume it must be against upstream master and compare all commits in between.' - commit_range = ['upstream/master', 'HEAD'] diff -Nru docker.io-0.9.1~dfsg1/hack/travis/gofmt.py docker.io-1.3.2~dfsg1/hack/travis/gofmt.py --- docker.io-0.9.1~dfsg1/hack/travis/gofmt.py 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/travis/gofmt.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ -#!/usr/bin/env python -import subprocess - -from env import commit_range - -files = subprocess.check_output([ - 'git', 'diff', '--diff-filter=ACMR', - '--name-only', '...'.join(commit_range), '--', -]) - -exit_status = 0 - -for filename in files.split('\n'): - if filename.startswith('vendor/'): - continue # we can't be changing our upstream vendors for gofmt, so don't even check them - - if filename.endswith('.go'): - try: - out = subprocess.check_output(['gofmt', '-s', '-l', filename]) - if out != '': - print out, - exit_status = 1 - except subprocess.CalledProcessError: - exit_status = 1 - -if exit_status != 0: - print 'Reformat the files listed above with "gofmt -s -w" and try again.' - exit(exit_status) - -print 'All files pass gofmt.' 
-exit(0) diff -Nru docker.io-0.9.1~dfsg1/hack/vendor.sh docker.io-1.3.2~dfsg1/hack/vendor.sh --- docker.io-0.9.1~dfsg1/hack/vendor.sh 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/hack/vendor.sh 2014-11-24 17:38:01.000000000 +0000 @@ -39,22 +39,33 @@ echo done } -clone git github.com/kr/pty 3b1f6487b +clone git github.com/kr/pty 67e2db24c8 -clone git github.com/gorilla/context 708054d61e5 +clone git github.com/gorilla/context 14f550f51a -clone git github.com/gorilla/mux 9b36453141c +clone git github.com/gorilla/mux 136d54f81f -clone git github.com/syndtr/gocapability 3454319be2 +clone git github.com/tchap/go-patricia v1.0.1 clone hg code.google.com/p/go.net 84a4013f96e0 clone hg code.google.com/p/gosqlite 74691fb6f837 -# get Go tip's archive/tar, for xattr support -# TODO after Go 1.3 drops, bump our minimum supported version and drop this vendored dep -clone hg code.google.com/p/go a15f344a9efa -mv src/code.google.com/p/go/src/pkg/archive/tar tmp-tar -rm -rf src/code.google.com/p/go -mkdir -p src/code.google.com/p/go/src/pkg/archive -mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar +clone git github.com/docker/libtrust d273ef2565ca + +# get Go tip's archive/tar, for xattr support and improved performance +# TODO after Go 1.4 drops, bump our minimum supported version and drop this vendored dep +if [ "$1" = '--go' ]; then + # Go takes forever and a half to clone, so we only redownload it when explicitly requested via the "--go" flag to this script. + clone hg code.google.com/p/go 1b17b3426e3c + mv src/code.google.com/p/go/src/pkg/archive/tar tmp-tar + rm -rf src/code.google.com/p/go + mkdir -p src/code.google.com/p/go/src/pkg/archive + mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar +fi + +clone git github.com/docker/libcontainer 8d1d0ba38a7348c5cfdc05aea3be34d75aadc8de +# see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file) +rm -rf src/github.com/docker/libcontainer/vendor +eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli')" +# we exclude "github.com/codegangsta/cli" here because it's only needed for "nsinit", which Docker doesn't include diff -Nru docker.io-0.9.1~dfsg1/image/graph.go docker.io-1.3.2~dfsg1/image/graph.go --- docker.io-0.9.1~dfsg1/image/graph.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/image/graph.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,11 @@ +package image + +import ( + "github.com/docker/docker/daemon/graphdriver" +) + +type Graph interface { + Get(id string) (*Image, error) + ImageRoot(id string) string + Driver() graphdriver.Driver +} diff -Nru docker.io-0.9.1~dfsg1/image/image.go docker.io-1.3.2~dfsg1/image/image.go --- docker.io-0.9.1~dfsg1/image/image.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/image/image.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,255 @@ +package image + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + "time" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" +) + +// Set the max depth to the aufs default that most +// kernels are compiled with +// For more information see: http://sourceforge.net/p/aufs/aufs3-standalone/ci/aufs3.12/tree/config.mk +const MaxImageDepth = 127 + +type Image struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string 
`json:"comment,omitempty"` + Created time.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig runconfig.Config `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *runconfig.Config `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + OS string `json:"os,omitempty"` + Size int64 + + graph Graph +} + +func LoadImage(root string) (*Image, error) { + // Load the json data + jsonData, err := ioutil.ReadFile(jsonPath(root)) + if err != nil { + return nil, err + } + img := &Image{} + + if err := json.Unmarshal(jsonData, img); err != nil { + return nil, err + } + if err := utils.ValidateID(img.ID); err != nil { + return nil, err + } + + if buf, err := ioutil.ReadFile(path.Join(root, "layersize")); err != nil { + if !os.IsNotExist(err) { + return nil, err + } + // If the layersize file does not exist then set the size to a negative number + // because a layer size of 0 (zero) is valid + img.Size = -1 + } else { + size, err := strconv.Atoi(string(buf)) + if err != nil { + return nil, err + } + img.Size = int64(size) + } + + return img, nil +} + +func StoreImage(img *Image, jsonData []byte, layerData archive.ArchiveReader, root string) error { + // Store the layer + var ( + size int64 + err error + driver = img.graph.Driver() + ) + + // If layerData is not nil, unpack it into the new layer + if layerData != nil { + if size, err = driver.ApplyDiff(img.ID, img.Parent, layerData); err != nil { + return err + } + } + + img.Size = size + if err := img.SaveSize(root); err != nil { + return err + } + + // If raw json is provided, then use it + if jsonData != nil { + if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil { + return err + } + } else { + if jsonData, err = json.Marshal(img); err != nil { + return err + } + if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil { + return err + } + } + return nil +} + +func (img *Image) SetGraph(graph Graph) { + img.graph = graph +} + +// SaveSize stores the current `size` value of `img` in the directory `root`. +func (img *Image) SaveSize(root string) error { + if err := ioutil.WriteFile(path.Join(root, "layersize"), []byte(strconv.Itoa(int(img.Size))), 0600); err != nil { + return fmt.Errorf("Error storing image size in %s/layersize: %s", root, err) + } + return nil +} + +func jsonPath(root string) string { + return path.Join(root, "json") +} + +func (img *Image) RawJson() ([]byte, error) { + root, err := img.root() + if err != nil { + return nil, fmt.Errorf("Failed to get root for image %s: %s", img.ID, err) + } + fh, err := os.Open(jsonPath(root)) + if err != nil { + return nil, fmt.Errorf("Failed to open json for image %s: %s", img.ID, err) + } + buf, err := ioutil.ReadAll(fh) + if err != nil { + return nil, fmt.Errorf("Failed to read json for image %s: %s", img.ID, err) + } + return buf, nil +} + +// TarLayer returns a tar archive of the image's filesystem layer. +func (img *Image) TarLayer() (arch archive.Archive, err error) { + if img.graph == nil { + return nil, fmt.Errorf("Can't load storage driver for unregistered image %s", img.ID) + } + + driver := img.graph.Driver() + + return driver.Diff(img.ID, img.Parent) +} + +// Image includes convenience proxy functions to its graph +// These functions will return an error if the image is not registered +// (ie. 
if image.graph == nil) +func (img *Image) History() ([]*Image, error) { + var parents []*Image + if err := img.WalkHistory( + func(img *Image) error { + parents = append(parents, img) + return nil + }, + ); err != nil { + return nil, err + } + return parents, nil +} + +func (img *Image) WalkHistory(handler func(*Image) error) (err error) { + currentImg := img + for currentImg != nil { + if handler != nil { + if err := handler(currentImg); err != nil { + return err + } + } + currentImg, err = currentImg.GetParent() + if err != nil { + return fmt.Errorf("Error while getting parent image: %v", err) + } + } + return nil +} + +func (img *Image) GetParent() (*Image, error) { + if img.Parent == "" { + return nil, nil + } + if img.graph == nil { + return nil, fmt.Errorf("Can't lookup parent of unregistered image") + } + return img.graph.Get(img.Parent) +} + +func (img *Image) root() (string, error) { + if img.graph == nil { + return "", fmt.Errorf("Can't lookup root of unregistered image") + } + return img.graph.ImageRoot(img.ID), nil +} + +func (img *Image) GetParentsSize(size int64) int64 { + parentImage, err := img.GetParent() + if err != nil || parentImage == nil { + return size + } + size += parentImage.Size + return parentImage.GetParentsSize(size) +} + +// Depth returns the number of parents for a +// current image +func (img *Image) Depth() (int, error) { + var ( + count = 0 + parent = img + err error + ) + + for parent != nil { + count++ + parent, err = parent.GetParent() + if err != nil { + return -1, err + } + } + return count, nil +} + +// CheckDepth returns an error if the depth of an image, as returned +// by ImageDepth, is too large to support creating a container from it +// on this daemon. +func (img *Image) CheckDepth() error { + // We add 2 layers to the depth because the container's rw and + // init layer add to the restriction + depth, err := img.Depth() + if err != nil { + return err + } + if depth+2 >= MaxImageDepth { + return fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth) + } + return nil +} + +// Build an Image object from raw json data +func NewImgJSON(src []byte) (*Image, error) { + ret := &Image{} + + log.Debugf("Json string: {%s}", src) + // FIXME: Is there a cleaner way to "purify" the input json? 
+ if err := json.Unmarshal(src, ret); err != nil { + return nil, err + } + return ret, nil +} diff -Nru docker.io-0.9.1~dfsg1/image.go docker.io-1.3.2~dfsg1/image.go --- docker.io-0.9.1~dfsg1/image.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/image.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,318 +0,0 @@ -package docker - -import ( - "crypto/rand" - "encoding/hex" - "encoding/json" - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/graphdriver" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "os" - "path" - "strconv" - "strings" - "time" -) - -type Image struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - Container string `json:"container,omitempty"` - ContainerConfig runconfig.Config `json:"container_config,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - Author string `json:"author,omitempty"` - Config *runconfig.Config `json:"config,omitempty"` - Architecture string `json:"architecture,omitempty"` - OS string `json:"os,omitempty"` - graph *Graph - Size int64 -} - -func LoadImage(root string) (*Image, error) { - // Load the json data - jsonData, err := ioutil.ReadFile(jsonPath(root)) - if err != nil { - return nil, err - } - img := &Image{} - - if err := json.Unmarshal(jsonData, img); err != nil { - return nil, err - } - if err := ValidateID(img.ID); err != nil { - return nil, err - } - - if buf, err := ioutil.ReadFile(path.Join(root, "layersize")); err != nil { - if !os.IsNotExist(err) { - return nil, err - } - // If the layersize file does not exist then set the size to a negative number - // because a layer size of 0 (zero) is valid - img.Size = -1 - } else { - size, err := strconv.Atoi(string(buf)) - if err != nil { - return nil, err - } - img.Size = int64(size) - } - - return img, nil -} - -func StoreImage(img *Image, jsonData []byte, layerData archive.ArchiveReader, root, layer string) error { - // Store the layer - var ( - size int64 - err error - driver = img.graph.driver - ) - if err := os.MkdirAll(layer, 0755); err != nil { - return err - } - - // If layerData is not nil, unpack it into the new layer - if layerData != nil { - if differ, ok := driver.(graphdriver.Differ); ok { - if err := differ.ApplyDiff(img.ID, layerData); err != nil { - return err - } - - if size, err = differ.DiffSize(img.ID); err != nil { - return err - } - } else { - start := time.Now().UTC() - utils.Debugf("Start untar layer") - if err := archive.ApplyLayer(layer, layerData); err != nil { - return err - } - utils.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) - - if img.Parent == "" { - if size, err = utils.TreeSize(layer); err != nil { - return err - } - } else { - parent, err := driver.Get(img.Parent) - if err != nil { - return err - } - defer driver.Put(img.Parent) - changes, err := archive.ChangesDirs(layer, parent) - if err != nil { - return err - } - size = archive.ChangesSize(layer, changes) - } - } - } - - img.Size = size - if err := img.SaveSize(root); err != nil { - return err - } - - // If raw json is provided, then use it - if jsonData != nil { - if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil { - return err - } - } else { - if jsonData, err = json.Marshal(img); err != nil { - return err - } - if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil { - return err - } - } - return nil -} - -// SaveSize stores the 
current `size` value of `img` in the directory `root`. -func (img *Image) SaveSize(root string) error { - if err := ioutil.WriteFile(path.Join(root, "layersize"), []byte(strconv.Itoa(int(img.Size))), 0600); err != nil { - return fmt.Errorf("Error storing image size in %s/layersize: %s", root, err) - } - return nil -} - -func jsonPath(root string) string { - return path.Join(root, "json") -} - -// TarLayer returns a tar archive of the image's filesystem layer. -func (img *Image) TarLayer() (arch archive.Archive, err error) { - if img.graph == nil { - return nil, fmt.Errorf("Can't load storage driver for unregistered image %s", img.ID) - } - driver := img.graph.driver - if differ, ok := driver.(graphdriver.Differ); ok { - return differ.Diff(img.ID) - } - - imgFs, err := driver.Get(img.ID) - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - driver.Put(img.ID) - } - }() - - if img.Parent == "" { - archive, err := archive.Tar(imgFs, archive.Uncompressed) - if err != nil { - return nil, err - } - return utils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - driver.Put(img.ID) - return err - }), nil - } - - parentFs, err := driver.Get(img.Parent) - if err != nil { - return nil, err - } - defer driver.Put(img.Parent) - changes, err := archive.ChangesDirs(imgFs, parentFs) - if err != nil { - return nil, err - } - archive, err := archive.ExportChanges(imgFs, changes) - if err != nil { - return nil, err - } - return utils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - driver.Put(img.ID) - return err - }), nil -} - -func ValidateID(id string) error { - if id == "" { - return fmt.Errorf("Image id can't be empty") - } - if strings.Contains(id, ":") { - return fmt.Errorf("Invalid character in image id: ':'") - } - return nil -} - -func GenerateID() string { - for { - id := make([]byte, 32) - if _, err := io.ReadFull(rand.Reader, id); err != nil { - panic(err) // This shouldn't happen - } - value := hex.EncodeToString(id) - // if we try to parse the truncated for as an int and we don't have - // an error then the value is all numberic and causes issues when - // used as a hostname. ref #3869 - if _, err := strconv.Atoi(utils.TruncateID(value)); err == nil { - continue - } - return value - } -} - -// Image includes convenience proxy functions to its graph -// These functions will return an error if the image is not registered -// (ie. 
if image.graph == nil) -func (img *Image) History() ([]*Image, error) { - var parents []*Image - if err := img.WalkHistory( - func(img *Image) error { - parents = append(parents, img) - return nil - }, - ); err != nil { - return nil, err - } - return parents, nil -} - -func (img *Image) WalkHistory(handler func(*Image) error) (err error) { - currentImg := img - for currentImg != nil { - if handler != nil { - if err := handler(currentImg); err != nil { - return err - } - } - currentImg, err = currentImg.GetParent() - if err != nil { - return fmt.Errorf("Error while getting parent image: %v", err) - } - } - return nil -} - -func (img *Image) GetParent() (*Image, error) { - if img.Parent == "" { - return nil, nil - } - if img.graph == nil { - return nil, fmt.Errorf("Can't lookup parent of unregistered image") - } - return img.graph.Get(img.Parent) -} - -func (img *Image) root() (string, error) { - if img.graph == nil { - return "", fmt.Errorf("Can't lookup root of unregistered image") - } - return img.graph.imageRoot(img.ID), nil -} - -func (img *Image) getParentsSize(size int64) int64 { - parentImage, err := img.GetParent() - if err != nil || parentImage == nil { - return size - } - size += parentImage.Size - return parentImage.getParentsSize(size) -} - -// Depth returns the number of parents for a -// current image -func (img *Image) Depth() (int, error) { - var ( - count = 0 - parent = img - err error - ) - - for parent != nil { - count++ - parent, err = parent.GetParent() - if err != nil { - return -1, err - } - } - return count, nil -} - -// Build an Image object from raw json data -func NewImgJSON(src []byte) (*Image, error) { - ret := &Image{} - - utils.Debugf("Json string: {%s}", src) - // FIXME: Is there a cleaner way to "purify" the input json? 
- if err := json.Unmarshal(src, ret); err != nil { - return nil, err - } - return ret, nil -} diff -Nru docker.io-0.9.1~dfsg1/integration/api_test.go docker.io-1.3.2~dfsg1/integration/api_test.go --- docker.io-0.9.1~dfsg1/integration/api_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/api_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -5,13 +5,6 @@ "bytes" "encoding/json" "fmt" - "github.com/dotcloud/docker" - "github.com/dotcloud/docker/api" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "io/ioutil" "net" @@ -20,285 +13,17 @@ "strings" "testing" "time" -) - -func TestGetVersion(t *testing.T) { - eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - - var err error - r := httptest.NewRecorder() - - req, err := http.NewRequest("GET", "/version", nil) - if err != nil { - t.Fatal(err) - } - // FIXME getting the version should require an actual running Server - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { - t.Fatal(err) - } - assertHttpNotError(r, t) - - out := engine.NewOutput() - v, err := out.AddEnv() - if err != nil { - t.Fatal(err) - } - if _, err := io.Copy(out, r.Body); err != nil { - t.Fatal(err) - } - out.Close() - expected := dockerversion.VERSION - if result := v.Get("Version"); result != expected { - t.Errorf("Expected version %s, %s found", expected, result) - } - expected = "application/json" - if result := r.HeaderMap.Get("Content-Type"); result != expected { - t.Errorf("Expected Content-Type %s, %s found", expected, result) - } -} - -func TestGetInfo(t *testing.T) { - eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - job := eng.Job("images") - initialImages, err := job.Stdout.AddListTable() - if err != nil { - t.Fatal(err) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - req, err := http.NewRequest("GET", "/info", nil) - if err != nil { - t.Fatal(err) - } - r := httptest.NewRecorder() - - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { - t.Fatal(err) - } - assertHttpNotError(r, t) - - out := engine.NewOutput() - i, err := out.AddEnv() - if err != nil { - t.Fatal(err) - } - if _, err := io.Copy(out, r.Body); err != nil { - t.Fatal(err) - } - out.Close() - if images := i.GetInt("Images"); images != initialImages.Len() { - t.Errorf("Expected images: %d, %d found", initialImages.Len(), images) - } - expected := "application/json" - if result := r.HeaderMap.Get("Content-Type"); result != expected { - t.Errorf("Expected Content-Type %s, %s found", expected, result) - } -} - -func TestGetEvents(t *testing.T) { - eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - // FIXME: we might not need runtime, why not simply nuke - // the engine? 
- runtime := mkRuntimeFromEngine(eng, t) - defer nuke(runtime) - - var events []*utils.JSONMessage - for _, parts := range [][3]string{ - {"fakeaction", "fakeid", "fakeimage"}, - {"fakeaction2", "fakeid", "fakeimage"}, - } { - action, id, from := parts[0], parts[1], parts[2] - ev := srv.LogEvent(action, id, from) - events = append(events, ev) - } - - req, err := http.NewRequest("GET", "/events?since=1", nil) - if err != nil { - t.Fatal(err) - } - - r := httptest.NewRecorder() - setTimeout(t, "", 500*time.Millisecond, func() { - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { - t.Fatal(err) - } - assertHttpNotError(r, t) - }) - - dec := json.NewDecoder(r.Body) - for i := 0; i < 2; i++ { - var jm utils.JSONMessage - if err := dec.Decode(&jm); err == io.EOF { - break - } else if err != nil { - t.Fatal(err) - } - if jm != *events[i] { - t.Fatalf("Event received it different than expected") - } - } - -} - -func TestGetImagesJSON(t *testing.T) { - eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - - job := eng.Job("images") - initialImages, err := job.Stdout.AddListTable() - if err != nil { - t.Fatal(err) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - - req, err := http.NewRequest("GET", "/images/json?all=0", nil) - if err != nil { - t.Fatal(err) - } - - r := httptest.NewRecorder() - - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { - t.Fatal(err) - } - assertHttpNotError(r, t) - - images := engine.NewTable("Created", 0) - if _, err := images.ReadListFrom(r.Body.Bytes()); err != nil { - t.Fatal(err) - } - - if images.Len() != initialImages.Len() { - t.Errorf("Expected %d image, %d found", initialImages.Len(), images.Len()) - } - - found := false - for _, img := range images.Data { - if strings.Contains(img.GetList("RepoTags")[0], unitTestImageName) { - found = true - break - } - } - if !found { - t.Errorf("Expected image %s, %+v found", unitTestImageName, images) - } - - r2 := httptest.NewRecorder() - - // all=1 - - initialImages = getAllImages(eng, t) - - req2, err := http.NewRequest("GET", "/images/json?all=true", nil) - if err != nil { - t.Fatal(err) - } - if err := api.ServeRequest(eng, api.APIVERSION, r2, req2); err != nil { - t.Fatal(err) - } - assertHttpNotError(r2, t) - - images2 := engine.NewTable("Id", 0) - if _, err := images2.ReadListFrom(r2.Body.Bytes()); err != nil { - t.Fatal(err) - } - - if images2.Len() != initialImages.Len() { - t.Errorf("Expected %d image, %d found", initialImages.Len(), images2.Len()) - } - - found = false - for _, img := range images2.Data { - if img.Get("Id") == unitTestImageID { - found = true - break - } - } - if !found { - t.Errorf("Retrieved image Id differs, expected %s, received %+v", unitTestImageID, images2) - } - - r3 := httptest.NewRecorder() - - // filter=a - req3, err := http.NewRequest("GET", "/images/json?filter=aaaaaaaaaa", nil) - if err != nil { - t.Fatal(err) - } - - if err := api.ServeRequest(eng, api.APIVERSION, r3, req3); err != nil { - t.Fatal(err) - } - assertHttpNotError(r3, t) - - images3 := engine.NewTable("Id", 0) - if _, err := images3.ReadListFrom(r3.Body.Bytes()); err != nil { - t.Fatal(err) - } - - if images3.Len() != 0 { - t.Errorf("Expected 0 image, %d found", images3.Len()) - } -} - -func TestGetImagesHistory(t *testing.T) { - eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - - r := httptest.NewRecorder() - - req, err := http.NewRequest("GET", fmt.Sprintf("/images/%s/history", unitTestImageName), nil) - if err != nil { - 
t.Fatal(err) - } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { - t.Fatal(err) - } - assertHttpNotError(r, t) - - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(r.Body.Bytes()); err != nil { - t.Fatal(err) - } - if len(outs.Data) != 1 { - t.Errorf("Expected 1 line, %d found", len(outs.Data)) - } -} - -func TestGetImagesByName(t *testing.T) { - eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - - req, err := http.NewRequest("GET", "/images/"+unitTestImageName+"/json", nil) - if err != nil { - t.Fatal(err) - } - - r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { - t.Fatal(err) - } - assertHttpNotError(r, t) - - img := &docker.Image{} - if err := json.Unmarshal(r.Body.Bytes(), img); err != nil { - t.Fatal(err) - } - if img.ID != unitTestImageID { - t.Errorf("Error inspecting image") - } -} + "github.com/docker/docker/api" + "github.com/docker/docker/api/server" + "github.com/docker/docker/engine" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) func TestGetContainersJSON(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() job := eng.Job("containers") job.SetenvBool("all", true) @@ -326,7 +51,7 @@ } r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -344,7 +69,7 @@ func TestGetContainersExport(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() // Create a container and remove a file containerID := createTestContainer(eng, @@ -362,7 +87,7 @@ if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -392,7 +117,7 @@ func TestSaveImageAndThenLoad(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() // save image r := httptest.NewRecorder() @@ -400,7 +125,7 @@ if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusOK { @@ -414,7 +139,7 @@ if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusOK { @@ -427,7 +152,7 @@ if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusNotFound { @@ -440,7 +165,7 @@ if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusOK { @@ -453,7 +178,7 @@ if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusOK { @@ -463,7 +188,7 @@ func 
TestGetContainersChanges(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() // Create a container and remove a file containerID := createTestContainer(eng, @@ -480,7 +205,7 @@ if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -503,7 +228,7 @@ func TestGetContainersTop(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ @@ -514,7 +239,7 @@ t, ) defer func() { - // Make sure the process dies before destroying runtime + // Make sure the process dies before destroying daemon containerKill(eng, containerID, t) containerWait(eng, containerID, t) }() @@ -547,7 +272,7 @@ if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -577,60 +302,66 @@ } } -func TestGetContainersByName(t *testing.T) { +func TestPostCommit(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() // Create a container and remove a file containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, - Cmd: []string{"echo", "test"}, + Cmd: []string{"touch", "/test"}, }, t, ) - r := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/containers/"+containerID+"/json", nil) + containerRun(eng, containerID, t) + + req, err := http.NewRequest("POST", "/commit?repo=testrepo&testtag=tag&container="+containerID, bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + + r := httptest.NewRecorder() + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) - outContainer := &docker.Container{} - if err := json.Unmarshal(r.Body.Bytes(), outContainer); err != nil { + if r.Code != http.StatusCreated { + t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code) + } + + var env engine.Env + if err := env.Decode(r.Body); err != nil { t.Fatal(err) } - if outContainer.ID != containerID { - t.Fatalf("Wrong containers retrieved. 
Expected %s, received %s", containerID, outContainer.ID) + if err := eng.Job("image_inspect", env.Get("Id")).Run(); err != nil { + t.Fatalf("The image has not been committed") } } -func TestPostCommit(t *testing.T) { +func TestPostContainersCreate(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) + defer mkDaemonFromEngine(eng, t).Nuke() - // Create a container and remove a file - containerID := createTestContainer(eng, - &runconfig.Config{ - Image: unitTestImageID, - Cmd: []string{"touch", "/test"}, - }, - t, - ) - - containerRun(eng, containerID, t) + configJSON, err := json.Marshal(&runconfig.Config{ + Image: unitTestImageID, + Memory: 33554432, + Cmd: []string{"touch", "/test"}, + }) + if err != nil { + t.Fatal(err) + } - req, err := http.NewRequest("POST", "/commit?repo=testrepo&testtag=tag&container="+containerID, bytes.NewReader([]byte{})) + req, err := http.NewRequest("POST", "/containers/create", bytes.NewReader(configJSON)) if err != nil { t.Fatal(err) } + req.Header.Set("Content-Type", "application/json") + r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -638,18 +369,23 @@ t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code) } - var env engine.Env - if err := env.Decode(r.Body); err != nil { + var apiRun engine.Env + if err := apiRun.Decode(r.Body); err != nil { t.Fatal(err) } - if _, err := srv.ImageInspect(env.Get("Id")); err != nil { - t.Fatalf("The image has not been committed") + containerID := apiRun.Get("Id") + + containerAssertExists(eng, containerID, t) + containerRun(eng, containerID, t) + + if !containerFileExists(eng, containerID, "test", t) { + t.Fatal("Test file was not created") } } -func TestPostContainersCreate(t *testing.T) { +func TestPostJsonVerify(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() configJSON, err := json.Marshal(&runconfig.Config{ Image: unitTestImageID, @@ -666,7 +402,70 @@ } r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + + // Don't add Content-Type header + // req.Header.Set("Content-Type", "application/json") + + err = server.ServeRequest(eng, api.APIVERSION, r, req) + if r.Code != http.StatusInternalServerError || !strings.Contains(((*r.Body).String()), "application/json") { + t.Fatal("Create should have failed due to no Content-Type header - got:", r) + } + + // Now add header but with wrong type and retest + req.Header.Set("Content-Type", "application/xml") + + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if r.Code != http.StatusInternalServerError || !strings.Contains(((*r.Body).String()), "application/json") { + t.Fatal("Create should have failed due to wrong Content-Type header - got:", r) + } +} + +// Issue 7941 - test to make sure a "null" in JSON is just ignored. 
+// W/o this fix a null in JSON would be parsed into a string var as "null" +func TestPostCreateNull(t *testing.T) { + eng := NewTestEngine(t) + daemon := mkDaemonFromEngine(eng, t) + defer daemon.Nuke() + + configStr := fmt.Sprintf(`{ + "Hostname":"", + "Domainname":"", + "Memory":0, + "MemorySwap":0, + "CpuShares":0, + "Cpuset":null, + "AttachStdin":true, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "ExposedPorts":{}, + "Tty":true, + "OpenStdin":true, + "StdinOnce":true, + "Env":[], + "Cmd":"ls", + "Image":"%s", + "Volumes":{}, + "WorkingDir":"", + "Entrypoint":null, + "NetworkDisabled":false, + "OnBuild":null}`, unitTestImageID) + + req, err := http.NewRequest("POST", "/containers/create", strings.NewReader(configStr)) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Content-Type", "application/json") + + r := httptest.NewRecorder() + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -681,16 +480,16 @@ containerID := apiRun.Get("Id") containerAssertExists(eng, containerID, t) - containerRun(eng, containerID, t) - if !containerFileExists(eng, containerID, "test", t) { - t.Fatal("Test file was not created") + c := daemon.Get(containerID) + if c.Config.Cpuset != "" { + t.Fatalf("Cpuset should have been empty - instead its:" + c.Config.Cpuset) } } func TestPostContainersKill(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ @@ -715,7 +514,7 @@ if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -729,7 +528,7 @@ func TestPostContainersRestart(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ @@ -754,7 +553,7 @@ t.Fatal(err) } r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -774,7 +573,7 @@ func TestPostContainersStart(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() containerID := createTestContainer( eng, @@ -796,7 +595,7 @@ req.Header.Set("Content-Type", "application/json") r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -805,64 +604,31 @@ } containerAssertExists(eng, containerID, t) - // Give some time to the process to start - // FIXME: use Wait once it's available as a job - containerWaitTimeout(eng, containerID, t) - if !containerRunning(eng, containerID, t) { - t.Errorf("Container should be running") - } - - r = httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { - t.Fatal(err) - } - // Starting an already started container should return an error - // FIXME: verify a precise error code. There is a possible bug here - // which causes this to return 404 even though the container exists. 
- assertHttpError(r, t) - containerAssertExists(eng, containerID, t) - containerKill(eng, containerID, t) -} - -// Expected behaviour: using / as a bind mount source should throw an error -func TestRunErrorBindMountRootSource(t *testing.T) { - eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - - containerID := createTestContainer( - eng, - &runconfig.Config{ - Image: unitTestImageID, - Cmd: []string{"/bin/cat"}, - OpenStdin: true, - }, - t, - ) - - hostConfigJSON, err := json.Marshal(&runconfig.HostConfig{ - Binds: []string{"/:/tmp"}, - }) - req, err := http.NewRequest("POST", "/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON)) + req, err = http.NewRequest("POST", "/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON)) if err != nil { t.Fatal(err) } req.Header.Set("Content-Type", "application/json") - r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + r = httptest.NewRecorder() + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } - if r.Code != http.StatusInternalServerError { - containerKill(eng, containerID, t) - t.Fatal("should have failed to run when using / as a source for the bind mount") + + // Starting an already started container should return a 304 + assertHttpNotError(r, t) + if r.Code != http.StatusNotModified { + t.Fatalf("%d NOT MODIFIER expected, received %d\n", http.StatusNotModified, r.Code) } + containerAssertExists(eng, containerID, t) + containerKill(eng, containerID, t) } func TestPostContainersStop(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ @@ -888,7 +654,7 @@ t.Fatal(err) } r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -898,11 +664,27 @@ if containerRunning(eng, containerID, t) { t.Fatalf("The container hasn't been stopped") } + + req, err = http.NewRequest("POST", "/containers/"+containerID+"/stop?t=1", bytes.NewReader([]byte{})) + if err != nil { + t.Fatal(err) + } + + r = httptest.NewRecorder() + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + + // Stopping an already stopper container should return a 304 + assertHttpNotError(r, t) + if r.Code != http.StatusNotModified { + t.Fatalf("%d NOT MODIFIER expected, received %d\n", http.StatusNotModified, r.Code) + } } func TestPostContainersWait(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ @@ -920,7 +702,7 @@ if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -940,7 +722,7 @@ func TestPostContainersAttach(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ @@ -978,7 +760,7 @@ t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r.ResponseRecorder, t) @@ -1018,7 
+800,7 @@ func TestPostContainersAttachStderr(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ @@ -1056,7 +838,7 @@ t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r.ResponseRecorder, t) @@ -1094,45 +876,16 @@ containerWait(eng, containerID, t) } -// FIXME: Test deleting running container -// FIXME: Test deleting container with volume -// FIXME: Test deleting volume in use by other container -func TestDeleteContainers(t *testing.T) { - eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - - containerID := createTestContainer(eng, - &runconfig.Config{ - Image: unitTestImageID, - Cmd: []string{"touch", "/test"}, - }, - t, - ) - req, err := http.NewRequest("DELETE", "/containers/"+containerID, nil) - if err != nil { - t.Fatal(err) - } - r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { - t.Fatal(err) - } - assertHttpNotError(r, t) - if r.Code != http.StatusNoContent { - t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code) - } - containerAssertNotExists(eng, containerID, t) -} - func TestOptionsRoute(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() r := httptest.NewRecorder() req, err := http.NewRequest("OPTIONS", "/", nil) if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -1143,7 +896,7 @@ func TestGetEnabledCors(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() r := httptest.NewRecorder() @@ -1151,7 +904,7 @@ if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -1178,7 +931,7 @@ eng := NewTestEngine(t) //we expect errors, so we disable stderr eng.Stderr = ioutil.Discard - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() initialImages := getImages(eng, t, true, "") @@ -1198,7 +951,7 @@ } r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusConflict { @@ -1211,7 +964,7 @@ } r2 := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r2, req2); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r2, req2); err != nil { t.Fatal(err) } assertHttpNotError(r2, t) @@ -1235,7 +988,7 @@ func TestPostContainersCopy(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() // Create a container and remove a file containerID := createTestContainer(eng, @@ -1263,7 +1016,7 @@ t.Fatal(err) } req.Header.Add("Content-Type", "application/json") - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -1293,7 +1046,7 @@ func 
TestPostContainersCopyWhenContainerNotFound(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() r := httptest.NewRecorder() @@ -1311,7 +1064,7 @@ t.Fatal(err) } req.Header.Add("Content-Type", "application/json") - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusNotFound { @@ -1319,6 +1072,86 @@ } } +// Regression test for https://github.com/docker/docker/issues/6231 +func TestConstainersStartChunkedEncodingHostConfig(t *testing.T) { + eng := NewTestEngine(t) + defer mkDaemonFromEngine(eng, t).Nuke() + + r := httptest.NewRecorder() + + var testData engine.Env + testData.Set("Image", "docker-test-image") + testData.SetAuto("Volumes", map[string]struct{}{"/foo": {}}) + testData.Set("Cmd", "true") + jsonData := bytes.NewBuffer(nil) + if err := testData.Encode(jsonData); err != nil { + t.Fatal(err) + } + + req, err := http.NewRequest("POST", "/containers/create?name=chunk_test", jsonData) + if err != nil { + t.Fatal(err) + } + + req.Header.Add("Content-Type", "application/json") + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) + + var testData2 engine.Env + testData2.SetAuto("Binds", []string{"/tmp:/foo"}) + jsonData = bytes.NewBuffer(nil) + if err := testData2.Encode(jsonData); err != nil { + t.Fatal(err) + } + + req, err = http.NewRequest("POST", "/containers/chunk_test/start", jsonData) + if err != nil { + t.Fatal(err) + } + + req.Header.Add("Content-Type", "application/json") + // This is a cheat to make the http request do chunked encoding + // Otherwise (just setting the Content-Encoding to chunked) net/http will overwrite + // http://golang.org/src/pkg/net/http/request.go?s=11980:12172 + req.ContentLength = -1 + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) + + type config struct { + HostConfig struct { + Binds []string + } + } + + req, err = http.NewRequest("GET", "/containers/chunk_test/json", nil) + if err != nil { + t.Fatal(err) + } + + r2 := httptest.NewRecorder() + req.Header.Add("Content-Type", "application/json") + if err := server.ServeRequest(eng, api.APIVERSION, r2, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) + + c := config{} + + json.Unmarshal(r2.Body.Bytes(), &c) + + if len(c.HostConfig.Binds) == 0 { + t.Fatal("Chunked Encoding not handled") + } + + if c.HostConfig.Binds[0] != "/tmp:/foo" { + t.Fatal("Chunked encoding not properly handled, execpted binds to be /tmp:/foo, got:", c.HostConfig.Binds[0]) + } +} + // Mocked types for tests type NopConn struct { io.ReadCloser diff -Nru docker.io-0.9.1~dfsg1/integration/auth_test.go docker.io-1.3.2~dfsg1/integration/auth_test.go --- docker.io-0.9.1~dfsg1/integration/auth_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/auth_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,72 +0,0 @@ -package docker - -import ( - "crypto/rand" - "encoding/hex" - "fmt" - "github.com/dotcloud/docker/auth" - "os" - "strings" - "testing" -) - -// FIXME: these tests have an external dependency on a staging index hosted -// on the docker.io infrastructure. That dependency should be removed. -// - Unit tests should have no side-effect dependencies. -// - Integration tests should have side-effects limited to the host environment being tested. 
- -func TestLogin(t *testing.T) { - os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com") - defer os.Setenv("DOCKER_INDEX_URL", "") - authConfig := &auth.AuthConfig{ - Username: "unittester", - Password: "surlautrerivejetattendrai", - Email: "noise+unittester@docker.com", - ServerAddress: "https://indexstaging-docker.dotcloud.com/v1/", - } - status, err := auth.Login(authConfig, nil) - if err != nil { - t.Fatal(err) - } - if status != "Login Succeeded" { - t.Fatalf("Expected status \"Login Succeeded\", found \"%s\" instead", status) - } -} - -func TestCreateAccount(t *testing.T) { - tokenBuffer := make([]byte, 16) - _, err := rand.Read(tokenBuffer) - if err != nil { - t.Fatal(err) - } - token := hex.EncodeToString(tokenBuffer)[:12] - username := "ut" + token - authConfig := &auth.AuthConfig{ - Username: username, - Password: "test42", - Email: fmt.Sprintf("docker-ut+%s@example.com", token), - ServerAddress: "https://indexstaging-docker.dotcloud.com/v1/", - } - status, err := auth.Login(authConfig, nil) - if err != nil { - t.Fatal(err) - } - expectedStatus := fmt.Sprintf( - "Account created. Please see the documentation of the registry %s for instructions how to activate it.", - authConfig.ServerAddress, - ) - if status != expectedStatus { - t.Fatalf("Expected status: \"%s\", found \"%s\" instead.", expectedStatus, status) - } - - status, err = auth.Login(authConfig, nil) - if err == nil { - t.Fatalf("Expected error but found nil instead") - } - - expectedError := "Login: Account is not Active" - - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("Expected message \"%s\" but found \"%s\" instead", expectedError, err) - } -} diff -Nru docker.io-0.9.1~dfsg1/integration/buildfile_test.go docker.io-1.3.2~dfsg1/integration/buildfile_test.go --- docker.io-0.9.1~dfsg1/integration/buildfile_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/buildfile_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,968 +0,0 @@ -package docker - -import ( - "fmt" - "github.com/dotcloud/docker" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/utils" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -// A testContextTemplate describes a build context and how to test it -type testContextTemplate struct { - // Contents of the Dockerfile - dockerfile string - // Additional files in the context, eg [][2]string{"./passwd", "gordon"} - files [][2]string - // Additional remote files to host on a local HTTP server. - remoteFiles [][2]string -} - -func (context testContextTemplate) Archive(dockerfile string, t *testing.T) archive.Archive { - input := []string{"Dockerfile", dockerfile} - for _, pair := range context.files { - input = append(input, pair[0], pair[1]) - } - a, err := archive.Generate(input...) - if err != nil { - t.Fatal(err) - } - return a -} - -// A table of all the contexts to build and test. -// A new docker runtime will be created and torn down for each context. -var testContexts = []testContextTemplate{ - { - ` -from {IMAGE} -run sh -c 'echo root:testpass > /tmp/passwd' -run mkdir -p /var/run/sshd -run [ "$(cat /tmp/passwd)" = "root:testpass" ] -run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ] -`, - nil, - nil, - }, - - // Exactly the same as above, except uses a line split with a \ to test - // multiline support. 
- { - ` -from {IMAGE} -run sh -c 'echo root:testpass \ - > /tmp/passwd' -run mkdir -p /var/run/sshd -run [ "$(cat /tmp/passwd)" = "root:testpass" ] -run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ] -`, - nil, - nil, - }, - - // Line containing literal "\n" - { - ` -from {IMAGE} -run sh -c 'echo root:testpass > /tmp/passwd' -run echo "foo \n bar"; echo "baz" -run mkdir -p /var/run/sshd -run [ "$(cat /tmp/passwd)" = "root:testpass" ] -run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ] -`, - nil, - nil, - }, - { - ` -from {IMAGE} -add foo /usr/lib/bla/bar -run [ "$(cat /usr/lib/bla/bar)" = 'hello' ] -add http://{SERVERADDR}/baz /usr/lib/baz/quux -run [ "$(cat /usr/lib/baz/quux)" = 'world!' ] -`, - [][2]string{{"foo", "hello"}}, - [][2]string{{"/baz", "world!"}}, - }, - - { - ` -from {IMAGE} -add f / -run [ "$(cat /f)" = "hello" ] -add f /abc -run [ "$(cat /abc)" = "hello" ] -add f /x/y/z -run [ "$(cat /x/y/z)" = "hello" ] -add f /x/y/d/ -run [ "$(cat /x/y/d/f)" = "hello" ] -add d / -run [ "$(cat /ga)" = "bu" ] -add d /somewhere -run [ "$(cat /somewhere/ga)" = "bu" ] -add d /anotherplace/ -run [ "$(cat /anotherplace/ga)" = "bu" ] -add d /somewheeeere/over/the/rainbooow -run [ "$(cat /somewheeeere/over/the/rainbooow/ga)" = "bu" ] -`, - [][2]string{ - {"f", "hello"}, - {"d/ga", "bu"}, - }, - nil, - }, - - { - ` -from {IMAGE} -add http://{SERVERADDR}/x /a/b/c -run [ "$(cat /a/b/c)" = "hello" ] -add http://{SERVERADDR}/x?foo=bar / -run [ "$(cat /x)" = "hello" ] -add http://{SERVERADDR}/x /d/ -run [ "$(cat /d/x)" = "hello" ] -add http://{SERVERADDR} /e -run [ "$(cat /e)" = "blah" ] -`, - nil, - [][2]string{{"/x", "hello"}, {"/", "blah"}}, - }, - - // Comments, shebangs, and executability, oh my! - { - ` -FROM {IMAGE} -# This is an ordinary comment. -RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh -RUN [ ! -x /hello.sh ] -RUN chmod +x /hello.sh -RUN [ -x /hello.sh ] -RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ] -RUN [ "$(/hello.sh)" = "hello world" ] -`, - nil, - nil, - }, - - // Users and groups - { - ` -FROM {IMAGE} - -# Make sure our defaults work -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ] - -# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0) -USER root -RUN [ "$(id -G):$(id -Gn)" = '0:root' ] - -# Setup dockerio user and group -RUN echo 'dockerio:x:1000:1000::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1000:' >> /etc/group - -# Make sure we can switch to our user and all the information is exactly as we expect it to be -USER dockerio -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] - -# Switch back to root and double check that worked exactly as we might expect it to -USER root -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0:root' ] - -# Add a "supplementary" group for our dockerio user -RUN echo 'supplementary:x:1001:dockerio' >> /etc/group - -# ... 
and then go verify that we get it like we expect -USER dockerio -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000 1001:dockerio supplementary' ] -USER 1000 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000 1001:dockerio supplementary' ] - -# super test the new "user:group" syntax -USER dockerio:dockerio -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] -USER 1000:dockerio -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] -USER dockerio:1000 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] -USER 1000:1000 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] -USER dockerio:supplementary -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ] -USER dockerio:1001 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ] -USER 1000:supplementary -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ] -USER 1000:1001 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ] - -# make sure unknown uid/gid still works properly -USER 1042:1043 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ] -`, - nil, - nil, - }, - - // Environment variable - { - ` -from {IMAGE} -env FOO BAR -run [ "$FOO" = "BAR" ] -`, - nil, - nil, - }, - - // Environment overwriting - { - ` -from {IMAGE} -env FOO BAR -run [ "$FOO" = "BAR" ] -env FOO BAZ -run [ "$FOO" = "BAZ" ] -`, - nil, - nil, - }, - - { - ` -from {IMAGE} -ENTRYPOINT /bin/echo -CMD Hello world -`, - nil, - nil, - }, - - { - ` -from {IMAGE} -VOLUME /test -CMD Hello world -`, - nil, - nil, - }, - - { - ` -from {IMAGE} -env FOO /foo/baz -env BAR /bar -env BAZ $BAR -env FOOPATH $PATH:$FOO -run [ "$BAR" = "$BAZ" ] -run [ "$FOOPATH" = "$PATH:/foo/baz" ] -`, - nil, - nil, - }, - - { - ` -from {IMAGE} -env FOO /bar -env TEST testdir -env BAZ /foobar -add testfile $BAZ/ -add $TEST $FOO -run [ "$(cat /foobar/testfile)" = "test1" ] -run [ "$(cat /bar/withfile)" = "test2" ] -`, - [][2]string{ - {"testfile", "test1"}, - {"testdir/withfile", "test2"}, - }, - nil, - }, - - // JSON! - { - ` -FROM {IMAGE} -RUN ["/bin/echo","hello","world"] -CMD ["/bin/true"] -ENTRYPOINT ["/bin/echo","your command -->"] -`, - nil, - nil, - }, - { - ` -FROM {IMAGE} -ADD test /test -RUN ["chmod","+x","/test"] -RUN ["/test"] -RUN [ "$(cat /testfile)" = 'test!' ] -`, - [][2]string{ - {"test", "#!/bin/sh\necho 'test!' 
> /testfile"}, - }, - nil, - }, -} - -// FIXME: test building with 2 successive overlapping ADD commands - -func constructDockerfile(template string, ip net.IP, port string) string { - serverAddr := fmt.Sprintf("%s:%s", ip, port) - replacer := strings.NewReplacer("{IMAGE}", unitTestImageID, "{SERVERADDR}", serverAddr) - return replacer.Replace(template) -} - -func mkTestingFileServer(files [][2]string) (*httptest.Server, error) { - mux := http.NewServeMux() - for _, file := range files { - name, contents := file[0], file[1] - mux.HandleFunc(name, func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(contents)) - }) - } - - // This is how httptest.NewServer sets up a net.Listener, except that our listener must accept remote - // connections (from the container). - listener, err := net.Listen("tcp", ":0") - if err != nil { - return nil, err - } - - s := httptest.NewUnstartedServer(mux) - s.Listener = listener - s.Start() - return s, nil -} - -func TestBuild(t *testing.T) { - for _, ctx := range testContexts { - _, err := buildImage(ctx, t, nil, true) - if err != nil { - t.Fatal(err) - } - } -} - -func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, useCache bool) (*docker.Image, error) { - if eng == nil { - eng = NewTestEngine(t) - runtime := mkRuntimeFromEngine(eng, t) - // FIXME: we might not need runtime, why not simply nuke - // the engine? - defer nuke(runtime) - } - srv := mkServerFromEngine(eng, t) - - httpServer, err := mkTestingFileServer(context.remoteFiles) - if err != nil { - t.Fatal(err) - } - defer httpServer.Close() - - idx := strings.LastIndex(httpServer.URL, ":") - if idx < 0 { - t.Fatalf("could not get port from test http server address %s", httpServer.URL) - } - port := httpServer.URL[idx+1:] - - iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP") - if iIP == nil { - t.Fatal("Legacy bridgeIP field not set in engine") - } - ip, ok := iIP.(net.IP) - if !ok { - panic("Legacy bridgeIP field in engine does not cast to net.IP") - } - dockerfile := constructDockerfile(context.dockerfile, ip, port) - - buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, useCache, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) - id, err := buildfile.Build(context.Archive(dockerfile, t)) - if err != nil { - return nil, err - } - - return srv.ImageInspect(id) -} - -func TestVolume(t *testing.T) { - img, err := buildImage(testContextTemplate{` - from {IMAGE} - volume /test - cmd Hello world - `, nil, nil}, t, nil, true) - if err != nil { - t.Fatal(err) - } - - if len(img.Config.Volumes) == 0 { - t.Fail() - } - for key := range img.Config.Volumes { - if key != "/test" { - t.Fail() - } - } -} - -func TestBuildMaintainer(t *testing.T) { - img, err := buildImage(testContextTemplate{` - from {IMAGE} - maintainer dockerio - `, nil, nil}, t, nil, true) - if err != nil { - t.Fatal(err) - } - - if img.Author != "dockerio" { - t.Fail() - } -} - -func TestBuildUser(t *testing.T) { - img, err := buildImage(testContextTemplate{` - from {IMAGE} - user dockerio - `, nil, nil}, t, nil, true) - if err != nil { - t.Fatal(err) - } - - if img.Config.User != "dockerio" { - t.Fail() - } -} - -func TestBuildEnv(t *testing.T) { - img, err := buildImage(testContextTemplate{` - from {IMAGE} - env port 4243 - `, - nil, nil}, t, nil, true) - if err != nil { - t.Fatal(err) - } - - hasEnv := false - for _, envVar := range img.Config.Env { - if envVar == "port=4243" { - hasEnv = true - break - } - } - if !hasEnv { - t.Fail() - } -} - -func TestBuildCmd(t 
*testing.T) { - img, err := buildImage(testContextTemplate{` - from {IMAGE} - cmd ["/bin/echo", "Hello World"] - `, - nil, nil}, t, nil, true) - if err != nil { - t.Fatal(err) - } - - if img.Config.Cmd[0] != "/bin/echo" { - t.Log(img.Config.Cmd[0]) - t.Fail() - } - if img.Config.Cmd[1] != "Hello World" { - t.Log(img.Config.Cmd[1]) - t.Fail() - } -} - -func TestBuildExpose(t *testing.T) { - img, err := buildImage(testContextTemplate{` - from {IMAGE} - expose 4243 - `, - nil, nil}, t, nil, true) - if err != nil { - t.Fatal(err) - } - - if img.Config.PortSpecs[0] != "4243" { - t.Fail() - } -} - -func TestBuildEntrypoint(t *testing.T) { - img, err := buildImage(testContextTemplate{` - from {IMAGE} - entrypoint ["/bin/echo"] - `, - nil, nil}, t, nil, true) - if err != nil { - t.Fatal(err) - } - - if img.Config.Entrypoint[0] != "/bin/echo" { - t.Log(img.Config.Entrypoint[0]) - t.Fail() - } -} - -// testing #1405 - config.Cmd does not get cleaned up if -// utilizing cache -func TestBuildEntrypointRunCleanup(t *testing.T) { - eng := NewTestEngine(t) - defer nuke(mkRuntimeFromEngine(eng, t)) - - img, err := buildImage(testContextTemplate{` - from {IMAGE} - run echo "hello" - `, - nil, nil}, t, eng, true) - if err != nil { - t.Fatal(err) - } - - img, err = buildImage(testContextTemplate{` - from {IMAGE} - run echo "hello" - add foo /foo - entrypoint ["/bin/echo"] - `, - [][2]string{{"foo", "HEYO"}}, nil}, t, eng, true) - if err != nil { - t.Fatal(err) - } - - if len(img.Config.Cmd) != 0 { - t.Fail() - } -} - -func checkCacheBehavior(t *testing.T, template testContextTemplate, expectHit bool) (imageId string) { - eng := NewTestEngine(t) - defer nuke(mkRuntimeFromEngine(eng, t)) - - img, err := buildImage(template, t, eng, true) - if err != nil { - t.Fatal(err) - } - - imageId = img.ID - - img, err = buildImage(template, t, eng, expectHit) - if err != nil { - t.Fatal(err) - } - - if hit := imageId == img.ID; hit != expectHit { - t.Fatalf("Cache misbehavior, got hit=%t, expected hit=%t: (first: %s, second %s)", hit, expectHit, imageId, img.ID) - } - return -} - -func checkCacheBehaviorFromEngime(t *testing.T, template testContextTemplate, expectHit bool, eng *engine.Engine) (imageId string) { - img, err := buildImage(template, t, eng, true) - if err != nil { - t.Fatal(err) - } - - imageId = img.ID - - img, err = buildImage(template, t, eng, expectHit) - if err != nil { - t.Fatal(err) - } - - if hit := imageId == img.ID; hit != expectHit { - t.Fatalf("Cache misbehavior, got hit=%t, expected hit=%t: (first: %s, second %s)", hit, expectHit, imageId, img.ID) - } - return -} - -func TestBuildImageWithCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - `, - nil, nil} - checkCacheBehavior(t, template, true) -} - -func TestBuildImageWithoutCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - `, - nil, nil} - checkCacheBehavior(t, template, false) -} - -func TestBuildADDLocalFileWithCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - run echo "first" - add foo /usr/lib/bla/bar - run [ "$(cat /usr/lib/bla/bar)" = "hello" ] - run echo "second" - add . 
/src/ - run [ "$(cat /src/foo)" = "hello" ] - `, - [][2]string{ - {"foo", "hello"}, - }, - nil} - eng := NewTestEngine(t) - defer nuke(mkRuntimeFromEngine(eng, t)) - - id1 := checkCacheBehaviorFromEngime(t, template, true, eng) - template.files = append(template.files, [2]string{"bar", "hello2"}) - id2 := checkCacheBehaviorFromEngime(t, template, true, eng) - if id1 == id2 { - t.Fatal("The cache should have been invalided but hasn't.") - } - id3 := checkCacheBehaviorFromEngime(t, template, true, eng) - if id2 != id3 { - t.Fatal("The cache should have been used but hasn't.") - } - template.files[1][1] = "hello3" - id4 := checkCacheBehaviorFromEngime(t, template, true, eng) - if id3 == id4 { - t.Fatal("The cache should have been invalided but hasn't.") - } - template.dockerfile += ` - add ./bar /src2/ - run ls /src2/bar - ` - id5 := checkCacheBehaviorFromEngime(t, template, true, eng) - if id4 == id5 { - t.Fatal("The cache should have been invalided but hasn't.") - } - template.files[1][1] = "hello4" - id6 := checkCacheBehaviorFromEngime(t, template, true, eng) - if id5 == id6 { - t.Fatal("The cache should have been invalided but hasn't.") - } - - template.dockerfile += ` - add bar /src2/bar2 - add /bar /src2/bar3 - run ls /src2/bar2 /src2/bar3 - ` - id7 := checkCacheBehaviorFromEngime(t, template, true, eng) - if id6 == id7 { - t.Fatal("The cache should have been invalided but hasn't.") - } - template.files[1][1] = "hello5" - id8 := checkCacheBehaviorFromEngime(t, template, true, eng) - if id7 == id8 { - t.Fatal("The cache should have been invalided but hasn't.") - } -} - -func TestBuildADDLocalFileWithoutCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - run echo "first" - add foo /usr/lib/bla/bar - run echo "second" - `, - [][2]string{{"foo", "hello"}}, - nil} - checkCacheBehavior(t, template, false) -} - -func TestBuildADDCurrentDirectoryWithCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - add . /usr/lib/bla - `, - nil, nil} - checkCacheBehavior(t, template, true) -} - -func TestBuildADDCurrentDirectoryWithoutCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - add . 
/usr/lib/bla - `, - nil, nil} - checkCacheBehavior(t, template, false) -} - -func TestBuildADDRemoteFileWithCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - run echo "first" - add http://{SERVERADDR}/baz /usr/lib/baz/quux - run echo "second" - `, - nil, - [][2]string{{"/baz", "world!"}}} - checkCacheBehavior(t, template, true) -} - -func TestBuildADDRemoteFileWithoutCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - run echo "first" - add http://{SERVERADDR}/baz /usr/lib/baz/quux - run echo "second" - `, - nil, - [][2]string{{"/baz", "world!"}}} - checkCacheBehavior(t, template, false) -} - -func TestBuildADDLocalAndRemoteFilesWithCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - run echo "first" - add foo /usr/lib/bla/bar - add http://{SERVERADDR}/baz /usr/lib/baz/quux - run echo "second" - `, - [][2]string{{"foo", "hello"}}, - [][2]string{{"/baz", "world!"}}} - checkCacheBehavior(t, template, true) -} - -func TestBuildADDLocalAndRemoteFilesWithoutCache(t *testing.T) { - template := testContextTemplate{` - from {IMAGE} - maintainer dockerio - run echo "first" - add foo /usr/lib/bla/bar - add http://{SERVERADDR}/baz /usr/lib/baz/quux - run echo "second" - `, - [][2]string{{"foo", "hello"}}, - [][2]string{{"/baz", "world!"}}} - checkCacheBehavior(t, template, false) -} - -func TestForbiddenContextPath(t *testing.T) { - eng := NewTestEngine(t) - defer nuke(mkRuntimeFromEngine(eng, t)) - srv := mkServerFromEngine(eng, t) - - context := testContextTemplate{` - from {IMAGE} - maintainer dockerio - add ../../ test/ - `, - [][2]string{{"test.txt", "test1"}, {"other.txt", "other"}}, nil} - - httpServer, err := mkTestingFileServer(context.remoteFiles) - if err != nil { - t.Fatal(err) - } - defer httpServer.Close() - - idx := strings.LastIndex(httpServer.URL, ":") - if idx < 0 { - t.Fatalf("could not get port from test http server address %s", httpServer.URL) - } - port := httpServer.URL[idx+1:] - - iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP") - if iIP == nil { - t.Fatal("Legacy bridgeIP field not set in engine") - } - ip, ok := iIP.(net.IP) - if !ok { - panic("Legacy bridgeIP field in engine does not cast to net.IP") - } - dockerfile := constructDockerfile(context.dockerfile, ip, port) - - buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) - _, err = buildfile.Build(context.Archive(dockerfile, t)) - - if err == nil { - t.Log("Error should not be nil") - t.Fail() - } - - if err.Error() != "Forbidden path outside the build context: ../../ (/)" { - t.Logf("Error message is not expected: %s", err.Error()) - t.Fail() - } -} - -func TestBuildADDFileNotFound(t *testing.T) { - eng := NewTestEngine(t) - defer nuke(mkRuntimeFromEngine(eng, t)) - - context := testContextTemplate{` - from {IMAGE} - add foo /usr/local/bar - `, - nil, nil} - - httpServer, err := mkTestingFileServer(context.remoteFiles) - if err != nil { - t.Fatal(err) - } - defer httpServer.Close() - - idx := strings.LastIndex(httpServer.URL, ":") - if idx < 0 { - t.Fatalf("could not get port from test http server address %s", httpServer.URL) - } - port := httpServer.URL[idx+1:] - - iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP") - if iIP == nil { - t.Fatal("Legacy bridgeIP field not set in engine") - } - ip, ok := iIP.(net.IP) - if !ok { - panic("Legacy bridgeIP field in engine does not cast to net.IP") - 
} - dockerfile := constructDockerfile(context.dockerfile, ip, port) - - buildfile := docker.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) - _, err = buildfile.Build(context.Archive(dockerfile, t)) - - if err == nil { - t.Log("Error should not be nil") - t.Fail() - } - - if err.Error() != "foo: no such file or directory" { - t.Logf("Error message is not expected: %s", err.Error()) - t.Fail() - } -} - -func TestBuildInheritance(t *testing.T) { - eng := NewTestEngine(t) - defer nuke(mkRuntimeFromEngine(eng, t)) - - img, err := buildImage(testContextTemplate{` - from {IMAGE} - expose 4243 - `, - nil, nil}, t, eng, true) - - if err != nil { - t.Fatal(err) - } - - img2, _ := buildImage(testContextTemplate{fmt.Sprintf(` - from %s - entrypoint ["/bin/echo"] - `, img.ID), - nil, nil}, t, eng, true) - - if err != nil { - t.Fatal(err) - } - - // from child - if img2.Config.Entrypoint[0] != "/bin/echo" { - t.Fail() - } - - // from parent - if img.Config.PortSpecs[0] != "4243" { - t.Fail() - } -} - -func TestBuildFails(t *testing.T) { - _, err := buildImage(testContextTemplate{` - from {IMAGE} - run sh -c "exit 23" - `, - nil, nil}, t, nil, true) - - if err == nil { - t.Fatal("Error should not be nil") - } - - sterr, ok := err.(*utils.JSONError) - if !ok { - t.Fatalf("Error should be utils.JSONError") - } - if sterr.Code != 23 { - t.Fatalf("StatusCode %d unexpected, should be 23", sterr.Code) - } -} - -func TestBuildFailsDockerfileEmpty(t *testing.T) { - _, err := buildImage(testContextTemplate{``, nil, nil}, t, nil, true) - - if err != docker.ErrDockerfileEmpty { - t.Fatal("Expected: %v, got: %v", docker.ErrDockerfileEmpty, err) - } -} - -func TestBuildOnBuildTrigger(t *testing.T) { - _, err := buildImage(testContextTemplate{` - from {IMAGE} - onbuild run echo here is the trigger - onbuild run touch foobar - `, - nil, nil, - }, - t, nil, true, - ) - if err != nil { - t.Fatal(err) - } - // FIXME: test that the 'foobar' file was created in the final build. 
-}
-
-func TestBuildOnBuildForbiddenChainedTrigger(t *testing.T) {
-	_, err := buildImage(testContextTemplate{`
-	from {IMAGE}
-	onbuild onbuild run echo test
-	`,
-		nil, nil,
-	},
-		t, nil, true,
-	)
-	if err == nil {
-		t.Fatal("Error should not be nil")
-	}
-}
-
-func TestBuildOnBuildForbiddenFromTrigger(t *testing.T) {
-	_, err := buildImage(testContextTemplate{`
-	from {IMAGE}
-	onbuild from {IMAGE}
-	`,
-		nil, nil,
-	},
-		t, nil, true,
-	)
-	if err == nil {
-		t.Fatal("Error should not be nil")
-	}
-}
-
-func TestBuildOnBuildForbiddenMaintainerTrigger(t *testing.T) {
-	_, err := buildImage(testContextTemplate{`
-	from {IMAGE}
-	onbuild maintainer test
-	`,
-		nil, nil,
-	},
-		t, nil, true,
-	)
-	if err == nil {
-		t.Fatal("Error should not be nil")
-	}
-}
diff -Nru docker.io-0.9.1~dfsg1/integration/commands_test.go docker.io-1.3.2~dfsg1/integration/commands_test.go
--- docker.io-0.9.1~dfsg1/integration/commands_test.go	2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/integration/commands_test.go	2014-11-24 17:38:01.000000000 +0000
@@ -3,21 +3,18 @@
 import (
 	"bufio"
 	"fmt"
-	"github.com/dotcloud/docker"
-	"github.com/dotcloud/docker/api"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/pkg/term"
-	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
-	"os"
-	"path"
-	"regexp"
-	"strconv"
 	"strings"
-	"syscall"
 	"testing"
 	"time"
+
+	"github.com/docker/docker/api/client"
+	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/docker/utils"
+	"github.com/docker/libtrust"
 )
 
 func closeWrap(args ...io.Closer) error {
@@ -35,7 +32,7 @@
 	return nil
 }
 
-func setRaw(t *testing.T, c *docker.Container) *term.State {
+func setRaw(t *testing.T, c *daemon.Container) *term.State {
 	pty, err := c.GetPtyMaster()
 	if err != nil {
 		t.Fatal(err)
@@ -47,7 +44,7 @@
 	return state
 }
 
-func unsetRaw(t *testing.T, c *docker.Container, state *term.State) {
+func unsetRaw(t *testing.T, c *daemon.Container, state *term.State) {
 	pty, err := c.GetPtyMaster()
 	if err != nil {
 		t.Fatal(err)
@@ -55,13 +52,13 @@
 	term.RestoreTerminal(pty.Fd(), state)
 }
 
-func waitContainerStart(t *testing.T, timeout time.Duration) *docker.Container {
-	var container *docker.Container
+func waitContainerStart(t *testing.T, timeout time.Duration) *daemon.Container {
+	var container *daemon.Container
 	setTimeout(t, "Waiting for the container to be started timed out", timeout, func() {
 		for {
-			l := globalRuntime.List()
-			if len(l) == 1 && l[0].State.IsRunning() {
+			l := globalDaemon.List()
+			if len(l) == 1 && l[0].IsRunning() {
 				container = l[0]
 				break
 			}
@@ -116,199 +113,17 @@
 	return nil
 }
 
-// TestRunHostname checks that 'docker run -h' correctly sets a custom hostname
-func TestRunHostname(t *testing.T) {
-	stdout, stdoutPipe := io.Pipe()
-
-	cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
-	defer cleanup(globalEngine, t)
-
-	c := make(chan struct{})
-	go func() {
-		defer close(c)
-		if err := cli.CmdRun("-h", "foobar", unitTestImageID, "hostname"); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	setTimeout(t, "Reading command output time out", 2*time.Second, func() {
-		cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
-		if err != nil {
-			t.Fatal(err)
-		}
-		if cmdOutput != "foobar\n" {
-			t.Fatalf("'hostname' should display '%s', not '%s'", "foobar\n", cmdOutput)
-		}
-	})
-
-	container := globalRuntime.List()[0]
-
-	setTimeout(t, "CmdRun timed out", 10*time.Second, func() {
-		<-c
-
-		go func() {
cli.CmdWait(container.ID) - }() - - if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil { - t.Fatal(err) - } - }) - - // Cleanup pipes - if err := closeWrap(stdout, stdoutPipe); err != nil { - t.Fatal(err) - } -} - -// TestRunWorkdir checks that 'docker run -w' correctly sets a custom working directory -func TestRunWorkdir(t *testing.T) { - stdout, stdoutPipe := io.Pipe() - - cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalEngine, t) - - c := make(chan struct{}) - go func() { - defer close(c) - if err := cli.CmdRun("-w", "/foo/bar", unitTestImageID, "pwd"); err != nil { - t.Fatal(err) - } - }() - - setTimeout(t, "Reading command output time out", 2*time.Second, func() { - cmdOutput, err := bufio.NewReader(stdout).ReadString('\n') - if err != nil { - t.Fatal(err) - } - if cmdOutput != "/foo/bar\n" { - t.Fatalf("'pwd' should display '%s', not '%s'", "/foo/bar\n", cmdOutput) - } - }) - - container := globalRuntime.List()[0] - - setTimeout(t, "CmdRun timed out", 10*time.Second, func() { - <-c - - go func() { - cli.CmdWait(container.ID) - }() - - if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil { - t.Fatal(err) - } - }) - - // Cleanup pipes - if err := closeWrap(stdout, stdoutPipe); err != nil { - t.Fatal(err) - } -} - -// TestRunWorkdirExists checks that 'docker run -w' correctly sets a custom working directory, even if it exists -func TestRunWorkdirExists(t *testing.T) { - stdout, stdoutPipe := io.Pipe() - - cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalEngine, t) - - c := make(chan struct{}) - go func() { - defer close(c) - if err := cli.CmdRun("-w", "/proc", unitTestImageID, "pwd"); err != nil { - t.Fatal(err) - } - }() - - setTimeout(t, "Reading command output time out", 2*time.Second, func() { - cmdOutput, err := bufio.NewReader(stdout).ReadString('\n') - if err != nil { - t.Fatal(err) - } - if cmdOutput != "/proc\n" { - t.Fatalf("'pwd' should display '%s', not '%s'", "/proc\n", cmdOutput) - } - }) - - container := globalRuntime.List()[0] - - setTimeout(t, "CmdRun timed out", 5*time.Second, func() { - <-c - - go func() { - cli.CmdWait(container.ID) - }() - - if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil { - t.Fatal(err) - } - }) - - // Cleanup pipes - if err := closeWrap(stdout, stdoutPipe); err != nil { - t.Fatal(err) - } -} - -func TestRunExit(t *testing.T) { - stdin, stdinPipe := io.Pipe() - stdout, stdoutPipe := io.Pipe() - - cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalEngine, t) - - c1 := make(chan struct{}) - go func() { - cli.CmdRun("-i", unitTestImageID, "/bin/cat") - close(c1) - }() - - setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() { - if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { - t.Fatal(err) - } - }) - - container := globalRuntime.List()[0] - - // Closing /bin/cat stdin, expect it to exit - if err := stdin.Close(); err != nil { - t.Fatal(err) - } - - // as the process exited, CmdRun must finish and unblock. 
Wait for it
-	setTimeout(t, "Waiting for CmdRun timed out", 10*time.Second, func() {
-		<-c1
-
-		go func() {
-			cli.CmdWait(container.ID)
-		}()
-
-		if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil {
-			t.Fatal(err)
-		}
-	})
-
-	// Make sure that the client has been disconnected
-	setTimeout(t, "The client should have been disconnected once the remote process exited.", 2*time.Second, func() {
-		// Expecting pipe i/o error, just check that read does not block
-		stdin.Read([]byte{})
-	})
-
-	// Cleanup pipes
-	if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
-		t.Fatal(err)
-	}
-}
-
 // Expected behaviour: the process dies when the client disconnects
 func TestRunDisconnect(t *testing.T) {
 	stdin, stdinPipe := io.Pipe()
 	stdout, stdoutPipe := io.Pipe()
+	key, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
 
-	cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil)
 	defer cleanup(globalEngine, t)
 
 	c1 := make(chan struct{})
@@ -339,9 +154,9 @@
 	// Client disconnect after run -i should cause stdin to be closed, which should
 	// cause /bin/cat to exit.
 	setTimeout(t, "Waiting for /bin/cat to exit timed out", 2*time.Second, func() {
-		container := globalRuntime.List()[0]
-		container.Wait()
-		if container.State.IsRunning() {
+		container := globalDaemon.List()[0]
+		container.WaitStop(-1 * time.Second)
+		if container.IsRunning() {
 			t.Fatalf("/bin/cat is still running after closing stdin")
 		}
 	})
@@ -353,8 +168,12 @@
 	stdin, stdinPipe := io.Pipe()
 	stdout, stdoutPipe := io.Pipe()
+	key, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
 
-	cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil)
 	defer cleanup(globalEngine, t)
 
 	c1 := make(chan struct{})
@@ -363,7 +182,7 @@
 		// We're simulating a disconnect so the return value doesn't matter. What matters is the
 		// fact that CmdRun returns.
 		if err := cli.CmdRun("-i", "-t", unitTestImageID, "/bin/cat"); err != nil {
-			utils.Debugf("Error CmdRun: %s", err)
+			log.Debugf("Error CmdRun: %s", err)
 		}
 	}()
 
@@ -392,85 +211,23 @@
 	// In tty mode, we expect the process to stay alive even after client's stdin closes.
 
 	// Give some time to monitor to do his thing
-	container.WaitTimeout(500 * time.Millisecond)
-	if !container.State.IsRunning() {
+	container.WaitStop(500 * time.Millisecond)
+	if !container.IsRunning() {
 		t.Fatalf("/bin/cat should still be running after closing stdin (tty mode)")
 	}
 }
 
-// TestAttachStdin checks attaching to stdin without stdout and stderr.
-// 'docker run -i -a stdin' should send the client's stdin to the command,
-// then detach from it and print the container id.
-func TestRunAttachStdin(t *testing.T) {
-
-	stdin, stdinPipe := io.Pipe()
-	stdout, stdoutPipe := io.Pipe()
-
-	cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
-	defer cleanup(globalEngine, t)
-
-	ch := make(chan struct{})
-	go func() {
-		defer close(ch)
-		cli.CmdRun("-i", "-a", "stdin", unitTestImageID, "sh", "-c", "echo hello && cat && sleep 5")
-	}()
-
-	// Send input to the command, close stdin
-	setTimeout(t, "Write timed out", 10*time.Second, func() {
-		if _, err := stdinPipe.Write([]byte("hi there\n")); err != nil {
-			t.Fatal(err)
-		}
-		if err := stdinPipe.Close(); err != nil {
-			t.Fatal(err)
-		}
-	})
-
-	container := globalRuntime.List()[0]
-
-	// Check output
-	setTimeout(t, "Reading command output time out", 10*time.Second, func() {
-		cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
-		if err != nil {
-			t.Fatal(err)
-		}
-		if cmdOutput != container.ID+"\n" {
-			t.Fatalf("Wrong output: should be '%s', not '%s'\n", container.ID+"\n", cmdOutput)
-		}
-	})
-
-	// wait for CmdRun to return
-	setTimeout(t, "Waiting for CmdRun timed out", 5*time.Second, func() {
-		<-ch
-	})
-
-	setTimeout(t, "Waiting for command to exit timed out", 10*time.Second, func() {
-		container.Wait()
-	})
-
-	// Check logs
-	if cmdLogs, err := container.ReadLog("json"); err != nil {
-		t.Fatal(err)
-	} else {
-		if output, err := ioutil.ReadAll(cmdLogs); err != nil {
-			t.Fatal(err)
-		} else {
-			expectedLogs := []string{"{\"log\":\"hello\\n\",\"stream\":\"stdout\"", "{\"log\":\"hi there\\n\",\"stream\":\"stdout\""}
-			for _, expectedLog := range expectedLogs {
-				if !strings.Contains(string(output), expectedLog) {
-					t.Fatalf("Unexpected logs: should contain '%s', it is not '%s'\n", expectedLog, output)
-				}
-			}
-		}
-	}
-}
-
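The hunks above and below migrate every wait call from the old Wait()/WaitTimeout(d) pair to a single WaitStop(timeout), where a negative timeout such as -1 * time.Second means block until the container actually exits. A toy model of that contract, using a stand-in type rather than the real daemon.Container (whose exact return values I am inferring from the call sites in this diff):

package main

import (
	"errors"
	"fmt"
	"time"
)

// toyContainer mimics just enough of daemon.Container to show the
// WaitStop(timeout) contract used by the updated tests.
type toyContainer struct {
	exited chan int // receives the exit code once, when the "container" exits
}

// WaitStop returns the exit code, or an error if a non-negative timeout
// elapses first; a negative timeout waits indefinitely.
func (c *toyContainer) WaitStop(timeout time.Duration) (int, error) {
	if timeout < 0 {
		return <-c.exited, nil
	}
	select {
	case code := <-c.exited:
		return code, nil
	case <-time.After(timeout):
		return -1, errors.New("timeout waiting on container to stop")
	}
}

func main() {
	c := &toyContainer{exited: make(chan int, 1)}
	go func() {
		time.Sleep(50 * time.Millisecond)
		c.exited <- 0
	}()
	// Mirrors container.WaitStop(-1 * time.Second) in the tests: blocks
	// until exit, however long that takes.
	code, err := c.WaitStop(-1 * time.Second)
	fmt.Println(code, err) // 0 <nil>
}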
 // TestRunDetach checks attaching and detaching with the escape sequence.
 func TestRunDetach(t *testing.T) {
 	stdin, stdinPipe := io.Pipe()
 	stdout, stdoutPipe := io.Pipe()
+	key, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
 
-	cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil)
 	defer cleanup(globalEngine, t)
 
 	ch := make(chan struct{})
@@ -503,7 +260,7 @@
 	closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
 
 	time.Sleep(500 * time.Millisecond)
-	if !container.State.IsRunning() {
+	if !container.IsRunning() {
 		t.Fatal("The detached container should be still running")
 	}
 
@@ -516,8 +273,12 @@
 func TestAttachDetach(t *testing.T) {
 	stdin, stdinPipe := io.Pipe()
 	stdout, stdoutPipe := io.Pipe()
+	key, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
 
-	cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil)
 	defer cleanup(globalEngine, t)
 
 	ch := make(chan struct{})
@@ -550,7 +311,7 @@
 	stdin, stdinPipe = io.Pipe()
 	stdout, stdoutPipe = io.Pipe()
 
-	cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	cli = client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil)
 
 	ch = make(chan struct{})
 	go func() {
@@ -584,7 +345,7 @@
 	closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
 
 	time.Sleep(500 * time.Millisecond)
-	if !container.State.IsRunning() {
+	if !container.IsRunning() {
 		t.Fatal("The detached container should be still running")
 	}
 
@@ -597,8 +358,12 @@
 func TestAttachDetachTruncatedID(t *testing.T) {
 	stdin, stdinPipe := io.Pipe()
 	stdout, stdoutPipe := io.Pipe()
+	key, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
 
-	cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil)
 	defer cleanup(globalEngine, t)
 
 	// Discard the CmdRun output
@@ -616,7 +381,7 @@
 	stdin, stdinPipe = io.Pipe()
 	stdout, stdoutPipe = io.Pipe()
 
-	cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	cli = client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil)
 
 	ch := make(chan struct{})
 	go func() {
@@ -649,7 +414,7 @@
 	closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
 
 	time.Sleep(500 * time.Millisecond)
-	if !container.State.IsRunning() {
+	if !container.IsRunning() {
 		t.Fatal("The detached container should be still running")
 	}
 
@@ -662,14 +427,18 @@
 func TestAttachDisconnect(t *testing.T) {
 	stdin, stdinPipe := io.Pipe()
 	stdout, stdoutPipe := io.Pipe()
+	key, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
 
-	cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil)
 	defer cleanup(globalEngine, t)
 
 	go func() {
 		// Start a process in daemon mode
 		if err := cli.CmdRun("-d", "-i", unitTestImageID, "/bin/cat"); err != nil {
-			utils.Debugf("Error CmdRun: %s", err)
+			log.Debugf("Error CmdRun: %s", err)
 		}
 	}()
 
@@ -681,15 +450,15 @@
 	setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() {
 		for {
-			l := globalRuntime.List()
-			if len(l) == 1 && l[0].State.IsRunning() {
+			l := globalDaemon.List()
+			if len(l) == 1 && l[0].IsRunning() {
 				break
 			}
 			time.Sleep(10 * time.Millisecond)
 		}
 	})
 
-	container := globalRuntime.List()[0]
+	container := globalDaemon.List()[0]
 
 	// Attach to it
 	c1 := make(chan struct{})
@@ -717,28 +486,32 @@
 	// We closed stdin, expect /bin/cat to still be running
 	// Wait a little bit to make sure container.monitor() did his thing
-	err := container.WaitTimeout(500 * time.Millisecond)
-	if err == nil || !container.State.IsRunning() {
+	_, err = container.WaitStop(500 * time.Millisecond)
+	if err == nil || !container.IsRunning() {
 		t.Fatalf("/bin/cat is not running after closing stdin")
 	}
 
 	// Try to avoid the timeout in destroy. Best effort, don't check error
 	cStdin, _ := container.StdinPipe()
 	cStdin.Close()
-	container.Wait()
+	container.WaitStop(-1 * time.Second)
 }
 
 // Expected behaviour: container gets deleted automatically after exit
 func TestRunAutoRemove(t *testing.T) {
 	t.Skip("Fixme. Skipping test for now, race condition")
 	stdout, stdoutPipe := io.Pipe()
-	cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	key, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+	cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil)
 	defer cleanup(globalEngine, t)
 
 	c := make(chan struct{})
	go func() {
 		defer close(c)
-		if err := cli.CmdRun("-rm", unitTestImageID, "hostname"); err != nil {
+		if err := cli.CmdRun("--rm", unitTestImageID, "hostname"); err != nil {
 			t.Fatal(err)
 		}
 	}()
@@ -761,380 +534,31 @@
 	time.Sleep(500 * time.Millisecond)
 
-	if len(globalRuntime.List()) > 0 {
+	if len(globalDaemon.List()) > 0 {
 		t.Fatalf("failed to remove container automatically: container %s still exists", temporaryContainerID)
 	}
 }
 
-func TestCmdLogs(t *testing.T) {
-	t.Skip("Test not implemented")
-	cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr)
-	defer cleanup(globalEngine, t)
-
-	if err := cli.CmdRun(unitTestImageID, "sh", "-c", "ls -l"); err != nil {
-		t.Fatal(err)
-	}
-	if err := cli.CmdRun("-t", unitTestImageID, "sh", "-c", "ls -l"); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := cli.CmdLogs(globalRuntime.List()[0].ID); err != nil {
-		t.Fatal(err)
-	}
-}
-
 // Expected behaviour: error out when attempting to bind mount non-existing source paths
 func TestRunErrorBindNonExistingSource(t *testing.T) {
-
-	cli := api.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
-	defer cleanup(globalEngine, t)
-
-	c := make(chan struct{})
-	go func() {
-		defer close(c)
-		// This check is made at runtime, can't be "unit tested"
-		if err := cli.CmdRun("-v", "/i/dont/exist:/tmp", unitTestImageID, "echo 'should fail'"); err == nil {
-			t.Fatal("should have failed to run when using /i/dont/exist as a source for the bind mount")
-		}
-	}()
-
-	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
-		<-c
-	})
-}
-
-func TestImagesViz(t *testing.T) {
-	stdout, stdoutPipe := io.Pipe()
-
-	cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
-	defer cleanup(globalEngine, t)
-
-	image := buildTestImages(t, globalEngine)
-
-	c := make(chan struct{})
-	go func() {
-		defer close(c)
-		if err := cli.CmdImages("--viz"); err != nil {
-			t.Fatal(err)
-		}
-		stdoutPipe.Close()
-	}()
-
-	setTimeout(t, "Reading command output time out", 2*time.Second, func() {
-		cmdOutputBytes, err := ioutil.ReadAll(bufio.NewReader(stdout))
-		if err != nil {
-			t.Fatal(err)
-		}
-		cmdOutput := string(cmdOutputBytes)
-
- regexpStrings := []string{ - "digraph docker {", - fmt.Sprintf("base -> \"%s\" \\[style=invis]", unitTestImageIDShort), - fmt.Sprintf("label=\"%s\\\\n%s:latest\"", unitTestImageIDShort, unitTestImageName), - fmt.Sprintf("label=\"%s\\\\n%s:%s\"", utils.TruncateID(image.ID), "test", "latest"), - "base \\[style=invisible]", - } - - compiledRegexps := []*regexp.Regexp{} - for _, regexpString := range regexpStrings { - regexp, err := regexp.Compile(regexpString) - if err != nil { - fmt.Println("Error in regex string: ", err) - return - } - compiledRegexps = append(compiledRegexps, regexp) - } - - for _, regexp := range compiledRegexps { - if !regexp.MatchString(cmdOutput) { - t.Fatalf("images --viz content '%s' did not match regexp '%s'", cmdOutput, regexp) - } - } - }) -} - -func TestImagesTree(t *testing.T) { - stdout, stdoutPipe := io.Pipe() - - cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalEngine, t) - - image := buildTestImages(t, globalEngine) - - c := make(chan struct{}) - go func() { - defer close(c) - if err := cli.CmdImages("--tree"); err != nil { - t.Fatal(err) - } - stdoutPipe.Close() - }() - - setTimeout(t, "Reading command output time out", 2*time.Second, func() { - cmdOutputBytes, err := ioutil.ReadAll(bufio.NewReader(stdout)) - if err != nil { - t.Fatal(err) - } - cmdOutput := string(cmdOutputBytes) - regexpStrings := []string{ - fmt.Sprintf("└─%s Virtual Size: \\d+.\\d+ MB Tags: %s:latest", unitTestImageIDShort, unitTestImageName), - "(?m) └─[0-9a-f]+.*", - "(?m) └─[0-9a-f]+.*", - "(?m) └─[0-9a-f]+.*", - fmt.Sprintf("(?m)^ └─%s Virtual Size: \\d+.\\d+ MB Tags: test:latest", utils.TruncateID(image.ID)), - } - - compiledRegexps := []*regexp.Regexp{} - for _, regexpString := range regexpStrings { - regexp, err := regexp.Compile(regexpString) - if err != nil { - fmt.Println("Error in regex string: ", err) - return - } - compiledRegexps = append(compiledRegexps, regexp) - } - - for _, regexp := range compiledRegexps { - if !regexp.MatchString(cmdOutput) { - t.Fatalf("images --tree content '%s' did not match regexp '%s'", cmdOutput, regexp) - } - } - }) -} - -func buildTestImages(t *testing.T, eng *engine.Engine) *docker.Image { - - var testBuilder = testContextTemplate{ - ` -from {IMAGE} -run sh -c 'echo root:testpass > /tmp/passwd' -run mkdir -p /var/run/sshd -run [ "$(cat /tmp/passwd)" = "root:testpass" ] -run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ] -`, - nil, - nil, - } - image, err := buildImage(testBuilder, t, eng, true) - if err != nil { - t.Fatal(err) - } - - if err := eng.Job("tag", image.ID, "test").Run(); err != nil { - t.Fatal(err) - } - - return image -} - -// #2098 - Docker cidFiles only contain short version of the containerId -//sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" -// TestRunCidFile tests that run --cidfile returns the longid -func TestRunCidFileCheckIDLength(t *testing.T) { - stdout, stdoutPipe := io.Pipe() - - tmpDir, err := ioutil.TempDir("", "TestRunCidFile") - if err != nil { - t.Fatal(err) - } - tmpCidFile := path.Join(tmpDir, "cid") - - cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalEngine, t) - - c := make(chan struct{}) - go func() { - defer close(c) - if err := cli.CmdRun("--cidfile", tmpCidFile, unitTestImageID, "ls"); err != nil { - t.Fatal(err) - } - }() - - defer os.RemoveAll(tmpDir) - setTimeout(t, "Reading command output time out", 2*time.Second, func() { - cmdOutput, err := 
bufio.NewReader(stdout).ReadString('\n')
-		if err != nil {
-			t.Fatal(err)
-		}
-		if len(cmdOutput) < 1 {
-			t.Fatalf("'ls' should return something, not '%s'", cmdOutput)
-		}
-		//read the tmpCidFile
-		buffer, err := ioutil.ReadFile(tmpCidFile)
-		if err != nil {
-			t.Fatal(err)
-		}
-		id := string(buffer)
-
-		if len(id) != len("2bf44ea18873287bd9ace8a4cb536a7cbe134bed67e805fdf2f58a57f69b320c") {
-			t.Fatalf("--cidfile should be a long id, not '%s'", id)
-		}
-		//test that it's a valid cid? (though the container is gone..)
-		//remove the file and dir.
-	})
-
-	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
-		<-c
-	})
-
-}
-
-// Ensure that CIDFile gets deleted if it's empty
-// Perform this test by making `docker run` fail
-func TestRunCidFileCleanupIfEmpty(t *testing.T) {
-	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
+	key, err := libtrust.GenerateECP256PrivateKey()
 	if err != nil {
 		t.Fatal(err)
 	}
-	tmpCidFile := path.Join(tmpDir, "cid")
 
-	cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	cli := client.NewDockerCli(nil, nil, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil)
 	defer cleanup(globalEngine, t)
 
 	c := make(chan struct{})
 	go func() {
 		defer close(c)
-		if err := cli.CmdRun("--cidfile", tmpCidFile, unitTestImageID); err == nil {
-			t.Fatal("running without a command should have failed")
-		}
-		if _, err := os.Stat(tmpCidFile); err == nil {
-			t.Fatalf("empty CIDFile '%s' should've been deleted", tmpCidFile)
+		// This check is made at runtime, can't be "unit tested"
+		if err := cli.CmdRun("-v", "/i/dont/exist:/tmp", unitTestImageID, "echo 'should fail'"); err == nil {
+			t.Fatal("should have failed to run when using /i/dont/exist as a source for the bind mount")
 		}
 	}()
 
-	defer os.RemoveAll(tmpDir)
 	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
 		<-c
 	})
 }
-
-func TestContainerOrphaning(t *testing.T) {
-
-	// setup a temporary directory
-	tmpDir, err := ioutil.TempDir("", "project")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmpDir)
-
-	// setup a CLI and server
-	cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr)
-	defer cleanup(globalEngine, t)
-	srv := mkServerFromEngine(globalEngine, t)
-
-	// closure to build something
-	buildSomething := func(template string, image string) string {
-		dockerfile := path.Join(tmpDir, "Dockerfile")
-		replacer := strings.NewReplacer("{IMAGE}", unitTestImageID)
-		contents := replacer.Replace(template)
-		ioutil.WriteFile(dockerfile, []byte(contents), 0x777)
-		if err := cli.CmdBuild("-t", image, tmpDir); err != nil {
-			t.Fatal(err)
-		}
-		img, err := srv.ImageInspect(image)
-		if err != nil {
-			t.Fatal(err)
-		}
-		return img.ID
-	}
-
-	// build an image
-	imageName := "orphan-test"
-	template1 := `
-	from {IMAGE}
-	cmd ["/bin/echo", "holla"]
-	`
-	img1 := buildSomething(template1, imageName)
-
-	// create a container using the first image
-	if err := cli.CmdRun(imageName); err != nil {
-		t.Fatal(err)
-	}
-
-	// build a new image that splits lineage
-	template2 := `
-	from {IMAGE}
-	cmd ["/bin/echo", "holla"]
-	expose 22
-	`
-	buildSomething(template2, imageName)
-
-	// remove the second image by name
-	resp := engine.NewTable("", 0)
-	if err := srv.DeleteImage(imageName, resp, true, false); err == nil {
-		t.Fatal("Expected error, got none")
-	}
-
-	// see if we deleted the first image (and orphaned the container)
-	for _, i := range resp.Data {
-		if img1 == i.Get("Deleted") {
-			t.Fatal("Orphaned image with container")
-		}
-	}
-
-}
-
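Every rewritten test in this file repeats the same four-line key-generation preamble before building a CLI. A hypothetical helper, not present in the tree, would state the pattern once; it assumes the harness globals testDaemonProto and testDaemonAddr, the imports already present in commands_test.go, and that client.NewDockerCli returns *client.DockerCli as the call sites here suggest:

// mkTestCli is a hypothetical test helper for integration/commands_test.go.
// The new client.NewDockerCli signature requires a libtrust key, so every
// test generates one; this wraps that boilerplate.
func mkTestCli(t *testing.T, in io.ReadCloser, out io.Writer) *client.DockerCli {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		t.Fatal(err)
	}
	return client.NewDockerCli(in, out, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil)
}

With it, each test's preamble would collapse to cli := mkTestCli(t, stdin, stdoutPipe) in place of the repeated key block.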
-func TestCmdKill(t *testing.T) { - var ( - stdin, stdinPipe = io.Pipe() - stdout, stdoutPipe = io.Pipe() - cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - cli2 = api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) - ) - defer cleanup(globalEngine, t) - - ch := make(chan struct{}) - go func() { - defer close(ch) - cli.CmdRun("-i", "-t", unitTestImageID, "sh", "-c", "trap 'echo SIGUSR1' USR1; trap 'echo SIGUSR2' USR2; echo Ready; while true; do read; done") - }() - - container := waitContainerStart(t, 10*time.Second) - - setTimeout(t, "Read Ready timed out", 3*time.Second, func() { - if err := expectPipe("Ready", stdout); err != nil { - t.Fatal(err) - } - }) - - setTimeout(t, "SIGUSR1 timed out", 2*time.Second, func() { - for i := 0; i < 10; i++ { - if err := cli2.CmdKill("-s", strconv.Itoa(int(syscall.SIGUSR1)), container.ID); err != nil { - t.Fatal(err) - } - if err := expectPipe("SIGUSR1", stdout); err != nil { - t.Fatal(err) - } - } - }) - - setTimeout(t, "SIGUSR2 timed out", 2*time.Second, func() { - for i := 0; i < 10; i++ { - if err := cli2.CmdKill("--signal=USR2", container.ID); err != nil { - t.Fatal(err) - } - if err := expectPipe("SIGUSR2", stdout); err != nil { - t.Fatal(err) - } - } - }) - - stdout.Close() - time.Sleep(500 * time.Millisecond) - if !container.State.IsRunning() { - t.Fatal("The container should be still running") - } - - setTimeout(t, "Waiting for container timedout", 5*time.Second, func() { - if err := cli2.CmdKill(container.ID); err != nil { - t.Fatal(err) - } - - <-ch - if err := cli2.CmdWait(container.ID); err != nil { - t.Fatal(err) - } - }) - - closeWrap(stdin, stdinPipe, stdout, stdoutPipe) -} diff -Nru docker.io-0.9.1~dfsg1/integration/container_test.go docker.io-1.3.2~dfsg1/integration/container_test.go --- docker.io-0.9.1~dfsg1/integration/container_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/container_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,1744 +1,259 @@ package docker import ( - "bufio" - "fmt" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" "io" "io/ioutil" - "os" - "path" - "regexp" - "sort" - "strings" "testing" "time" + + "github.com/docker/docker/runconfig" ) -func TestIDFormat(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container1, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"/bin/sh", "-c", "echo hello world"}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - match, err := regexp.Match("^[0-9a-f]{64}$", []byte(container1.ID)) - if err != nil { - t.Fatal(err) - } - if !match { - t.Fatalf("Invalid container ID: %s", container1.ID) - } -} +func TestRestartStdin(t *testing.T) { + daemon := mkDaemon(t) + defer nuke(daemon) + container, _, err := daemon.Create(&runconfig.Config{ + Image: GetTestImage(daemon).ID, + Cmd: []string{"cat"}, -func TestMultipleAttachRestart(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, _ := mkContainer( - runtime, - []string{"_", "/bin/sh", "-c", "i=1; while [ $i -le 5 ]; do i=`expr $i + 1`; echo hello; done"}, - t, + OpenStdin: true, + }, + &runconfig.HostConfig{}, + "", ) - defer runtime.Destroy(container) - - // Simulate 3 client attaching to the container and stop/restart - - stdout1, err := container.StdoutPipe() - if err != nil { - t.Fatal(err) - } - stdout2, err := container.StdoutPipe() - if err != nil { - t.Fatal(err) - } - stdout3, err := 
container.StdoutPipe() - if err != nil { - t.Fatal(err) - } - if err := container.Start(); err != nil { - t.Fatal(err) - } - l1, err := bufio.NewReader(stdout1).ReadString('\n') - if err != nil { - t.Fatal(err) - } - if strings.Trim(l1, " \r\n") != "hello" { - t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l1) - } - l2, err := bufio.NewReader(stdout2).ReadString('\n') - if err != nil { - t.Fatal(err) - } - if strings.Trim(l2, " \r\n") != "hello" { - t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l2) - } - l3, err := bufio.NewReader(stdout3).ReadString('\n') if err != nil { t.Fatal(err) } - if strings.Trim(l3, " \r\n") != "hello" { - t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l3) - } - - if err := container.Stop(10); err != nil { - t.Fatal(err) - } + defer daemon.Destroy(container) - stdout1, err = container.StdoutPipe() - if err != nil { - t.Fatal(err) - } - stdout2, err = container.StdoutPipe() + stdin, err := container.StdinPipe() if err != nil { t.Fatal(err) } - stdout3, err = container.StdoutPipe() + stdout, err := container.StdoutPipe() if err != nil { t.Fatal(err) } if err := container.Start(); err != nil { t.Fatal(err) } - - setTimeout(t, "Timeout reading from the process", 3*time.Second, func() { - l1, err = bufio.NewReader(stdout1).ReadString('\n') - if err != nil { - t.Fatal(err) - } - if strings.Trim(l1, " \r\n") != "hello" { - t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l1) - } - l2, err = bufio.NewReader(stdout2).ReadString('\n') - if err != nil { - t.Fatal(err) - } - if strings.Trim(l2, " \r\n") != "hello" { - t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l2) - } - l3, err = bufio.NewReader(stdout3).ReadString('\n') - if err != nil { - t.Fatal(err) - } - if strings.Trim(l3, " \r\n") != "hello" { - t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l3) - } - }) - container.Wait() -} - -func TestDiff(t *testing.T) { - eng := NewTestEngine(t) - runtime := mkRuntimeFromEngine(eng, t) - defer nuke(runtime) - // Create a container and remove a file - container1, _, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t) - defer runtime.Destroy(container1) - - // The changelog should be empty and not fail before run. 
See #1705
-	c, err := container1.Changes()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(c) != 0 {
-		t.Fatalf("Changelog should be empty before run")
-	}
-
-	if err := container1.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	// Check the changelog
-	c, err = container1.Changes()
-	if err != nil {
-		t.Fatal(err)
-	}
-	success := false
-	for _, elem := range c {
-		if elem.Path == "/etc/passwd" && elem.Kind == 2 {
-			success = true
-		}
-	}
-	if !success {
-		t.Fatalf("/etc/passwd has been removed but is not present in the diff")
-	}
-
-	// Commit the container
-	img, err := runtime.Commit(container1, "", "", "unit test committed image - diff", "", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// Create a new container from the committed image
-	container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t)
-	defer runtime.Destroy(container2)
-
-	if err := container2.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	// Check the changelog
-	c, err = container2.Changes()
-	if err != nil {
-		t.Fatal(err)
-	}
-	for _, elem := range c {
-		if elem.Path == "/etc/passwd" {
-			t.Fatalf("/etc/passwd should not be present in the diff after commit.")
-		}
-	}
-
-	// Create a new container
-	container3, _, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t)
-	defer runtime.Destroy(container3)
-
-	if err := container3.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	// Check the changelog
-	c, err = container3.Changes()
-	if err != nil {
-		t.Fatal(err)
-	}
-	success = false
-	for _, elem := range c {
-		if elem.Path == "/bin/httpd" && elem.Kind == 2 {
-			success = true
-		}
-	}
-	if !success {
-		t.Fatalf("/bin/httpd should be present in the diff after commit.")
-	}
-}
-
-func TestCommitAutoRun(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-	container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
-	defer runtime.Destroy(container1)
-
-	if container1.State.IsRunning() {
-		t.Errorf("Container shouldn't be running")
-	}
-	if err := container1.Run(); err != nil {
-		t.Fatal(err)
-	}
-	if container1.State.IsRunning() {
-		t.Errorf("Container shouldn't be running")
-	}
-
-	img, err := runtime.Commit(container1, "", "", "unit test committed image", "", &runconfig.Config{Cmd: []string{"cat", "/world"}})
-	if err != nil {
-		t.Error(err)
-	}
-
-	// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
-	container2, _, _ := mkContainer(runtime, []string{img.ID}, t)
-	defer runtime.Destroy(container2)
-	stdout, err := container2.StdoutPipe()
-	if err != nil {
-		t.Fatal(err)
-	}
-	stderr, err := container2.StderrPipe()
-	if err != nil {
+	if _, err := io.WriteString(stdin, "hello world"); err != nil {
 		t.Fatal(err)
 	}
-	if err := container2.Start(); err != nil {
+	if err := stdin.Close(); err != nil {
 		t.Fatal(err)
 	}
-	container2.Wait()
+	container.WaitStop(-1 * time.Second)
 	output, err := ioutil.ReadAll(stdout)
 	if err != nil {
 		t.Fatal(err)
 	}
-	output2, err := ioutil.ReadAll(stderr)
-	if err != nil {
-		t.Fatal(err)
-	}
 	if err := stdout.Close(); err != nil {
 		t.Fatal(err)
 	}
-	if err := stderr.Close(); err != nil {
-		t.Fatal(err)
-	}
-	if string(output) != "hello\n" {
-		t.Fatalf("Unexpected output. 
Expected %s, received: %s (err: %s)", "hello\n", output, output2) - } -} - -func TestCommitRun(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t) - defer runtime.Destroy(container1) - - if container1.State.IsRunning() { - t.Errorf("Container shouldn't be running") - } - if err := container1.Run(); err != nil { - t.Fatal(err) - } - if container1.State.IsRunning() { - t.Errorf("Container shouldn't be running") + if string(output) != "hello world" { + t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output)) } - img, err := runtime.Commit(container1, "", "", "unit test commited image", "", nil) + // Restart and try again + stdin, err = container.StdinPipe() if err != nil { - t.Error(err) + t.Fatal(err) } - - // FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world - container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t) - defer runtime.Destroy(container2) - stdout, err := container2.StdoutPipe() + stdout, err = container.StdoutPipe() if err != nil { t.Fatal(err) } - stderr, err := container2.StderrPipe() - if err != nil { + if err := container.Start(); err != nil { t.Fatal(err) } - if err := container2.Start(); err != nil { + if _, err := io.WriteString(stdin, "hello world #2"); err != nil { t.Fatal(err) } - container2.Wait() - output, err := ioutil.ReadAll(stdout) - if err != nil { + if err := stdin.Close(); err != nil { t.Fatal(err) } - output2, err := ioutil.ReadAll(stderr) + container.WaitStop(-1 * time.Second) + output, err = ioutil.ReadAll(stdout) if err != nil { t.Fatal(err) } if err := stdout.Close(); err != nil { t.Fatal(err) } - if err := stderr.Close(); err != nil { - t.Fatal(err) - } - if string(output) != "hello\n" { - t.Fatalf("Unexpected output. Expected %s, received: %s (err: %s)", "hello\n", output, output2) + if string(output) != "hello world #2" { + t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world #2", string(output)) } } -func TestStart(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, _ := mkContainer(runtime, []string{"-i", "_", "/bin/cat"}, t) - defer runtime.Destroy(container) +func TestStdin(t *testing.T) { + daemon := mkDaemon(t) + defer nuke(daemon) + container, _, err := daemon.Create(&runconfig.Config{ + Image: GetTestImage(daemon).ID, + Cmd: []string{"cat"}, - cStdin, err := container.StdinPipe() + OpenStdin: true, + }, + &runconfig.HostConfig{}, + "", + ) if err != nil { t.Fatal(err) } + defer daemon.Destroy(container) - if err := container.Start(); err != nil { + stdin, err := container.StdinPipe() + if err != nil { t.Fatal(err) } - - // Give some time to the process to start - container.WaitTimeout(500 * time.Millisecond) - - if !container.State.IsRunning() { - t.Errorf("Container should be running") - } - if err := container.Start(); err == nil { - t.Fatalf("A running container should be able to be started") - } - - // Try to avoid the timeout in destroy. Best effort, don't check error - cStdin.Close() - container.WaitTimeout(2 * time.Second) -} - -func TestCpuShares(t *testing.T) { - _, err1 := os.Stat("/sys/fs/cgroup/cpuacct,cpu") - _, err2 := os.Stat("/sys/fs/cgroup/cpu,cpuacct") - if err1 == nil || err2 == nil { - t.Skip("Fixme. Setting cpu cgroup shares doesn't work in dind on a Fedora host. 
The lxc utils are confused by the cpu,cpuacct mount.") - } - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t) - defer runtime.Destroy(container) - - cStdin, err := container.StdinPipe() + stdout, err := container.StdoutPipe() if err != nil { t.Fatal(err) } - if err := container.Start(); err != nil { t.Fatal(err) } - - // Give some time to the process to start - container.WaitTimeout(500 * time.Millisecond) - - if !container.State.IsRunning() { - t.Errorf("Container should be running") - } - if err := container.Start(); err == nil { - t.Fatalf("A running container should be able to be started") - } - - // Try to avoid the timeout in destroy. Best effort, don't check error - cStdin.Close() - container.WaitTimeout(2 * time.Second) -} - -func TestRun(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) - defer runtime.Destroy(container) - - if container.State.IsRunning() { - t.Errorf("Container shouldn't be running") - } - if err := container.Run(); err != nil { + defer stdin.Close() + defer stdout.Close() + if _, err := io.WriteString(stdin, "hello world"); err != nil { t.Fatal(err) } - if container.State.IsRunning() { - t.Errorf("Container shouldn't be running") - } -} - -func TestOutput(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"echo", "-n", "foobar"}, - }, - "", - ) - if err != nil { + if err := stdin.Close(); err != nil { t.Fatal(err) } - defer runtime.Destroy(container) - output, err := container.Output() + container.WaitStop(-1 * time.Second) + output, err := ioutil.ReadAll(stdout) if err != nil { t.Fatal(err) } - if string(output) != "foobar" { - t.Fatalf("%s != %s", string(output), "foobar") + if string(output) != "hello world" { + t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output)) } } -func TestKillDifferentUser(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) +func TestTty(t *testing.T) { + daemon := mkDaemon(t) + defer nuke(daemon) + container, _, err := daemon.Create(&runconfig.Config{ + Image: GetTestImage(daemon).ID, + Cmd: []string{"cat"}, - container, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"cat"}, OpenStdin: true, - User: "daemon", }, + &runconfig.HostConfig{}, "", ) if err != nil { t.Fatal(err) } - defer runtime.Destroy(container) - // FIXME @shykes: this seems redundant, but is very old, I'm leaving it in case - // there is a side effect I'm not seeing. 
- // defer container.stdin.Close() + defer daemon.Destroy(container) - if container.State.IsRunning() { - t.Errorf("Container shouldn't be running") - } - if err := container.Start(); err != nil { - t.Fatal(err) - } - - setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() { - for !container.State.IsRunning() { - time.Sleep(10 * time.Millisecond) - } - }) - - setTimeout(t, "read/write assertion timed out", 2*time.Second, func() { - out, _ := container.StdoutPipe() - in, _ := container.StdinPipe() - if err := assertPipe("hello\n", "hello", out, in, 150); err != nil { - t.Fatal(err) - } - }) - - if err := container.Kill(); err != nil { - t.Fatal(err) - } - - if container.State.IsRunning() { - t.Errorf("Container shouldn't be running") - } - container.Wait() - if container.State.IsRunning() { - t.Errorf("Container shouldn't be running") - } - // Try stopping twice - if err := container.Kill(); err != nil { + stdin, err := container.StdinPipe() + if err != nil { t.Fatal(err) } -} - -// Test that creating a container with a volume doesn't crash. Regression test for #995. -func TestCreateVolume(t *testing.T) { - eng := NewTestEngine(t) - runtime := mkRuntimeFromEngine(eng, t) - defer nuke(runtime) - - config, hc, _, err := runconfig.Parse([]string{"-v", "/var/lib/data", unitTestImageID, "echo", "hello", "world"}, nil) + stdout, err := container.StdoutPipe() if err != nil { t.Fatal(err) } - jobCreate := eng.Job("create") - if err := jobCreate.ImportEnv(config); err != nil { + if err := container.Start(); err != nil { t.Fatal(err) } - var id string - jobCreate.Stdout.AddString(&id) - if err := jobCreate.Run(); err != nil { + defer stdin.Close() + defer stdout.Close() + if _, err := io.WriteString(stdin, "hello world"); err != nil { t.Fatal(err) } - jobStart := eng.Job("start", id) - if err := jobStart.ImportEnv(hc); err != nil { + if err := stdin.Close(); err != nil { t.Fatal(err) } - if err := jobStart.Run(); err != nil { + container.WaitStop(-1 * time.Second) + output, err := ioutil.ReadAll(stdout) + if err != nil { t.Fatal(err) } - // FIXME: this hack can be removed once Wait is a job - c := runtime.Get(id) - if c == nil { - t.Fatalf("Couldn't retrieve container %s from runtime", id) + if string(output) != "hello world" { + t.Fatalf("Unexpected output. 
Expected %s, received: %s", "hello world", string(output)) } - c.WaitTimeout(500 * time.Millisecond) - c.Wait() } -func TestKill(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"sleep", "2"}, - }, - "", - ) - if err != nil { - t.Fatal(err) +func BenchmarkRunSequential(b *testing.B) { + daemon := mkDaemon(b) + defer nuke(daemon) + for i := 0; i < b.N; i++ { + container, _, err := daemon.Create(&runconfig.Config{ + Image: GetTestImage(daemon).ID, + Cmd: []string{"echo", "-n", "foo"}, + }, + &runconfig.HostConfig{}, + "", + ) + if err != nil { + b.Fatal(err) + } + defer daemon.Destroy(container) + output, err := container.Output() + if err != nil { + b.Fatal(err) + } + if string(output) != "foo" { + b.Fatalf("Unexpected output: %s", output) + } + if err := daemon.Destroy(container); err != nil { + b.Fatal(err) + } } - defer runtime.Destroy(container) +} - if container.State.IsRunning() { - t.Errorf("Container shouldn't be running") - } - if err := container.Start(); err != nil { - t.Fatal(err) - } +func BenchmarkRunParallel(b *testing.B) { + daemon := mkDaemon(b) + defer nuke(daemon) - // Give some time to lxc to spawn the process - container.WaitTimeout(500 * time.Millisecond) + var tasks []chan error - if !container.State.IsRunning() { - t.Errorf("Container should be running") - } - if err := container.Kill(); err != nil { - t.Fatal(err) - } - if container.State.IsRunning() { - t.Errorf("Container shouldn't be running") - } - container.Wait() - if container.State.IsRunning() { - t.Errorf("Container shouldn't be running") - } - // Try stopping twice - if err := container.Kill(); err != nil { - t.Fatal(err) - } -} - -func TestExitCode(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - trueContainer, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"/bin/true"}, - }, "") - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(trueContainer) - if err := trueContainer.Run(); err != nil { - t.Fatal(err) - } - if code := trueContainer.State.GetExitCode(); code != 0 { - t.Fatalf("Unexpected exit code %d (expected 0)", code) - } - - falseContainer, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"/bin/false"}, - }, "") - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(falseContainer) - if err := falseContainer.Run(); err != nil { - t.Fatal(err) - } - if code := falseContainer.State.GetExitCode(); code != 1 { - t.Fatalf("Unexpected exit code %d (expected 1)", code) - } -} - -func TestRestart(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"echo", "-n", "foobar"}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - output, err := container.Output() - if err != nil { - t.Fatal(err) - } - if string(output) != "foobar" { - t.Error(string(output)) - } - - // Run the container again and check the output - output, err = container.Output() - if err != nil { - t.Fatal(err) - } - if string(output) != "foobar" { - t.Error(string(output)) - } -} - -func TestRestartStdin(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"cat"}, - - OpenStdin: true, - }, - "", - ) - if err != nil { - 
t.Fatal(err) - } - defer runtime.Destroy(container) - - stdin, err := container.StdinPipe() - if err != nil { - t.Fatal(err) - } - stdout, err := container.StdoutPipe() - if err != nil { - t.Fatal(err) - } - if err := container.Start(); err != nil { - t.Fatal(err) - } - if _, err := io.WriteString(stdin, "hello world"); err != nil { - t.Fatal(err) - } - if err := stdin.Close(); err != nil { - t.Fatal(err) - } - container.Wait() - output, err := ioutil.ReadAll(stdout) - if err != nil { - t.Fatal(err) - } - if err := stdout.Close(); err != nil { - t.Fatal(err) - } - if string(output) != "hello world" { - t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output)) - } - - // Restart and try again - stdin, err = container.StdinPipe() - if err != nil { - t.Fatal(err) - } - stdout, err = container.StdoutPipe() - if err != nil { - t.Fatal(err) - } - if err := container.Start(); err != nil { - t.Fatal(err) - } - if _, err := io.WriteString(stdin, "hello world #2"); err != nil { - t.Fatal(err) - } - if err := stdin.Close(); err != nil { - t.Fatal(err) - } - container.Wait() - output, err = ioutil.ReadAll(stdout) - if err != nil { - t.Fatal(err) - } - if err := stdout.Close(); err != nil { - t.Fatal(err) - } - if string(output) != "hello world #2" { - t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world #2", string(output)) - } -} - -func TestUser(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - // Default user must be root - container, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"id"}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - output, err := container.Output() - if err != nil { - t.Fatal(err) - } - if !strings.Contains(string(output), "uid=0(root) gid=0(root)") { - t.Error(string(output)) - } - - // Set a username - container, _, err = runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"id"}, - - User: "root", - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - output, err = container.Output() - if code := container.State.GetExitCode(); err != nil || code != 0 { - t.Fatal(err) - } - if !strings.Contains(string(output), "uid=0(root) gid=0(root)") { - t.Error(string(output)) - } - - // Set a UID - container, _, err = runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"id"}, - - User: "0", - }, - "", - ) - if code := container.State.GetExitCode(); err != nil || code != 0 { - t.Fatal(err) - } - defer runtime.Destroy(container) - output, err = container.Output() - if code := container.State.GetExitCode(); err != nil || code != 0 { - t.Fatal(err) - } - if !strings.Contains(string(output), "uid=0(root) gid=0(root)") { - t.Error(string(output)) - } - - // Set a different user by uid - container, _, err = runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"id"}, - - User: "1", - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - output, err = container.Output() - if err != nil { - t.Fatal(err) - } else if code := container.State.GetExitCode(); code != 0 { - t.Fatalf("Container exit code is invalid: %d\nOutput:\n%s\n", code, output) - } - if !strings.Contains(string(output), "uid=1(daemon) gid=1(daemon)") { - t.Error(string(output)) - } - - // Set a different user by username - container, _, err = runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: 
[]string{"id"}, - - User: "daemon", - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - output, err = container.Output() - if code := container.State.GetExitCode(); err != nil || code != 0 { - t.Fatal(err) - } - if !strings.Contains(string(output), "uid=1(daemon) gid=1(daemon)") { - t.Error(string(output)) - } - - // Test an wrong username - container, _, err = runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"id"}, - - User: "unknownuser", - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - output, err = container.Output() - if container.State.GetExitCode() == 0 { - t.Fatal("Starting container with wrong uid should fail but it passed.") - } -} - -func TestMultipleContainers(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - container1, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"sleep", "2"}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container1) - - container2, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"sleep", "2"}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container2) - - // Start both containers - if err := container1.Start(); err != nil { - t.Fatal(err) - } - if err := container2.Start(); err != nil { - t.Fatal(err) - } - - // Make sure they are running before trying to kill them - container1.WaitTimeout(250 * time.Millisecond) - container2.WaitTimeout(250 * time.Millisecond) - - // If we are here, both containers should be running - if !container1.State.IsRunning() { - t.Fatal("Container not running") - } - if !container2.State.IsRunning() { - t.Fatal("Container not running") - } - - // Kill them - if err := container1.Kill(); err != nil { - t.Fatal(err) - } - - if err := container2.Kill(); err != nil { - t.Fatal(err) - } -} - -func TestStdin(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"cat"}, - - OpenStdin: true, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - - stdin, err := container.StdinPipe() - if err != nil { - t.Fatal(err) - } - stdout, err := container.StdoutPipe() - if err != nil { - t.Fatal(err) - } - if err := container.Start(); err != nil { - t.Fatal(err) - } - defer stdin.Close() - defer stdout.Close() - if _, err := io.WriteString(stdin, "hello world"); err != nil { - t.Fatal(err) - } - if err := stdin.Close(); err != nil { - t.Fatal(err) - } - container.Wait() - output, err := ioutil.ReadAll(stdout) - if err != nil { - t.Fatal(err) - } - if string(output) != "hello world" { - t.Fatalf("Unexpected output. 
Expected %s, received: %s", "hello world", string(output)) - } -} - -func TestTty(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"cat"}, - - OpenStdin: true, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - - stdin, err := container.StdinPipe() - if err != nil { - t.Fatal(err) - } - stdout, err := container.StdoutPipe() - if err != nil { - t.Fatal(err) - } - if err := container.Start(); err != nil { - t.Fatal(err) - } - defer stdin.Close() - defer stdout.Close() - if _, err := io.WriteString(stdin, "hello world"); err != nil { - t.Fatal(err) - } - if err := stdin.Close(); err != nil { - t.Fatal(err) - } - container.Wait() - output, err := ioutil.ReadAll(stdout) - if err != nil { - t.Fatal(err) - } - if string(output) != "hello world" { - t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output)) - } -} - -func TestEnv(t *testing.T) { - os.Setenv("TRUE", "false") - os.Setenv("TRICKY", "tri\ncky\n") - runtime := mkRuntime(t) - defer nuke(runtime) - config, _, _, err := runconfig.Parse([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil) - if err != nil { - t.Fatal(err) - } - container, _, err := runtime.Create(config, "") - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - - stdout, err := container.StdoutPipe() - if err != nil { - t.Fatal(err) - } - defer stdout.Close() - if err := container.Start(); err != nil { - t.Fatal(err) - } - container.Wait() - output, err := ioutil.ReadAll(stdout) - if err != nil { - t.Fatal(err) - } - actualEnv := strings.Split(string(output), "\n") - if actualEnv[len(actualEnv)-1] == "" { - actualEnv = actualEnv[:len(actualEnv)-1] - } - sort.Strings(actualEnv) - goodEnv := []string{ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "HOME=/", - "HOSTNAME=" + utils.TruncateID(container.ID), - "FALSE=true", - "TRUE=false", - "TRICKY=tri", - "cky", - "", - } - sort.Strings(goodEnv) - if len(goodEnv) != len(actualEnv) { - t.Fatalf("Wrong environment: should be %d variables, not: '%s'\n", len(goodEnv), strings.Join(actualEnv, ", ")) - } - for i := range goodEnv { - if actualEnv[i] != goodEnv[i] { - t.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) - } - } -} - -func TestEntrypoint(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Entrypoint: []string{"/bin/echo"}, - Cmd: []string{"-n", "foobar"}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - output, err := container.Output() - if err != nil { - t.Fatal(err) - } - if string(output) != "foobar" { - t.Error(string(output)) - } -} - -func TestEntrypointNoCmd(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Entrypoint: []string{"/bin/echo", "foobar"}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - output, err := container.Output() - if err != nil { - t.Fatal(err) - } - if strings.Trim(string(output), "\r\n") != "foobar" { - t.Error(string(output)) - } -} - -func BenchmarkRunSequencial(b *testing.B) { - runtime := mkRuntime(b) - defer nuke(runtime) - for i := 0; i < b.N; i++ { - container, _, err := 
runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"echo", "-n", "foo"}, - }, - "", - ) - if err != nil { - b.Fatal(err) - } - defer runtime.Destroy(container) - output, err := container.Output() - if err != nil { - b.Fatal(err) - } - if string(output) != "foo" { - b.Fatalf("Unexpected output: %s", output) - } - if err := runtime.Destroy(container); err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkRunParallel(b *testing.B) { - runtime := mkRuntime(b) - defer nuke(runtime) - - var tasks []chan error - - for i := 0; i < b.N; i++ { - complete := make(chan error) - tasks = append(tasks, complete) - go func(i int, complete chan error) { - container, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"echo", "-n", "foo"}, - }, - "", - ) - if err != nil { - complete <- err - return - } - defer runtime.Destroy(container) - if err := container.Start(); err != nil { - complete <- err - return - } - if err := container.WaitTimeout(15 * time.Second); err != nil { - complete <- err - return - } - // if string(output) != "foo" { - // complete <- fmt.Errorf("Unexecpted output: %v", string(output)) - // } - if err := runtime.Destroy(container); err != nil { - complete <- err - return - } - complete <- nil - }(i, complete) - } - var errors []error - for _, task := range tasks { - err := <-task - if err != nil { - errors = append(errors, err) - } - } - if len(errors) > 0 { - b.Fatal(errors) - } -} - -func tempDir(t *testing.T) string { - tmpDir, err := ioutil.TempDir("", "docker-test-container") - if err != nil { - t.Fatal(err) - } - return tmpDir -} - -// Test for #1737 -func TestCopyVolumeUidGid(t *testing.T) { - eng := NewTestEngine(t) - r := mkRuntimeFromEngine(eng, t) - defer r.Nuke() - - // Add directory not owned by root - container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t) - defer r.Destroy(container1) - - if container1.State.IsRunning() { - t.Errorf("Container shouldn't be running") - } - if err := container1.Run(); err != nil { - t.Fatal(err) - } - if container1.State.IsRunning() { - t.Errorf("Container shouldn't be running") - } - - img, err := r.Commit(container1, "", "", "unit test commited image", "", nil) - if err != nil { - t.Error(err) - } - - // Test that the uid and gid is copied from the image to the volume - tmpDir1 := tempDir(t) - defer os.RemoveAll(tmpDir1) - stdout1, _ := runContainer(eng, r, []string{"-v", "/hello", img.ID, "stat", "-c", "%U %G", "/hello"}, t) - if !strings.Contains(stdout1, "daemon daemon") { - t.Fatal("Container failed to transfer uid and gid to volume") - } -} - -// Test for #1582 -func TestCopyVolumeContent(t *testing.T) { - eng := NewTestEngine(t) - r := mkRuntimeFromEngine(eng, t) - defer r.Nuke() - - // Put some content in a directory of a container and commit it - container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t) - defer r.Destroy(container1) - - if container1.State.IsRunning() { - t.Errorf("Container shouldn't be running") - } - if err := container1.Run(); err != nil { - t.Fatal(err) - } - if container1.State.IsRunning() { - t.Errorf("Container shouldn't be running") - } - - img, err := r.Commit(container1, "", "", "unit test commited image", "", nil) - if err != nil { - t.Error(err) - } - - // Test that the content is copied from the image to the volume - tmpDir1 := tempDir(t) - defer 
os.RemoveAll(tmpDir1) - stdout1, _ := runContainer(eng, r, []string{"-v", "/hello", img.ID, "find", "/hello"}, t) - if !(strings.Contains(stdout1, "/hello/local/world") && strings.Contains(stdout1, "/hello/local")) { - t.Fatal("Container failed to transfer content to volume") - } -} - -func TestBindMounts(t *testing.T) { - eng := NewTestEngine(t) - r := mkRuntimeFromEngine(eng, t) - defer r.Nuke() - - tmpDir := tempDir(t) - defer os.RemoveAll(tmpDir) - writeFile(path.Join(tmpDir, "touch-me"), "", t) - - // Test reading from a read-only bind mount - stdout, _ := runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "_", "ls", "/tmp"}, t) - if !strings.Contains(stdout, "touch-me") { - t.Fatal("Container failed to read from bind mount") - } - - // test writing to bind mount - runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "_", "touch", "/tmp/holla"}, t) - readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist - - // test mounting to an illegal destination directory - if _, err := runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:.", tmpDir), "_", "ls", "."}, nil); err == nil { - t.Fatal("Container bind mounted illegal directory") - } - - // test mount a file - runContainer(eng, r, []string{"-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "_", "sh", "-c", "echo -n 'yotta' > /tmp/holla"}, t) - content := readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist - if content != "yotta" { - t.Fatal("Container failed to write to bind mount file") - } -} - -// Test that -volumes-from supports both read-only mounts -func TestFromVolumesInReadonlyMode(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"/bin/echo", "-n", "foobar"}, - Volumes: map[string]struct{}{"/test": {}}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - _, err = container.Output() - if err != nil { - t.Fatal(err) - } - if !container.VolumesRW["/test"] { - t.Fail() - } - - container2, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"/bin/echo", "-n", "foobar"}, - VolumesFrom: container.ID + ":ro", - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container2) - - _, err = container2.Output() - if err != nil { - t.Fatal(err) - } - - if container.Volumes["/test"] != container2.Volumes["/test"] { - t.Logf("container volumes do not match: %s | %s ", - container.Volumes["/test"], - container2.Volumes["/test"]) - t.Fail() - } - - _, exists := container2.VolumesRW["/test"] - if !exists { - t.Logf("container2 is missing '/test' volume: %s", container2.VolumesRW) - t.Fail() - } - - if container2.VolumesRW["/test"] != false { - t.Log("'/test' volume mounted in read-write mode, expected read-only") - t.Fail() - } -} - -// Test that VolumesRW values are copied to the new container. 
Regression test for #1201 -func TestVolumesFromReadonlyMount(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"/bin/echo", "-n", "foobar"}, - Volumes: map[string]struct{}{"/test": {}}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - _, err = container.Output() - if err != nil { - t.Fatal(err) - } - if !container.VolumesRW["/test"] { - t.Fail() - } - - container2, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"/bin/echo", "-n", "foobar"}, - VolumesFrom: container.ID, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container2) - - _, err = container2.Output() - if err != nil { - t.Fatal(err) - } - - if container.Volumes["/test"] != container2.Volumes["/test"] { - t.Fail() - } - - actual, exists := container2.VolumesRW["/test"] - if !exists { - t.Fail() - } - - if container.VolumesRW["/test"] != actual { - t.Fail() - } -} - -// Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819. -func TestRestartWithVolumes(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - container, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"echo", "-n", "foobar"}, - Volumes: map[string]struct{}{"/test": {}}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - - for key := range container.Config.Volumes { - if key != "/test" { - t.Fail() - } - } - - _, err = container.Output() - if err != nil { - t.Fatal(err) - } - - expected := container.Volumes["/test"] - if expected == "" { - t.Fail() - } - // Run the container again to verify the volume path persists - _, err = container.Output() - if err != nil { - t.Fatal(err) - } - - actual := container.Volumes["/test"] - if expected != actual { - t.Fatalf("Expected volume path: %s Actual path: %s", expected, actual) - } -} - -// Test for #1351 -func TestVolumesFromWithVolumes(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - container, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, - Volumes: map[string]struct{}{"/test": {}}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - - for key := range container.Config.Volumes { - if key != "/test" { - t.Fail() - } - } - - _, err = container.Output() - if err != nil { - t.Fatal(err) - } - - expected := container.Volumes["/test"] - if expected == "" { - t.Fail() - } - - container2, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"cat", "/test/foo"}, - VolumesFrom: container.ID, - Volumes: map[string]struct{}{"/test": {}}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container2) - - output, err := container2.Output() - if err != nil { - t.Fatal(err) - } - - if string(output) != "bar" { - t.Fail() - } - - if container.Volumes["/test"] != container2.Volumes["/test"] { - t.Fail() - } - - // Ensure it restarts successfully - _, err = container2.Output() - if err != nil { - t.Fatal(err) - } -} - -func TestContainerNetwork(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - // If I change this to ping 8.8.8.8 it 
fails. Any idea why? - timthelion - Cmd: []string{"ping", "-c", "1", "127.0.0.1"}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - if err := container.Run(); err != nil { - t.Fatal(err) - } - if code := container.State.GetExitCode(); code != 0 { - t.Fatalf("Unexpected ping 127.0.0.1 exit code %d (expected 0)", code) - } -} - -// Issue #4681 -func TestLoopbackFunctionsWhenNetworkingIsDissabled(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"ping", "-c", "1", "127.0.0.1"}, - NetworkDisabled: true, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - if err := container.Run(); err != nil { - t.Fatal(err) - } - if code := container.State.GetExitCode(); code != 0 { - t.Fatalf("Unexpected ping 127.0.0.1 exit code %d (expected 0)", code) - } -} - -func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) { - eng := NewTestEngine(t) - runtime := mkRuntimeFromEngine(eng, t) - defer nuke(runtime) - - config, hc, _, err := runconfig.Parse([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil) - if err != nil { - t.Fatal(err) - } - - jobCreate := eng.Job("create") - if err := jobCreate.ImportEnv(config); err != nil { - t.Fatal(err) - } - var id string - jobCreate.Stdout.AddString(&id) - if err := jobCreate.Run(); err != nil { - t.Fatal(err) - } - // FIXME: this hack can be removed once Wait is a job - c := runtime.Get(id) - if c == nil { - t.Fatalf("Couldn't retrieve container %s from runtime", id) - } - stdout, err := c.StdoutPipe() - if err != nil { - t.Fatal(err) - } - - jobStart := eng.Job("start", id) - if err := jobStart.ImportEnv(hc); err != nil { - t.Fatal(err) - } - if err := jobStart.Run(); err != nil { - t.Fatal(err) - } - - c.WaitTimeout(500 * time.Millisecond) - c.Wait() - output, err := ioutil.ReadAll(stdout) - if err != nil { - t.Fatal(err) - } - - interfaces := regexp.MustCompile(`(?m)^[0-9]+: [a-zA-Z0-9]+`).FindAllString(string(output), -1) - if len(interfaces) != 1 { - t.Fatalf("Wrong interface count in test container: expected [*: lo], got %s", interfaces) - } - if !strings.HasSuffix(interfaces[0], ": lo") { - t.Fatalf("Wrong interface in test container: expected [*: lo], got %s", interfaces) - } -} - -func TestPrivilegedCanMknod(t *testing.T) { - eng := NewTestEngine(t) - runtime := mkRuntimeFromEngine(eng, t) - defer runtime.Nuke() - if output, err := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" { - t.Fatalf("Could not mknod into privileged container %s %v", output, err) - } -} - -func TestPrivilegedCanMount(t *testing.T) { - eng := NewTestEngine(t) - runtime := mkRuntimeFromEngine(eng, t) - defer runtime.Nuke() - if output, _ := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" { - t.Fatal("Could not mount into privileged container") - } -} - -func TestPrivilegedCannotMknod(t *testing.T) { - eng := NewTestEngine(t) - runtime := mkRuntimeFromEngine(eng, t) - defer runtime.Nuke() - if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 || echo ok"}, t); output != "ok\n" { - t.Fatal("Could mknod into secure container") - } -} - -func TestPrivilegedCannotMount(t *testing.T) { - eng := NewTestEngine(t) - runtime := mkRuntimeFromEngine(eng, t) - defer 
runtime.Nuke() - if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mount -t tmpfs none /tmp || echo ok"}, t); output != "ok\n" { - t.Fatal("Could mount into secure container") - } -} - -func TestMultipleVolumesFrom(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - container, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, - Volumes: map[string]struct{}{"/test": {}}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - - for key := range container.Config.Volumes { - if key != "/test" { - t.Fail() - } - } - - _, err = container.Output() - if err != nil { - t.Fatal(err) - } - - expected := container.Volumes["/test"] - if expected == "" { - t.Fail() - } - - container2, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"sh", "-c", "echo -n bar > /other/foo"}, - Volumes: map[string]struct{}{"/other": {}}, - }, - "", - ) - if err != nil { - t.Fatal(err) + for i := 0; i < b.N; i++ { + complete := make(chan error) + tasks = append(tasks, complete) + go func(i int, complete chan error) { + container, _, err := daemon.Create(&runconfig.Config{ + Image: GetTestImage(daemon).ID, + Cmd: []string{"echo", "-n", "foo"}, + }, + &runconfig.HostConfig{}, + "", + ) + if err != nil { + complete <- err + return + } + defer daemon.Destroy(container) + if err := container.Start(); err != nil { + complete <- err + return + } + if _, err := container.WaitStop(15 * time.Second); err != nil { + complete <- err + return + } + // if string(output) != "foo" { + // complete <- fmt.Errorf("Unexecpted output: %v", string(output)) + // } + if err := daemon.Destroy(container); err != nil { + complete <- err + return + } + complete <- nil + }(i, complete) } - defer runtime.Destroy(container2) - - for key := range container2.Config.Volumes { - if key != "/other" { - t.FailNow() + var errors []error + for _, task := range tasks { + err := <-task + if err != nil { + errors = append(errors, err) } } - if _, err := container2.Output(); err != nil { - t.Fatal(err) - } - - container3, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"/bin/echo", "-n", "foobar"}, - VolumesFrom: strings.Join([]string{container.ID, container2.ID}, ","), - }, "") - - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container3) - - if _, err := container3.Output(); err != nil { - t.Fatal(err) - } - - if container3.Volumes["/test"] != container.Volumes["/test"] { - t.Fail() - } - if container3.Volumes["/other"] != container2.Volumes["/other"] { - t.Fail() - } -} - -func TestRestartGhost(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - container, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, - Volumes: map[string]struct{}{"/test": {}}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - - if err := container.Kill(); err != nil { - t.Fatal(err) - } - - container.State.SetGhost(true) - - _, err = container.Output() - if err != nil { - t.Fatal(err) + if len(errors) > 0 { + b.Fatal(errors) } } diff -Nru docker.io-0.9.1~dfsg1/integration/fixtures/https/ca.pem docker.io-1.3.2~dfsg1/integration/fixtures/https/ca.pem --- docker.io-0.9.1~dfsg1/integration/fixtures/https/ca.pem 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/fixtures/https/ca.pem 2014-11-24 
17:38:01.000000000 +0000 @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID0TCCAzqgAwIBAgIJAP2r7GqEJwSnMA0GCSqGSIb3DQEBBQUAMIGiMQswCQYD +VQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMG +A1UEChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMI +Y2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWls +QGhvc3QuZG9tYWluMB4XDTEzMTIwMzE2NTYzMFoXDTIzMTIwMTE2NTYzMFowgaIx +CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2Nv +MRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYD +VQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEW +EG1haWxAaG9zdC5kb21haW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALAn +0xDw+5y7ZptQacq66pUhRu82JP2WU6IDgo5QUtNU6/CX5PwQATe/OnYTZQFbksxp +AU9boG0FCkgxfsgPYXEuZxVEGKI2fxfKHOZZI8mrkWmj6eWU/0cvCjGVc9rTITP5 +sNQvg+hORyVDdNp2IdsbMJayiB3AQYMFx3vSDOMTAgMBAAGjggELMIIBBzAdBgNV +HQ4EFgQUZu7DFz09q0QBa2+ymRm9qgK1NPswgdcGA1UdIwSBzzCBzIAUZu7DFz09 +q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD +QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x +ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI +Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq +hCcEpzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAF8fJKKM+/oOdnNi +zEd0M1+PmZOyqvjYQn/2ZR8UHH6Imgc/OPQKZXf0bVE1Txc/DaUNn9Isd1SuCuaE +ic3vAIYYU7PmgeNN6vwec48V96T7jr+GAi6AVMhQEc2hHCfVtx11Xx+x6aHDZzJt +Zxtf5lL6KSO9Y+EFwM+rju6hm5hW +-----END CERTIFICATE----- diff -Nru docker.io-0.9.1~dfsg1/integration/fixtures/https/client-cert.pem docker.io-1.3.2~dfsg1/integration/fixtures/https/client-cert.pem --- docker.io-0.9.1~dfsg1/integration/fixtures/https/client-cert.pem 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/fixtures/https/client-cert.pem 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 3 (0x3) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Dec 4 14:17:54 2013 GMT + Not After : Dec 2 14:17:54 2023 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:ca:c9:05:d0:09:4e:3e:a4:fc:d5:14:f4:a5:e8: + 34:d3:6b:51:e3:f3:62:ea:a1:f0:e8:ed:c4:2a:bc: + f0:4f:ca:07:df:e3:88:fa:f4:21:99:35:0e:3d:ea: + b0:86:e7:c4:d2:8a:83:2b:42:b8:ec:a3:99:62:70: + 81:46:cc:fc:a5:1d:d2:63:e8:eb:07:25:9a:e2:25: + 6d:11:56:f2:1a:51:a1:b6:3e:1c:57:32:e9:7b:2c: + aa:1b:cc:97:2d:89:2d:b1:c9:5e:35:28:4d:7c:fa: + 65:31:3e:f7:70:dd:6e:0b:3c:58:af:a8:2e:24:c0: + 7e:4e:78:7d:0a:9e:8f:42:43 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + Easy-RSA Generated Certificate + X509v3 Subject Key Identifier: + DE:42:EF:2D:98:A3:6C:A8:AA:E0:8C:71:2C:9D:64:23:A9:E2:7E:81 + X509v3 Authority Key Identifier: + keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB + DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:FD:AB:EC:6A:84:27:04:A7 + + X509v3 Extended Key Usage: + TLS Web Client Authentication + X509v3 Key Usage: + Digital Signature + Signature Algorithm: sha1WithRSAEncryption + 1c:44:26:ea:e1:66:25:cb:e4:8e:57:1c:f6:b9:17:22:62:40: + 12:90:8f:3b:b2:61:7a:54:94:8f:b1:20:0b:bf:a3:51:e3:fa: + 
1c:a1:be:92:3a:d0:76:44:c0:57:83:ab:6a:e4:1a:45:49:a4: + af:39:0d:60:32:fc:3a:be:d7:fb:5d:99:7a:1f:87:e7:d5:ab: + 84:a2:5e:90:d8:bf:fa:89:6d:32:26:02:5e:31:35:68:7f:31: + f5:6b:51:46:bc:af:70:ed:5a:09:7d:ec:b2:48:4f:fe:c5:2f: + 56:04:ad:f6:c1:d2:2a:e4:6a:c4:87:fe:08:35:c5:38:cb:5e: + 4a:c4 +-----BEGIN CERTIFICATE----- +MIIEFTCCA36gAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv +cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l +MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv +bWFpbjAeFw0xMzEyMDQxNDE3NTRaFw0yMzEyMDIxNDE3NTRaMIGgMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE +ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEPMA0GA1UEAxMGY2xp +ZW50MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0 +LmRvbWFpbjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAyskF0AlOPqT81RT0 +peg002tR4/Ni6qHw6O3EKrzwT8oH3+OI+vQhmTUOPeqwhufE0oqDK0K47KOZYnCB +Rsz8pR3SY+jrByWa4iVtEVbyGlGhtj4cVzLpeyyqG8yXLYktscleNShNfPplMT73 +cN1uCzxYr6guJMB+Tnh9Cp6PQkMCAwEAAaOCAVkwggFVMAkGA1UdEwQCMAAwLQYJ +YIZIAYb4QgENBCAWHkVhc3ktUlNBIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNV +HQ4EFgQU3kLvLZijbKiq4IxxLJ1kI6nifoEwgdcGA1UdIwSBzzCBzIAUZu7DFz09 +q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD +QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x +ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI +Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq +hCcEpzATBgNVHSUEDDAKBggrBgEFBQcDAjALBgNVHQ8EBAMCB4AwDQYJKoZIhvcN +AQEFBQADgYEAHEQm6uFmJcvkjlcc9rkXImJAEpCPO7JhelSUj7EgC7+jUeP6HKG+ +kjrQdkTAV4OrauQaRUmkrzkNYDL8Or7X+12Zeh+H59WrhKJekNi/+oltMiYCXjE1 +aH8x9WtRRryvcO1aCX3sskhP/sUvVgSt9sHSKuRqxIf+CDXFOMteSsQ= +-----END CERTIFICATE----- diff -Nru docker.io-0.9.1~dfsg1/integration/fixtures/https/client-key.pem docker.io-1.3.2~dfsg1/integration/fixtures/https/client-key.pem --- docker.io-0.9.1~dfsg1/integration/fixtures/https/client-key.pem 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/fixtures/https/client-key.pem 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAMrJBdAJTj6k/NUU +9KXoNNNrUePzYuqh8OjtxCq88E/KB9/jiPr0IZk1Dj3qsIbnxNKKgytCuOyjmWJw +gUbM/KUd0mPo6wclmuIlbRFW8hpRobY+HFcy6XssqhvMly2JLbHJXjUoTXz6ZTE+ +93Ddbgs8WK+oLiTAfk54fQqej0JDAgMBAAECgYBOFEzKp2qbMEexe9ofL2N3rDDh +xkrl8OijpzkLA6i78BxMFn4dsnZlWUpciMrjhsYAExkiRRSS+QMMJimAq1jzQqc3 +FAQV2XGYwkd0cUn7iZGvfNnEPysjsfyYQM+m+sT0ATj4BZjVShC6kkSjTdm1leLN +OSvcHdcu3Xxg9ufF0QJBAPYdnNt5sIndt2WECePuRVi+uF4mlxTobFY0fjn26yhC +4RsnhhD3Vldygo9gvnkwrAZYaALGSPBewes2InxvjA8CQQDS7erKiNXpwoqz5XiU +SVEsIIVTdWzBjGbIqMOu/hUwM5FK4j6JTBks0aTGMyh0YV9L1EzM0X79J29JahCe +iQKNAkBKNMOGqTpBV0hko1sYDk96YobUXG5RL4L6uvkUIQ7mJMQam+AgXXL7Ctuy +v0iu4a38e8tgisiTMP7nHHtpaXihAkAOiN54/lzfMsykANgCP9scE1GcoqbP34Dl +qttxH4kOPT9xzY1JoLjLYdbc4YGUI3GRpBt2sajygNkmUey7P+2xAkBBsVCZFvTw +qHvOpPS2kX5ml5xoc/QAHK9N7kR+X7XFYx82RTVSqJEK4lPb+aEWn+CjiIewO4Q5 +ksDFuNxAzbhl +-----END PRIVATE KEY----- diff -Nru docker.io-0.9.1~dfsg1/integration/fixtures/https/client-rogue-cert.pem docker.io-1.3.2~dfsg1/integration/fixtures/https/client-rogue-cert.pem --- docker.io-0.9.1~dfsg1/integration/fixtures/https/client-rogue-cert.pem 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/fixtures/https/client-rogue-cert.pem 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 2 (0x2) + Signature 
Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Feb 24 17:54:59 2014 GMT + Not After : Feb 22 17:54:59 2024 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:e8:e2:2c:b8:d4:db:89:50:4f:47:1e:68:db:f7: + e4:cc:47:41:63:75:03:37:50:7a:a8:4d:27:36:d5: + 15:01:08:b6:cf:56:f7:56:6d:3d:f9:e2:8d:1a:5d: + bf:a0:24:5e:07:55:8e:d0:dc:f1:fa:19:87:1d:d6: + b6:58:82:2e:ba:69:6d:e9:d9:c8:16:0d:1d:59:7f: + f4:8e:58:10:01:3d:21:14:16:3c:ec:cd:8c:b7:0e: + e6:7b:77:b4:f9:90:a5:17:01:bb:84:c6:b2:12:87: + 70:eb:9f:6d:4f:d0:68:8b:96:c0:e7:0b:51:b4:9d: + 1d:7b:6c:7b:be:89:6b:88:8b + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + Easy-RSA Generated Certificate + X509v3 Subject Key Identifier: + 9E:F8:49:D0:A2:76:30:5C:AB:2B:8A:B5:8D:C6:45:1F:A7:F8:CF:85 + X509v3 Authority Key Identifier: + keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F + DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:E7:21:1E:18:41:1B:96:83 + + X509v3 Extended Key Usage: + TLS Web Client Authentication + X509v3 Key Usage: + Digital Signature + Signature Algorithm: sha1WithRSAEncryption + 48:76:c0:18:fa:0a:ee:4e:1a:ec:02:9d:d4:83:ca:94:54:a1: + 3f:51:2f:3e:4b:95:c3:42:9b:71:a0:4b:d9:af:47:23:b9:1c: + fb:85:ba:76:e2:09:cb:65:bb:d2:7d:44:3d:4b:67:ba:80:83: + be:a8:ed:c4:b9:ea:1a:1b:c7:59:3b:d9:5c:0d:46:d8:c9:92: + cb:10:c5:f2:1a:38:a4:aa:07:2c:e3:84:16:79:c7:95:09:e3: + 01:d2:15:a2:77:0b:8b:bf:94:04:e9:7f:c0:cd:e6:2e:64:cd: + 1e:a3:32:ec:11:cc:62:ce:c7:4e:cd:ad:48:5c:b1:b8:e9:76: + b3:f9 +-----BEGIN CERTIFICATE----- +MIIEDTCCA3agAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2 +aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP +BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu +MB4XDTE0MDIyNDE3NTQ1OVoXDTI0MDIyMjE3NTQ1OVowgaAxCzAJBgNVBAYTAlVT +MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG +b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMQ8wDQYDVQQDEwZjbGllbnQx +ETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9t +YWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDo4iy41NuJUE9HHmjb9+TM +R0FjdQM3UHqoTSc21RUBCLbPVvdWbT354o0aXb+gJF4HVY7Q3PH6GYcd1rZYgi66 +aW3p2cgWDR1Zf/SOWBABPSEUFjzszYy3DuZ7d7T5kKUXAbuExrISh3Drn21P0GiL +lsDnC1G0nR17bHu+iWuIiwIDAQABo4IBVTCCAVEwCQYDVR0TBAIwADAtBglghkgB +hvhCAQ0EIBYeRWFzeS1SU0EgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQW +BBSe+EnQonYwXKsrirWNxkUfp/jPhTCB0wYDVR0jBIHLMIHIgBTcpfF2207Nju+x +I1YdkoCZdDvqb6GBpKSBoTCBnjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRUw +EwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2aWwgSW5jMREwDwYDVQQL +EwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1l +MR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWluggkA5yEeGEEbloMwEwYD +VR0lBAwwCgYIKwYBBQUHAwIwCwYDVR0PBAQDAgeAMA0GCSqGSIb3DQEBBQUAA4GB +AEh2wBj6Cu5OGuwCndSDypRUoT9RLz5LlcNCm3GgS9mvRyO5HPuFunbiCctlu9J9 +RD1LZ7qAg76o7cS56hobx1k72VwNRtjJkssQxfIaOKSqByzjhBZ5x5UJ4wHSFaJ3 +C4u/lATpf8DN5i5kzR6jMuwRzGLOx07NrUhcsbjpdrP5 +-----END CERTIFICATE----- diff -Nru docker.io-0.9.1~dfsg1/integration/fixtures/https/client-rogue-key.pem 
docker.io-1.3.2~dfsg1/integration/fixtures/https/client-rogue-key.pem --- docker.io-0.9.1~dfsg1/integration/fixtures/https/client-rogue-key.pem 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/fixtures/https/client-rogue-key.pem 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAOjiLLjU24lQT0ce +aNv35MxHQWN1AzdQeqhNJzbVFQEIts9W91ZtPfnijRpdv6AkXgdVjtDc8foZhx3W +tliCLrppbenZyBYNHVl/9I5YEAE9IRQWPOzNjLcO5nt3tPmQpRcBu4TGshKHcOuf +bU/QaIuWwOcLUbSdHXtse76Ja4iLAgMBAAECgYADs+TmI2xCKKa6CL++D5jxrohZ +nnionnz0xBVFh+nHlG3jqgxQsXf0yydXLfpn/2wHTdLxezHVuiYt0UYg7iD0CglW ++IjcgMebzyjLeYqYOE5llPlMvhp2HoEMYJNb+7bRrZ1WCITbu+Su0w1cgA7Cs+Ej +VlfvGzN+qqnDThRUYQJBAPY0sMWZJKly8QhUmUvmcXdPczzSOf6Mm7gc5LR6wzxd +vW7syuqk50qjqVqFpN81vCV7GoDxRUWbTM9ftf7JGFkCQQDyJc/1RMygE2o+enU1 +6UBxJyclXITEYtDn8aoEpLNc7RakP1WoPUKjZOnjkcoKcIkFNkSPeCfQujrb5f3F +MkuDAkByAI/hzzmkpK5rFxEsjfX4Mve/L/DepyjrpaVY1IdWimlO1aJX6CeY7hNa +8QsYt/74s/nfvtg+lNyKIV1aLq9xAkB+WSSNgfyTeg3x08vc+Xxajmdqoz/TiQwg +OoTQL3A3iK5LvZBgXLasszcnOycFE3srcQmNItEDpGiZ3QPxJTEpAkEA45EE9NMJ +SA7EGWSFlbz4f4u4oBeiDiJRJbGGfAyVxZlpCWUjPpg9+swsWoFEOjnGYaChAMk5 +nrOdMf15T6QF7Q== +-----END PRIVATE KEY----- diff -Nru docker.io-0.9.1~dfsg1/integration/fixtures/https/server-cert.pem docker.io-1.3.2~dfsg1/integration/fixtures/https/server-cert.pem --- docker.io-0.9.1~dfsg1/integration/fixtures/https/server-cert.pem 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/fixtures/https/server-cert.pem 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,76 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 4 (0x4) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Dec 4 15:01:20 2013 GMT + Not After : Dec 2 15:01:20 2023 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=*/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:c1:ff:7d:30:6f:64:4a:b1:92:b1:71:d1:c1:74: + e2:1d:db:2d:11:24:e1:00:d4:00:ae:6f:c8:9e:ae: + 67:b3:4a:bd:f7:e6:9e:57:6d:19:4c:3c:23:94:2d: + 3d:d6:63:84:d8:fa:76:2b:38:12:c1:ed:20:9d:32: + e0:e8:c2:bf:9a:77:70:04:3f:7f:ca:8c:2c:82:d6: + 3d:25:5c:02:1a:4f:64:93:03:dd:9c:42:97:5e:09: + 49:af:f0:c2:e1:30:08:0e:21:46:95:d1:13:59:c0: + c8:76:be:94:0d:8b:43:67:21:33:b2:08:60:9d:76: + a8:05:32:1e:f9:95:09:14:75 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + Easy-RSA Generated Server Certificate + X509v3 Subject Key Identifier: + 14:02:FD:FD:DD:13:38:E0:71:EA:D1:BE:C0:0E:89:1A:2D:B6:19:06 + X509v3 Authority Key Identifier: + keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB + DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:FD:AB:EC:6A:84:27:04:A7 + + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key Usage: + Digital Signature, Key Encipherment + Signature Algorithm: sha1WithRSAEncryption + 40:0f:10:39:c4:b7:0f:0d:2f:bf:d2:16:cc:8e:d3:9a:fb:8b: + ce:4b:7b:0d:48:77:ce:f1:fe:d5:8f:ea:b1:71:ed:49:1d:9f: + 23:3a:16:d4:70:7c:c5:29:bf:e4:90:34:d0:f0:00:24:f4:e4: + df:2c:c3:83:01:66:61:c9:a8:ab:29:e7:98:6d:27:89:4a:76: + c9:2e:19:8e:fe:6e:d5:f8:99:11:0e:97:67:4b:34:e3:1e:e3: + 
9f:35:00:a5:32:f9:b5:2c:f2:e0:c5:2e:cc:81:bd:18:dd:5c: + 12:c8:6b:fa:0c:17:74:30:55:f6:6e:20:9a:6c:1e:09:b4:0c: + 15:42 +-----BEGIN CERTIFICATE----- +MIIEKjCCA5OgAwIBAgIBBDANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv +cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l +MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv +bWFpbjAeFw0xMzEyMDQxNTAxMjBaFw0yMzEyMDIxNTAxMjBaMIGbMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE +ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEKMAgGA1UEAxQBKjER +MA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21h +aW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMH/fTBvZEqxkrFx0cF04h3b +LREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y4OjCv5p3 +cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+lA2LQ2ch +M7IIYJ12qAUyHvmVCRR1AgMBAAGjggFzMIIBbzAJBgNVHRMEAjAAMBEGCWCGSAGG ++EIBAQQEAwIGQDA0BglghkgBhvhCAQ0EJxYlRWFzeS1SU0EgR2VuZXJhdGVkIFNl +cnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUFAL9/d0TOOBx6tG+wA6JGi22GQYw +gdcGA1UdIwSBzzCBzIAUZu7DFz09q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJ +BgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUw +EwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD +EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h +aWxAaG9zdC5kb21haW6CCQD9q+xqhCcEpzATBgNVHSUEDDAKBggrBgEFBQcDATAL +BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEAQA8QOcS3Dw0vv9IWzI7TmvuL +zkt7DUh3zvH+1Y/qsXHtSR2fIzoW1HB8xSm/5JA00PAAJPTk3yzDgwFmYcmoqynn +mG0niUp2yS4Zjv5u1fiZEQ6XZ0s04x7jnzUApTL5tSzy4MUuzIG9GN1cEshr+gwX +dDBV9m4gmmweCbQMFUI= +-----END CERTIFICATE----- diff -Nru docker.io-0.9.1~dfsg1/integration/fixtures/https/server-key.pem docker.io-1.3.2~dfsg1/integration/fixtures/https/server-key.pem --- docker.io-0.9.1~dfsg1/integration/fixtures/https/server-key.pem 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/fixtures/https/server-key.pem 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMH/fTBvZEqxkrFx +0cF04h3bLREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y +4OjCv5p3cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+ +lA2LQ2chM7IIYJ12qAUyHvmVCRR1AgMBAAECgYAmwckb9RUfSwyYgLm8IYLPHiuJ +wkllZfVg5Bo7gXJcQnFjZmJ56uTj8xvUjZlODIHM63TSO5ibv6kFXtXKCqZGd2M+ +wGbhZ0f+2GvKcwMmJERnIQjuoNaYSQLT0tM0VB9Iz0rJlZC+tzPZ+5pPqEumRdsS +IzWNXfF42AhcbwAQYQJBAPVXtMYIJc9EZsz86ZcQiMPWUpCX5vnRmtwL8kKyR8D5 +4KfYeiowyFffSRMMcclwNHq7TgSXN+nIXM9WyzyzwikCQQDKbNA28AgZp9aT54HP +WnbeE2pmt+uk/zl/BtxJSoK6H+69Jec+lf7EgL7HgOWYRSNot4uQWu8IhsHLTiUq ++0FtAkEAqwlRxRy4/x24bP+D+QRV0/D97j93joFJbE4Hved7jlSlAV4xDGilwlyv +HNB4Iu5OJ6Gcaibhm+FKkmD3noHSwQJBAIpu3fokLzX0bS+bDFBU6qO3HXX/47xj ++tsfQvkwZrSI8AkU6c8IX0HdVhsz0FBRQAT2ORDQz1XCarfxykNZrwUCQQCGCBIc +BBCWzhHlswlGidWJg3HqqO6hPPClEr3B5G87oCsdeYwiO23XT6rUnoJXfJHp6oCW +5nCwDu5ZTP+khltg +-----END PRIVATE KEY----- diff -Nru docker.io-0.9.1~dfsg1/integration/fixtures/https/server-rogue-cert.pem docker.io-1.3.2~dfsg1/integration/fixtures/https/server-rogue-cert.pem --- docker.io-0.9.1~dfsg1/integration/fixtures/https/server-rogue-cert.pem 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/fixtures/https/server-rogue-cert.pem 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,76 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 3 (0x3) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, 
CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Feb 28 18:49:31 2014 GMT + Not After : Feb 26 18:49:31 2024 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=localhost/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:d1:08:58:24:60:a1:69:65:4b:76:46:8f:88:75: + 7c:49:3a:d8:03:cc:5b:58:c5:d1:bb:e5:f9:54:b9: + 75:65:df:7e:bb:fb:54:d4:b2:e9:6f:58:a2:a4:84: + 43:94:77:24:81:38:36:36:f0:66:65:26:e5:5b:2a: + 14:1c:a9:ae:57:7f:75:00:23:14:4b:61:58:e4:82: + aa:15:97:94:bd:50:35:0d:5d:18:18:ed:10:6a:bb: + d3:64:5a:eb:36:98:5b:58:a7:fe:67:48:c1:6c:3f: + 51:2f:02:65:96:54:77:9b:34:f9:a7:d2:63:54:6a: + 9e:02:5c:be:65:98:a4:b4:b5 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + Easy-RSA Generated Server Certificate + X509v3 Subject Key Identifier: + 1F:E0:57:CA:CB:76:C9:C4:86:B9:EA:69:17:C0:F3:51:CE:95:40:EC + X509v3 Authority Key Identifier: + keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F + DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:E7:21:1E:18:41:1B:96:83 + + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key Usage: + Digital Signature, Key Encipherment + Signature Algorithm: sha1WithRSAEncryption + 04:93:0e:28:01:94:18:f0:8c:7c:d3:0c:ad:e9:b7:46:b1:30: + 65:ed:68:7c:8c:91:cd:1a:86:66:87:4a:4f:c0:97:bc:f7:85: + 4b:38:79:31:b2:65:88:b1:76:16:9e:80:93:38:f4:b9:eb:65: + 00:6d:bb:89:e0:a1:bf:95:5e:80:13:8e:01:73:d3:f1:08:73: + 85:a5:33:75:0b:42:8a:a3:07:09:35:ef:d7:c6:58:eb:60:a3: + 06:89:a0:53:99:e2:aa:41:90:e0:1a:d2:12:4b:48:7d:c3:9c: + ad:bd:0e:5e:5f:f7:09:0c:5d:7c:86:24:dd:92:d5:b3:14:06: + c7:9f +-----BEGIN CERTIFICATE----- +MIIEKjCCA5OgAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2 +aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP +BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu +MB4XDTE0MDIyODE4NDkzMVoXDTI0MDIyNjE4NDkzMVowgaMxCzAJBgNVBAYTAlVT +MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG +b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMRIwEAYDVQQDEwlsb2NhbGhv +c3QxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3Qu +ZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDRCFgkYKFpZUt2Ro+I +dXxJOtgDzFtYxdG75flUuXVl3367+1TUsulvWKKkhEOUdySBODY28GZlJuVbKhQc +qa5Xf3UAIxRLYVjkgqoVl5S9UDUNXRgY7RBqu9NkWus2mFtYp/5nSMFsP1EvAmWW +VHebNPmn0mNUap4CXL5lmKS0tQIDAQABo4IBbzCCAWswCQYDVR0TBAIwADARBglg +hkgBhvhCAQEEBAMCBkAwNAYJYIZIAYb4QgENBCcWJUVhc3ktUlNBIEdlbmVyYXRl +ZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFB/gV8rLdsnEhrnqaRfA81HO +lUDsMIHTBgNVHSMEgcswgciAFNyl8XbbTs2O77EjVh2SgJl0O+pvoYGkpIGhMIGe +MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNj +bzERMA8GA1UEChMIRXZpbCBJbmMxETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD +EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h +aWxAaG9zdC5kb21haW6CCQDnIR4YQRuWgzATBgNVHSUEDDAKBggrBgEFBQcDATAL +BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEABJMOKAGUGPCMfNMMrem3RrEw +Ze1ofIyRzRqGZodKT8CXvPeFSzh5MbJliLF2Fp6Akzj0uetlAG27ieChv5VegBOO +AXPT8QhzhaUzdQtCiqMHCTXv18ZY62CjBomgU5niqkGQ4BrSEktIfcOcrb0OXl/3 +CQxdfIYk3ZLVsxQGx58= +-----END CERTIFICATE----- diff -Nru docker.io-0.9.1~dfsg1/integration/fixtures/https/server-rogue-key.pem 
docker.io-1.3.2~dfsg1/integration/fixtures/https/server-rogue-key.pem --- docker.io-0.9.1~dfsg1/integration/fixtures/https/server-rogue-key.pem 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/fixtures/https/server-rogue-key.pem 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBANEIWCRgoWllS3ZG +j4h1fEk62APMW1jF0bvl+VS5dWXffrv7VNSy6W9YoqSEQ5R3JIE4NjbwZmUm5Vsq +FByprld/dQAjFEthWOSCqhWXlL1QNQ1dGBjtEGq702Ra6zaYW1in/mdIwWw/US8C +ZZZUd5s0+afSY1RqngJcvmWYpLS1AgMBAAECgYAJXh9dGfuB1qlIFqduDR3RxlJR +8UGSu+LHUeoXkuwg8aAjWoMVuSLe+5DmYIsKx0AajmNXmPRtyg1zRXJ7SltmubJ8 +6qQVDsRk6biMdkpkl6a9Gk2av40psD9/VPGxagEoop7IKYhf3AeKPvPiwVB2qFrl +1aYMZm0aMR55pgRajQJBAOk8IsJDf0beooDZXVdv/oe4hcbM9fxO8Cn3qzoGImqD +37LL+PCzDP7AEV3fk43SsZDeSk+LDX+h0o9nPyhzHasCQQDlb3aDgcQY9NaGLUWO +moOCB3148eBVcAwCocu+OSkf7sbQdvXxgThBOrZl11wwRIMQqh99c2yeUwj+tELl +3VcfAkBZTiNpCvtDIaBLge9RuZpWUXs3wec2cutWxnSTxSGMc25GQf/R+l0xdk2w +ChmvpktDUzpU9sN2aXn8WuY+EMX9AkEApbLpUbKPUELLB958RLA819TW/lkZXjrs +wZ3eSoR3ufM1rOqtVvyvBxUDE+wETWu9iHSFB5Ir2PA5J9JCGkbPmwJAFI1ndfBj +iuyU93nFX0p+JE2wVHKx4dMzKCearNKiJh/lGDtUq3REGgamTNUnG8RAITUbxFs+ +Z1hrIq8xYl2LOQ== +-----END PRIVATE KEY----- diff -Nru docker.io-0.9.1~dfsg1/integration/graph_test.go docker.io-1.3.2~dfsg1/integration/graph_test.go --- docker.io-0.9.1~dfsg1/integration/graph_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/graph_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -2,11 +2,12 @@ import ( "errors" - "github.com/dotcloud/docker" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/graphdriver" - "github.com/dotcloud/docker/utils" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/graph" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/utils" "io" "io/ioutil" "os" @@ -24,7 +25,7 @@ if err != nil { t.Fatal(err) } - image, err := graph.Create(archive, nil, "Testing", "", nil) + image, err := graph.Create(archive, "", "", "Testing", "", nil, nil) if err != nil { t.Fatal(err) } @@ -42,7 +43,7 @@ t.Fatal(err) } - if _, err := driver.Get(image.ID); err != nil { + if _, err := driver.Get(image.ID, ""); err != nil { t.Fatal(err) } } @@ -67,13 +68,13 @@ graph, _ := tempGraph(t) defer nukeGraph(graph) badArchive, w := io.Pipe() // Use a pipe reader as a fake archive which never yields data - image := &docker.Image{ - ID: docker.GenerateID(), + image := &image.Image{ + ID: utils.GenerateRandomID(), Comment: "testing", Created: time.Now(), } w.CloseWithError(errors.New("But I'm not a tarball!")) // (Nobody's perfect, darling) - graph.Register(nil, badArchive, image) + graph.Register(image, nil, badArchive) if _, err := graph.Get(image.ID); err == nil { t.Fatal("Image should not exist after Register is interrupted") } @@ -82,7 +83,7 @@ if err != nil { t.Fatal(err) } - if err := graph.Register(nil, goodArchive, image); err != nil { + if err := graph.Register(image, nil, goodArchive); err != nil { t.Fatal(err) } } @@ -96,18 +97,18 @@ if err != nil { t.Fatal(err) } - image, err := graph.Create(archive, nil, "Testing", "", nil) + img, err := graph.Create(archive, "", "", "Testing", "", nil, nil) if err != nil { t.Fatal(err) } - if err := docker.ValidateID(image.ID); err != nil { + if err := utils.ValidateID(img.ID); err != nil { t.Fatal(err) } - if image.Comment != "Testing" { - t.Fatalf("Wrong 
comment: should be '%s', not '%s'", "Testing", image.Comment) + if img.Comment != "Testing" { + t.Fatalf("Wrong comment: should be '%s', not '%s'", "Testing", img.Comment) } - if image.DockerVersion != dockerversion.VERSION { - t.Fatalf("Wrong docker_version: should be '%s', not '%s'", dockerversion.VERSION, image.DockerVersion) + if img.DockerVersion != dockerversion.VERSION { + t.Fatalf("Wrong docker_version: should be '%s', not '%s'", dockerversion.VERSION, img.DockerVersion) } images, err := graph.Map() if err != nil { @@ -115,8 +116,8 @@ } else if l := len(images); l != 1 { t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l) } - if images[image.ID] == nil { - t.Fatalf("Could not find image with id %s", image.ID) + if images[img.ID] == nil { + t.Fatalf("Could not find image with id %s", img.ID) } } @@ -127,12 +128,12 @@ if err != nil { t.Fatal(err) } - image := &docker.Image{ - ID: docker.GenerateID(), + image := &image.Image{ + ID: utils.GenerateRandomID(), Comment: "testing", Created: time.Now(), } - err = graph.Register(nil, archive, image) + err = graph.Register(image, nil, archive) if err != nil { t.Fatal(err) } @@ -164,12 +165,12 @@ assertNImages(graph, t, 0) } -func createTestImage(graph *docker.Graph, t *testing.T) *docker.Image { +func createTestImage(graph *graph.Graph, t *testing.T) *image.Image { archive, err := fakeTar() if err != nil { t.Fatal(err) } - img, err := graph.Create(archive, nil, "Test image", "", nil) + img, err := graph.Create(archive, "", "", "Test image", "", nil, nil) if err != nil { t.Fatal(err) } @@ -184,7 +185,7 @@ t.Fatal(err) } assertNImages(graph, t, 0) - img, err := graph.Create(archive, nil, "Bla bla", "", nil) + img, err := graph.Create(archive, "", "", "Bla bla", "", nil, nil) if err != nil { t.Fatal(err) } @@ -199,7 +200,7 @@ t.Fatal(err) } // Test 2 create (same name) / 1 delete - img1, err := graph.Create(archive, nil, "Testing", "", nil) + img1, err := graph.Create(archive, "", "", "Testing", "", nil, nil) if err != nil { t.Fatal(err) } @@ -207,7 +208,7 @@ if err != nil { t.Fatal(err) } - if _, err = graph.Create(archive, nil, "Testing", "", nil); err != nil { + if _, err = graph.Create(archive, "", "", "Testing", "", nil, nil); err != nil { t.Fatal(err) } assertNImages(graph, t, 2) @@ -227,7 +228,7 @@ t.Fatal(err) } // Test delete twice (pull -> rm -> pull -> rm) - if err := graph.Register(nil, archive, img1); err != nil { + if err := graph.Register(img1, nil, archive); err != nil { t.Fatal(err) } if err := graph.Delete(img1.ID); err != nil { @@ -243,27 +244,27 @@ graph, _ := tempGraph(t) defer nukeGraph(graph) - parentImage := &docker.Image{ - ID: docker.GenerateID(), + parentImage := &image.Image{ + ID: utils.GenerateRandomID(), Comment: "parent", Created: time.Now(), Parent: "", } - childImage1 := &docker.Image{ - ID: docker.GenerateID(), + childImage1 := &image.Image{ + ID: utils.GenerateRandomID(), Comment: "child1", Created: time.Now(), Parent: parentImage.ID, } - childImage2 := &docker.Image{ - ID: docker.GenerateID(), + childImage2 := &image.Image{ + ID: utils.GenerateRandomID(), Comment: "child2", Created: time.Now(), Parent: parentImage.ID, } - _ = graph.Register(nil, archive1, parentImage) - _ = graph.Register(nil, archive2, childImage1) - _ = graph.Register(nil, archive3, childImage2) + _ = graph.Register(parentImage, nil, archive1) + _ = graph.Register(childImage1, nil, archive2) + _ = graph.Register(childImage2, nil, archive3) byParent, err := graph.ByParent() if err != nil { @@ -279,7 +280,7 @@ * HELPER FUNCTIONS */ 
-func assertNImages(graph *docker.Graph, t *testing.T, n int) { +func assertNImages(graph *graph.Graph, t *testing.T, n int) { if images, err := graph.Map(); err != nil { t.Fatal(err) } else if actualN := len(images); actualN != n { @@ -287,23 +288,23 @@ } } -func tempGraph(t *testing.T) (*docker.Graph, graphdriver.Driver) { +func tempGraph(t *testing.T) (*graph.Graph, graphdriver.Driver) { tmp, err := ioutil.TempDir("", "docker-graph-") if err != nil { t.Fatal(err) } - driver, err := graphdriver.New(tmp) + driver, err := graphdriver.New(tmp, nil) if err != nil { t.Fatal(err) } - graph, err := docker.NewGraph(tmp, driver) + graph, err := graph.NewGraph(tmp, driver) if err != nil { t.Fatal(err) } return graph, driver } -func nukeGraph(graph *docker.Graph) { +func nukeGraph(graph *graph.Graph) { graph.Driver().Cleanup() os.RemoveAll(graph.Root) } diff -Nru docker.io-0.9.1~dfsg1/integration/https_test.go docker.io-1.3.2~dfsg1/integration/https_test.go --- docker.io-0.9.1~dfsg1/integration/https_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/https_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,97 @@ +package docker + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/client" + "github.com/docker/libtrust" +) + +const ( + errBadCertificate = "remote error: bad certificate" + errCaUnknown = "x509: certificate signed by unknown authority" +) + +func getTlsConfig(certFile, keyFile string, t *testing.T) *tls.Config { + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile("fixtures/https/ca.pem") + if err != nil { + t.Fatal(err) + } + certPool.AppendCertsFromPEM(file) + + cert, err := tls.LoadX509KeyPair("fixtures/https/"+certFile, "fixtures/https/"+keyFile) + if err != nil { + t.Fatalf("Couldn't load X509 key pair: %s", err) + } + tlsConfig := &tls.Config{ + RootCAs: certPool, + Certificates: []tls.Certificate{cert}, + } + return tlsConfig +} + +// TestHttpsInfo connects via two-way authenticated HTTPS to the info endpoint +func TestHttpsInfo(t *testing.T) { + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, key, testDaemonProto, + testDaemonHttpsAddr, getTlsConfig("client-cert.pem", "client-key.pem", t)) + + setTimeout(t, "Reading command output time out", 10*time.Second, func() { + if err := cli.CmdInfo(); err != nil { + t.Fatal(err) + } + }) +} + +// TestHttpsInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint +// by using a rogue client certificate and checks that it fails with the expected error. 
+func TestHttpsInfoRogueCert(t *testing.T) { + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, key, testDaemonProto, + testDaemonHttpsAddr, getTlsConfig("client-rogue-cert.pem", "client-rogue-key.pem", t)) + + setTimeout(t, "Reading command output time out", 10*time.Second, func() { + err := cli.CmdInfo() + if err == nil { + t.Fatal("Expected error but got nil") + } + if !strings.Contains(err.Error(), errBadCertificate) { + t.Fatalf("Expected error: %s, got instead: %s", errBadCertificate, err) + } + }) +} + +// TestHttpsInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint +// which provides a rogue server certificate and checks that it fails with the expected error +func TestHttpsInfoRogueServerCert(t *testing.T) { + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, key, testDaemonProto, + testDaemonRogueHttpsAddr, getTlsConfig("client-cert.pem", "client-key.pem", t)) + + setTimeout(t, "Reading command output time out", 10*time.Second, func() { + err := cli.CmdInfo() + if err == nil { + t.Fatal("Expected error but got nil") + } + + if !strings.Contains(err.Error(), errCaUnknown) { + t.Fatalf("Expected error: %s, got instead: %s", errCaUnknown, err) + } + + }) +} diff -Nru docker.io-0.9.1~dfsg1/integration/iptables_test.go docker.io-1.3.2~dfsg1/integration/iptables_test.go --- docker.io-0.9.1~dfsg1/integration/iptables_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/iptables_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -package docker - -import ( - "github.com/dotcloud/docker/pkg/iptables" - "os" - "testing" -) - -// FIXME: this test should be a unit test. -// For example by mocking os/exec to make sure iptables is not actually called. - -func TestIptables(t *testing.T) { - if _, err := iptables.Raw("-L"); err != nil { - t.Fatal(err) - } - path := os.Getenv("PATH") - os.Setenv("PATH", "") - defer os.Setenv("PATH", path) - if _, err := iptables.Raw("-L"); err == nil { - t.Fatal("Not finding iptables in the PATH should cause an error") - } -} diff -Nru docker.io-0.9.1~dfsg1/integration/MAINTAINERS docker.io-1.3.2~dfsg1/integration/MAINTAINERS --- docker.io-0.9.1~dfsg1/integration/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,2 @@ +Tibor Vass (@tiborvass) +Cristian Staretu (@unclejack) diff -Nru docker.io-0.9.1~dfsg1/integration/README.md docker.io-1.3.2~dfsg1/integration/README.md --- docker.io-0.9.1~dfsg1/integration/README.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/README.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,23 @@ +## Legacy integration tests + +`./integration` contains Docker's legacy integration tests. +It is DEPRECATED and will eventually be removed. + +### If you are a *CONTRIBUTOR* and want to add a test: + +* Consider mocking out side effects and contributing a *unit test* in the subsystem +you're modifying. For example, the remote API has unit tests in `./api/server/server_unit_tests.go`. +The events subsystem has unit tests in `./events/events_test.go`. And so on. + +* For end-to-end integration tests, please contribute to `./integration-cli`. + + +### If you are a *MAINTAINER* + +Please don't allow patches adding new tests to `./integration`.
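The unit-test advice above can be made concrete with the `integration/iptables_test.go` file removed earlier in this patch: its FIXME suggests mocking os/exec so that iptables is never actually invoked. A minimal sketch of that approach, assuming a hypothetical `execCommand` seam inside the iptables package (the names `execCommand`, `raw`, and the test itself are illustrative, not code from this tree):

```go
package iptables

import (
	"os/exec"
	"testing"
)

// execCommand is a hypothetical seam: production code would call it
// instead of exec.Command directly, so a test can substitute a stub.
var execCommand = exec.Command

// raw runs iptables through the seam and returns its combined output.
func raw(args ...string) ([]byte, error) {
	return execCommand("iptables", args...).CombinedOutput()
}

func TestRawDoesNotInvokeIptables(t *testing.T) {
	called := false
	execCommand = func(name string, arg ...string) *exec.Cmd {
		called = true
		return exec.Command("true") // no-op stand-in for the iptables binary
	}
	defer func() { execCommand = exec.Command }()

	if _, err := raw("-L"); err != nil {
		t.Fatal(err)
	}
	if !called {
		t.Fatal("expected the stubbed exec seam to be used")
	}
}
```

Routing every process launch through a package-level function variable is the usual seam for exec-heavy code: the test asserts on behavior without shelling out, which is exactly the kind of porting work described below.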
+ +### If you are *LOOKING FOR A WAY TO HELP* + +Please consider porting tests away from `./integration` and into either unit tests or CLI tests. + +Any help will be greatly appreciated! diff -Nru docker.io-0.9.1~dfsg1/integration/runtime_test.go docker.io-1.3.2~dfsg1/integration/runtime_test.go --- docker.io-0.9.1~dfsg1/integration/runtime_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/runtime_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -3,14 +3,8 @@ import ( "bytes" "fmt" - "github.com/dotcloud/docker" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/nat" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/sysinit" - "github.com/dotcloud/docker/utils" "io" - "log" + std_log "log" "net" "net/url" "os" @@ -21,37 +15,53 @@ "syscall" "testing" "time" + + "github.com/docker/docker/daemon" + "github.com/docker/docker/engine" + "github.com/docker/docker/image" + "github.com/docker/docker/nat" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" ) const ( - unitTestImageName = "docker-test-image" - unitTestImageID = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0 - unitTestImageIDShort = "83599e29c455" - unitTestNetworkBridge = "testdockbr0" - unitTestStoreBase = "/var/lib/docker/unit-tests" - testDaemonAddr = "127.0.0.1:4270" - testDaemonProto = "tcp" + unitTestImageName = "docker-test-image" + unitTestImageID = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0 + unitTestImageIDShort = "83599e29c455" + unitTestNetworkBridge = "testdockbr0" + unitTestStoreBase = "/var/lib/docker/unit-tests" + unitTestDockerTmpdir = "/var/lib/docker/tmp" + testDaemonAddr = "127.0.0.1:4270" + testDaemonProto = "tcp" + testDaemonHttpsProto = "tcp" + testDaemonHttpsAddr = "localhost:4271" + testDaemonRogueHttpsAddr = "localhost:4272" ) var ( - // FIXME: globalRuntime is deprecated by globalEngine. All tests should be converted. - globalRuntime *docker.Runtime - globalEngine *engine.Engine - startFds int - startGoroutines int + // FIXME: globalDaemon is deprecated by globalEngine. All tests should be converted. + globalDaemon *daemon.Daemon + globalEngine *engine.Engine + globalHttpsEngine *engine.Engine + globalRogueHttpsEngine *engine.Engine + startFds int + startGoroutines int ) -// FIXME: nuke() is deprecated by Runtime.Nuke() -func nuke(runtime *docker.Runtime) error { - return runtime.Nuke() +// FIXME: nuke() is deprecated by Daemon.Nuke() +func nuke(daemon *daemon.Daemon) error { + return daemon.Nuke() } // FIXME: cleanup and nuke are redundant. func cleanup(eng *engine.Engine, t *testing.T) error { - runtime := mkRuntimeFromEngine(eng, t) - for _, container := range runtime.List() { + daemon := mkDaemonFromEngine(eng, t) + for _, container := range daemon.List() { container.Kill() - runtime.Destroy(container) + daemon.Destroy(container) } job := eng.Job("images") images, err := job.Stdout.AddTable() @@ -83,54 +93,56 @@ // To test other drivers, we need a dedicated driver validation suite. 
os.Setenv("DOCKER_DRIVER", "vfs") os.Setenv("TEST", "1") + os.Setenv("DOCKER_TMPDIR", unitTestDockerTmpdir) // Hack to run sys init during unit testing - if selfPath := utils.SelfPath(); strings.Contains(selfPath, ".dockerinit") { - sysinit.SysInit() + if reexec.Init() { return } if uid := syscall.Geteuid(); uid != 0 { - log.Fatal("docker tests need to be run as root") + log.Fatalf("docker tests need to be run as root") } // Copy dockerinit into our current testing directory, if provided (so we can test a separate dockerinit binary) if dockerinit := os.Getenv("TEST_DOCKERINIT_PATH"); dockerinit != "" { src, err := os.Open(dockerinit) if err != nil { - log.Fatalf("Unable to open TEST_DOCKERINIT_PATH: %s\n", err) + log.Fatalf("Unable to open TEST_DOCKERINIT_PATH: %s", err) } defer src.Close() dst, err := os.OpenFile(filepath.Join(filepath.Dir(utils.SelfPath()), "dockerinit"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0555) if err != nil { - log.Fatalf("Unable to create dockerinit in test directory: %s\n", err) + log.Fatalf("Unable to create dockerinit in test directory: %s", err) } defer dst.Close() if _, err := io.Copy(dst, src); err != nil { - log.Fatalf("Unable to copy dockerinit to TEST_DOCKERINIT_PATH: %s\n", err) + log.Fatalf("Unable to copy dockerinit to TEST_DOCKERINIT_PATH: %s", err) } dst.Close() src.Close() } - // Setup the base runtime, which will be duplicated for each test. + // Setup the base daemon, which will be duplicated for each test. // (no tests are run directly in the base) setupBaseImage() - // Create the "global runtime" with a long-running daemon for integration tests + // Create the "global daemon" with a long-running daemons for integration tests spawnGlobalDaemon() + spawnLegitHttpsDaemon() + spawnRogueHttpsDaemon() startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine() } func setupBaseImage() { - eng := newTestEngine(log.New(os.Stderr, "", 0), false, unitTestStoreBase) - job := eng.Job("inspect", unitTestImageName, "image") + eng := newTestEngine(std_log.New(os.Stderr, "", 0), false, unitTestStoreBase) + job := eng.Job("image_inspect", unitTestImageName) img, _ := job.Stdout.AddEnv() // If the unit test is not found, try to download it. - if err := job.Run(); err != nil || img.Get("id") != unitTestImageID { + if err := job.Run(); err != nil || img.Get("Id") != unitTestImageID { // Retrieve the Image job = eng.Job("pull", unitTestImageName) - job.Stdout.Add(utils.NopWriteCloser(os.Stdout)) + job.Stdout.Add(ioutils.NopWriteCloser(os.Stdout)) if err := job.Run(); err != nil { log.Fatalf("Unable to pull the test image: %s", err) } @@ -138,18 +150,18 @@ } func spawnGlobalDaemon() { - if globalRuntime != nil { - utils.Debugf("Global runtime already exists. Skipping.") + if globalDaemon != nil { + log.Debugf("Global daemon already exists. 
Skipping.") return } - t := log.New(os.Stderr, "", 0) + t := std_log.New(os.Stderr, "", 0) eng := NewTestEngine(t) globalEngine = eng - globalRuntime = mkRuntimeFromEngine(eng, t) + globalDaemon = mkDaemonFromEngine(eng, t) // Spawn a Daemon go func() { - utils.Debugf("Spawning global daemon for integration tests") + log.Debugf("Spawning global daemon for integration tests") listenURL := &url.URL{ Scheme: testDaemonProto, Host: testDaemonAddr, @@ -170,10 +182,65 @@ } } +func spawnLegitHttpsDaemon() { + if globalHttpsEngine != nil { + return + } + globalHttpsEngine = spawnHttpsDaemon(testDaemonHttpsAddr, "fixtures/https/ca.pem", + "fixtures/https/server-cert.pem", "fixtures/https/server-key.pem") +} + +func spawnRogueHttpsDaemon() { + if globalRogueHttpsEngine != nil { + return + } + globalRogueHttpsEngine = spawnHttpsDaemon(testDaemonRogueHttpsAddr, "fixtures/https/ca.pem", + "fixtures/https/server-rogue-cert.pem", "fixtures/https/server-rogue-key.pem") +} + +func spawnHttpsDaemon(addr, cacert, cert, key string) *engine.Engine { + t := std_log.New(os.Stderr, "", 0) + root, err := newTestDirectory(unitTestStoreBase) + if err != nil { + t.Fatal(err) + } + // FIXME: here we don't use NewTestEngine because it configures the daemon with Autorestart=false, + // and we want to set it to true. + + eng := newTestEngine(t, true, root) + + // Spawn a Daemon + go func() { + log.Debugf("Spawning https daemon for integration tests") + listenURL := &url.URL{ + Scheme: testDaemonHttpsProto, + Host: addr, + } + job := eng.Job("serveapi", listenURL.String()) + job.SetenvBool("Logging", true) + job.SetenvBool("Tls", true) + job.SetenvBool("TlsVerify", true) + job.Setenv("TlsCa", cacert) + job.Setenv("TlsCert", cert) + job.Setenv("TlsKey", key) + if err := job.Run(); err != nil { + log.Fatalf("Unable to spawn the test daemon: %s", err) + } + }() + + // Give some time to ListenAndServer to actually start + time.Sleep(time.Second) + + if err := eng.Job("acceptconnections").Run(); err != nil { + log.Fatalf("Unable to accept connections for test api: %s", err) + } + return eng +} + // FIXME: test that ImagePull(json=true) send correct json output -func GetTestImage(runtime *docker.Runtime) *docker.Image { - imgs, err := runtime.Graph().Map() +func GetTestImage(daemon *daemon.Daemon) *image.Image { + imgs, err := daemon.Graph().Map() if err != nil { log.Fatalf("Unable to get the test image: %s", err) } @@ -182,23 +249,24 @@ return image } } - log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.Graph().Root, imgs) + log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, daemon.Graph().Root, imgs) return nil } -func TestRuntimeCreate(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) +func TestDaemonCreate(t *testing.T) { + daemon := mkDaemon(t) + defer nuke(daemon) // Make sure we start we 0 containers - if len(runtime.List()) != 0 { - t.Errorf("Expected 0 containers, %v found", len(runtime.List())) + if len(daemon.List()) != 0 { + t.Errorf("Expected 0 containers, %v found", len(daemon.List())) } - container, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, + container, _, err := daemon.Create(&runconfig.Config{ + Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}, }, + &runconfig.HostConfig{}, "", ) if err != nil { @@ -206,81 +274,84 @@ } defer func() { - if err := runtime.Destroy(container); err != nil { + if err := daemon.Destroy(container); err != nil { t.Error(err) } }() // Make sure we can find the newly created container with 
List() - if len(runtime.List()) != 1 { - t.Errorf("Expected 1 container, %v found", len(runtime.List())) + if len(daemon.List()) != 1 { + t.Errorf("Expected 1 container, %v found", len(daemon.List())) } // Make sure the container List() returns is the right one - if runtime.List()[0].ID != container.ID { - t.Errorf("Unexpected container %v returned by List", runtime.List()[0]) + if daemon.List()[0].ID != container.ID { + t.Errorf("Unexpected container %v returned by List", daemon.List()[0]) } // Make sure we can get the container with Get() - if runtime.Get(container.ID) == nil { + if daemon.Get(container.ID) == nil { t.Errorf("Unable to get newly created container") } // Make sure it is the right container - if runtime.Get(container.ID) != container { + if daemon.Get(container.ID) != container { t.Errorf("Get() returned the wrong container") } // Make sure Exists returns it as existing - if !runtime.Exists(container.ID) { + if !daemon.Exists(container.ID) { t.Errorf("Exists() returned false for a newly created container") } // Test that conflict error displays correct details - testContainer, _, _ := runtime.Create( + testContainer, _, _ := daemon.Create( &runconfig.Config{ - Image: GetTestImage(runtime).ID, + Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}, }, + &runconfig.HostConfig{}, "conflictname", ) - if _, _, err := runtime.Create(&runconfig.Config{Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}}, testContainer.Name); err == nil || !strings.Contains(err.Error(), utils.TruncateID(testContainer.ID)) { + if _, _, err := daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}}, &runconfig.HostConfig{}, testContainer.Name); err == nil || !strings.Contains(err.Error(), utils.TruncateID(testContainer.ID)) { t.Fatalf("Name conflict error doesn't include the correct short id. 
Message was: %s", err.Error()) } // Make sure create with bad parameters returns an error - if _, _, err = runtime.Create(&runconfig.Config{Image: GetTestImage(runtime).ID}, ""); err == nil { + if _, _, err = daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID}, &runconfig.HostConfig{}, ""); err == nil { t.Fatal("Builder.Create should throw an error when Cmd is missing") } - if _, _, err := runtime.Create( + if _, _, err := daemon.Create( &runconfig.Config{ - Image: GetTestImage(runtime).ID, + Image: GetTestImage(daemon).ID, Cmd: []string{}, }, + &runconfig.HostConfig{}, "", ); err == nil { t.Fatal("Builder.Create should throw an error when Cmd is empty") } config := &runconfig.Config{ - Image: GetTestImage(runtime).ID, + Image: GetTestImage(daemon).ID, Cmd: []string{"/bin/ls"}, PortSpecs: []string{"80"}, } - container, _, err = runtime.Create(config, "") + container, _, err = daemon.Create(config, &runconfig.HostConfig{}, "") - _, err = runtime.Commit(container, "testrepo", "testtag", "", "", config) + _, err = daemon.Commit(container, "testrepo", "testtag", "", "", true, config) if err != nil { t.Error(err) } // test expose 80:8000 - container, warnings, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, + container, warnings, err := daemon.Create(&runconfig.Config{ + Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}, PortSpecs: []string{"80:8000"}, }, + &runconfig.HostConfig{}, "", ) if err != nil { @@ -292,83 +363,86 @@ } func TestDestroy(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + daemon := mkDaemon(t) + defer nuke(daemon) - container, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, + container, _, err := daemon.Create(&runconfig.Config{ + Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}, - }, "") + }, + &runconfig.HostConfig{}, + "") if err != nil { t.Fatal(err) } // Destroy - if err := runtime.Destroy(container); err != nil { + if err := daemon.Destroy(container); err != nil { t.Error(err) } - // Make sure runtime.Exists() behaves correctly - if runtime.Exists("test_destroy") { + // Make sure daemon.Exists() behaves correctly + if daemon.Exists("test_destroy") { t.Errorf("Exists() returned true") } - // Make sure runtime.List() doesn't list the destroyed container - if len(runtime.List()) != 0 { - t.Errorf("Expected 0 container, %v found", len(runtime.List())) + // Make sure daemon.List() doesn't list the destroyed container + if len(daemon.List()) != 0 { + t.Errorf("Expected 0 container, %v found", len(daemon.List())) } - // Make sure runtime.Get() refuses to return the unexisting container - if runtime.Get(container.ID) != nil { + // Make sure daemon.Get() refuses to return the unexisting container + if daemon.Get(container.ID) != nil { t.Errorf("Unable to get newly created container") } // Test double destroy - if err := runtime.Destroy(container); err == nil { + if err := daemon.Destroy(container); err == nil { // It should have failed t.Errorf("Double destroy did not fail") } } func TestGet(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + daemon := mkDaemon(t) + defer nuke(daemon) - container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) - defer runtime.Destroy(container1) + container1, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t) + defer daemon.Destroy(container1) - container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) - defer runtime.Destroy(container2) + container2, _, _ := mkContainer(daemon, 
[]string{"_", "ls", "-al"}, t) + defer daemon.Destroy(container2) - container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) - defer runtime.Destroy(container3) + container3, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t) + defer daemon.Destroy(container3) - if runtime.Get(container1.ID) != container1 { - t.Errorf("Get(test1) returned %v while expecting %v", runtime.Get(container1.ID), container1) + if daemon.Get(container1.ID) != container1 { + t.Errorf("Get(test1) returned %v while expecting %v", daemon.Get(container1.ID), container1) } - if runtime.Get(container2.ID) != container2 { - t.Errorf("Get(test2) returned %v while expecting %v", runtime.Get(container2.ID), container2) + if daemon.Get(container2.ID) != container2 { + t.Errorf("Get(test2) returned %v while expecting %v", daemon.Get(container2.ID), container2) } - if runtime.Get(container3.ID) != container3 { - t.Errorf("Get(test3) returned %v while expecting %v", runtime.Get(container3.ID), container3) + if daemon.Get(container3.ID) != container3 { + t.Errorf("Get(test3) returned %v while expecting %v", daemon.Get(container3.ID), container3) } } -func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *docker.Container, string) { +func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daemon.Container, string) { var ( - err error - id string - strPort string - eng = NewTestEngine(t) - runtime = mkRuntimeFromEngine(eng, t) - port = 5554 - p nat.Port + err error + id string + outputBuffer = bytes.NewBuffer(nil) + strPort string + eng = NewTestEngine(t) + daemon = mkDaemonFromEngine(eng, t) + port = 5554 + p nat.Port ) defer func() { if err != nil { - runtime.Nuke() + daemon.Nuke() } }() @@ -392,11 +466,12 @@ jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd}) jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)}) jobCreate.SetenvJson("ExposedPorts", ep) - jobCreate.Stdout.AddString(&id) + jobCreate.Stdout.Add(outputBuffer) if err := jobCreate.Run(); err != nil { t.Fatal(err) } - // FIXME: this relies on the undocumented behavior of runtime.Create + id = engine.Tail(outputBuffer, 1) + // FIXME: this relies on the undocumented behavior of daemon.Create // which will return a nil error AND container if the exposed ports // are invalid. That behavior should be fixed! 
if id != "" { @@ -418,28 +493,28 @@ t.Fatal(err) } - container := runtime.Get(id) + container := daemon.Get(id) if container == nil { t.Fatalf("Couldn't fetch test container %s", id) } setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() { - for !container.State.IsRunning() { + for !container.IsRunning() { time.Sleep(10 * time.Millisecond) } }) // Even if the state is running, lets give some time to lxc to spawn the process - container.WaitTimeout(500 * time.Millisecond) + container.WaitStop(500 * time.Millisecond) strPort = container.NetworkSettings.Ports[p][0].HostPort - return runtime, container, strPort + return daemon, container, strPort } // Run a container with a TCP port allocated, and test that it can receive connections on localhost func TestAllocateTCPPortLocalhost(t *testing.T) { - runtime, container, port := startEchoServerContainer(t, "tcp") - defer nuke(runtime) + daemon, container, port := startEchoServerContainer(t, "tcp") + defer nuke(daemon) defer container.Kill() for i := 0; i != 10; i++ { @@ -487,8 +562,8 @@ // Run a container with an UDP port allocated, and test that it can receive connections on localhost func TestAllocateUDPPortLocalhost(t *testing.T) { - runtime, container, port := startEchoServerContainer(t, "udp") - defer nuke(runtime) + daemon, container, port := startEchoServerContainer(t, "udp") + defer nuke(daemon) defer container.Kill() conn, err := net.Dial("udp", fmt.Sprintf("localhost:%v", port)) @@ -523,55 +598,55 @@ func TestRestore(t *testing.T) { eng := NewTestEngine(t) - runtime1 := mkRuntimeFromEngine(eng, t) - defer runtime1.Nuke() + daemon1 := mkDaemonFromEngine(eng, t) + defer daemon1.Nuke() // Create a container with one instance of docker - container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t) - defer runtime1.Destroy(container1) + container1, _, _ := mkContainer(daemon1, []string{"_", "ls", "-al"}, t) + defer daemon1.Destroy(container1) // Create a second container meant to be killed - container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t) - defer runtime1.Destroy(container2) + container2, _, _ := mkContainer(daemon1, []string{"-i", "_", "/bin/cat"}, t) + defer daemon1.Destroy(container2) // Start the container non blocking if err := container2.Start(); err != nil { t.Fatal(err) } - if !container2.State.IsRunning() { + if !container2.IsRunning() { t.Fatalf("Container %v should appear as running but isn't", container2.ID) } // Simulate a crash/manual quit of dockerd: process dies, states stays 'Running' cStdin, _ := container2.StdinPipe() cStdin.Close() - if err := container2.WaitTimeout(2 * time.Second); err != nil { + if _, err := container2.WaitStop(2 * time.Second); err != nil { t.Fatal(err) } - container2.State.SetRunning(42) + container2.SetRunning(42) container2.ToDisk() - if len(runtime1.List()) != 2 { - t.Errorf("Expected 2 container, %v found", len(runtime1.List())) + if len(daemon1.List()) != 2 { + t.Errorf("Expected 2 container, %v found", len(daemon1.List())) } if err := container1.Run(); err != nil { t.Fatal(err) } - if !container2.State.IsRunning() { + if !container2.IsRunning() { t.Fatalf("Container %v should appear as running but isn't", container2.ID) } // Here are are simulating a docker restart - that is, reloading all containers // from scratch - eng = newTestEngine(t, false, eng.Root()) - runtime2 := mkRuntimeFromEngine(eng, t) - if len(runtime2.List()) != 2 { - t.Errorf("Expected 2 container, %v found", len(runtime2.List())) + eng = 
newTestEngine(t, false, daemon1.Config().Root) + daemon2 := mkDaemonFromEngine(eng, t) + if len(daemon2.List()) != 2 { + t.Errorf("Expected 2 container, %v found", len(daemon2.List())) } runningCount := 0 - for _, c := range runtime2.List() { - if c.State.IsRunning() { + for _, c := range daemon2.List() { + if c.IsRunning() { t.Errorf("Running container found: %v (%v)", c.ID, c.Path) runningCount++ } @@ -579,34 +654,34 @@ if runningCount != 0 { t.Fatalf("Expected 0 container alive, %d found", runningCount) } - container3 := runtime2.Get(container1.ID) + container3 := daemon2.Get(container1.ID) if container3 == nil { t.Fatal("Unable to Get container") } if err := container3.Run(); err != nil { t.Fatal(err) } - container2.State.SetStopped(0) + container2.SetStopped(0) } func TestDefaultContainerName(t *testing.T) { eng := NewTestEngine(t) - runtime := mkRuntimeFromEngine(eng, t) - defer nuke(runtime) + daemon := mkDaemonFromEngine(eng, t) + defer nuke(daemon) - config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } - container := runtime.Get(createNamedTestContainer(eng, config, t, "some_name")) + container := daemon.Get(createNamedTestContainer(eng, config, t, "some_name")) containerID := container.ID if container.Name != "/some_name" { t.Fatalf("Expect /some_name got %s", container.Name) } - if c := runtime.Get("/some_name"); c == nil { + if c := daemon.Get("/some_name"); c == nil { t.Fatalf("Couldn't retrieve test container as /some_name") } else if c.ID != containerID { t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID) @@ -615,22 +690,22 @@ func TestRandomContainerName(t *testing.T) { eng := NewTestEngine(t) - runtime := mkRuntimeFromEngine(eng, t) - defer nuke(runtime) + daemon := mkDaemonFromEngine(eng, t) + defer nuke(daemon) - config, _, _, err := runconfig.Parse([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err := parseRun([]string{GetTestImage(daemon).ID, "echo test"}, nil) if err != nil { t.Fatal(err) } - container := runtime.Get(createTestContainer(eng, config, t)) + container := daemon.Get(createTestContainer(eng, config, t)) containerID := container.ID if container.Name == "" { t.Fatalf("Expected not empty container name") } - if c := runtime.Get(container.Name); c == nil { + if c := daemon.Get(container.Name); c == nil { log.Fatalf("Could not lookup container %s by its name", container.Name) } else if c.ID != containerID { log.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID) @@ -639,8 +714,8 @@ func TestContainerNameValidation(t *testing.T) { eng := NewTestEngine(t) - runtime := mkRuntimeFromEngine(eng, t) - defer nuke(runtime) + daemon := mkDaemonFromEngine(eng, t) + defer nuke(daemon) for _, test := range []struct { Name string @@ -649,7 +724,7 @@ {"abc-123_AAA.1", true}, {"\000asdf", false}, } { - config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}, nil) if err != nil { if !test.Valid { continue @@ -657,12 +732,12 @@ t.Fatal(err) } - var shortID string + var outputBuffer = bytes.NewBuffer(nil) job := eng.Job("create", test.Name) if err := job.ImportEnv(config); err != nil { t.Fatal(err) } - job.Stdout.AddString(&shortID) + job.Stdout.Add(outputBuffer) if err := job.Run(); err != nil { if !test.Valid { continue @@ -670,13 +745,13 @@ 
t.Fatal(err) } - container := runtime.Get(shortID) + container := daemon.Get(engine.Tail(outputBuffer, 1)) if container.Name != "/"+test.Name { t.Fatalf("Expect /%s got %s", test.Name, container.Name) } - if c := runtime.Get("/" + test.Name); c == nil { + if c := daemon.Get("/" + test.Name); c == nil { t.Fatalf("Couldn't retrieve test container as /%s", test.Name) } else if c.ID != container.ID { t.Fatalf("Container /%s has ID %s instead of %s", test.Name, c.ID, container.ID) } @@ -687,17 +762,17 @@ func TestLinkChildContainer(t *testing.T) { eng := NewTestEngine(t) - runtime := mkRuntimeFromEngine(eng, t) - defer nuke(runtime) + daemon := mkDaemonFromEngine(eng, t) + defer nuke(daemon) - config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } - container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp")) + container := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp")) - webapp, err := runtime.GetByName("/webapp") + webapp, err := daemon.GetByName("/webapp") if err != nil { t.Fatal(err) } @@ -706,19 +781,19 @@ t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) } - config, _, _, err = runconfig.Parse([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err = parseRun([]string{GetTestImage(daemon).ID, "echo test"}, nil) if err != nil { t.Fatal(err) } - childContainer := runtime.Get(createTestContainer(eng, config, t)) + childContainer := daemon.Get(createTestContainer(eng, config, t)) - if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil { + if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil { t.Fatal(err) } // Get the child by its new name - db, err := runtime.GetByName("/webapp/db") + db, err := daemon.GetByName("/webapp/db") if err != nil { t.Fatal(err) } @@ -729,17 +804,17 @@ func TestGetAllChildren(t *testing.T) { eng := NewTestEngine(t) - runtime := mkRuntimeFromEngine(eng, t) - defer nuke(runtime) + daemon := mkDaemonFromEngine(eng, t) + defer nuke(daemon) - config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } - container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp")) + container := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp")) - webapp, err := runtime.GetByName("/webapp") + webapp, err := daemon.GetByName("/webapp") if err != nil { t.Fatal(err) } @@ -748,18 +823,18 @@ t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) } - config, _, _, err = runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err = parseRun([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } - childContainer := runtime.Get(createTestContainer(eng, config, t)) + childContainer := daemon.Get(createTestContainer(eng, config, t)) - if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil { + if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil { t.Fatal(err) } - children, err := runtime.Children("/webapp") + children, err := daemon.Children("/webapp") if err != nil { t.Fatal(err) } @@ -782,41 +857,43 @@ } func TestDestroyWithInitLayer(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + daemon := mkDaemon(t) + defer nuke(daemon) - container, _, err :=
runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, + container, _, err := daemon.Create(&runconfig.Config{ + Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}, - }, "") + }, + &runconfig.HostConfig{}, + "") if err != nil { t.Fatal(err) } // Destroy - if err := runtime.Destroy(container); err != nil { + if err := daemon.Destroy(container); err != nil { t.Fatal(err) } - // Make sure runtime.Exists() behaves correctly - if runtime.Exists("test_destroy") { + // Make sure daemon.Exists() behaves correctly + if daemon.Exists("test_destroy") { t.Fatalf("Exists() returned true") } - // Make sure runtime.List() doesn't list the destroyed container - if len(runtime.List()) != 0 { - t.Fatalf("Expected 0 container, %v found", len(runtime.List())) + // Make sure daemon.List() doesn't list the destroyed container + if len(daemon.List()) != 0 { + t.Fatalf("Expected 0 container, %v found", len(daemon.List())) } - driver := runtime.Graph().Driver() + driver := daemon.Graph().Driver() // Make sure that the container does not exist in the driver - if _, err := driver.Get(container.ID); err == nil { + if _, err := driver.Get(container.ID, ""); err == nil { t.Fatal("Conttainer should not exist in the driver") } // Make sure that the init layer is removed from the driver - if _, err := driver.Get(fmt.Sprintf("%s-init", container.ID)); err == nil { + if _, err := driver.Get(fmt.Sprintf("%s-init", container.ID), ""); err == nil { t.Fatal("Container's init layer should not exist in the driver") } } diff -Nru docker.io-0.9.1~dfsg1/integration/server_test.go docker.io-1.3.2~dfsg1/integration/server_test.go --- docker.io-0.9.1~dfsg1/integration/server_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/server_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,125 +1,18 @@ package docker import ( - "github.com/dotcloud/docker" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/runconfig" - "strings" + "bytes" "testing" "time" -) - -func TestImageTagImageDelete(t *testing.T) { - eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - - srv := mkServerFromEngine(eng, t) - - initialImages := getAllImages(eng, t) - if err := eng.Job("tag", unitTestImageName, "utest", "tag1").Run(); err != nil { - t.Fatal(err) - } - - if err := eng.Job("tag", unitTestImageName, "utest/docker", "tag2").Run(); err != nil { - t.Fatal(err) - } - - if err := eng.Job("tag", unitTestImageName, "utest:5000/docker", "tag3").Run(); err != nil { - t.Fatal(err) - } - - images := getAllImages(eng, t) - - nExpected := len(initialImages.Data[0].GetList("RepoTags")) + 3 - nActual := len(images.Data[0].GetList("RepoTags")) - if nExpected != nActual { - t.Errorf("Expected %d images, %d found", nExpected, nActual) - } - - if err := srv.DeleteImage("utest/docker:tag2", engine.NewTable("", 0), true, false); err != nil { - t.Fatal(err) - } - - images = getAllImages(eng, t) - - nExpected = len(initialImages.Data[0].GetList("RepoTags")) + 2 - nActual = len(images.Data[0].GetList("RepoTags")) - if nExpected != nActual { - t.Errorf("Expected %d images, %d found", nExpected, nActual) - } - if err := srv.DeleteImage("utest:5000/docker:tag3", engine.NewTable("", 0), true, false); err != nil { - t.Fatal(err) - } - - images = getAllImages(eng, t) - - nExpected = len(initialImages.Data[0].GetList("RepoTags")) + 1 - nActual = len(images.Data[0].GetList("RepoTags")) - - if err := srv.DeleteImage("utest:tag1", engine.NewTable("", 0), true, false); err != nil { - t.Fatal(err) - } - - images 
= getAllImages(eng, t) - - if images.Len() != initialImages.Len() { - t.Errorf("Expected %d image, %d found", initialImages.Len(), images.Len()) - } -} - -func TestCreateRm(t *testing.T) { - eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - - config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) - if err != nil { - t.Fatal(err) - } - - id := createTestContainer(eng, config, t) - - job := eng.Job("containers") - job.SetenvBool("all", true) - outs, err := job.Stdout.AddListTable() - if err != nil { - t.Fatal(err) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - - if len(outs.Data) != 1 { - t.Errorf("Expected 1 container, %v found", len(outs.Data)) - } - - job = eng.Job("container_delete", id) - job.SetenvBool("removeVolume", true) - if err := job.Run(); err != nil { - t.Fatal(err) - } - - job = eng.Job("containers") - job.SetenvBool("all", true) - outs, err = job.Stdout.AddListTable() - if err != nil { - t.Fatal(err) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - - if len(outs.Data) != 0 { - t.Errorf("Expected 0 container, %v found", len(outs.Data)) - } - -} + "github.com/docker/docker/engine" +) func TestCreateNumberHostname(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() - config, _, _, err := runconfig.Parse([]string{"-h", "web.0", unitTestImageID, "echo test"}, nil) + config, _, _, err := parseRun([]string{"-h", "web.0", unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -127,145 +20,11 @@ createTestContainer(eng, config, t) } -func TestCreateNumberUsername(t *testing.T) { - eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - - config, _, _, err := runconfig.Parse([]string{"-u", "1002", unitTestImageID, "echo test"}, nil) - if err != nil { - t.Fatal(err) - } - - createTestContainer(eng, config, t) -} - -func TestCreateRmVolumes(t *testing.T) { - eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - - config, hostConfig, _, err := runconfig.Parse([]string{"-v", "/srv", unitTestImageID, "echo", "test"}, nil) - if err != nil { - t.Fatal(err) - } - - id := createTestContainer(eng, config, t) - - job := eng.Job("containers") - job.SetenvBool("all", true) - outs, err := job.Stdout.AddListTable() - if err != nil { - t.Fatal(err) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - - if len(outs.Data) != 1 { - t.Errorf("Expected 1 container, %v found", len(outs.Data)) - } - - job = eng.Job("start", id) - if err := job.ImportEnv(hostConfig); err != nil { - t.Fatal(err) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - - job = eng.Job("stop", id) - job.SetenvInt("t", 1) - if err := job.Run(); err != nil { - t.Fatal(err) - } - - job = eng.Job("container_delete", id) - job.SetenvBool("removeVolume", true) - if err := job.Run(); err != nil { - t.Fatal(err) - } - - job = eng.Job("containers") - job.SetenvBool("all", true) - outs, err = job.Stdout.AddListTable() - if err != nil { - t.Fatal(err) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - - if len(outs.Data) != 0 { - t.Errorf("Expected 0 container, %v found", len(outs.Data)) - } -} - -func TestCreateRmRunning(t *testing.T) { - eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - - config, hostConfig, _, err := runconfig.Parse([]string{"-name", "foo", unitTestImageID, "sleep 300"}, nil) - if err != nil { - t.Fatal(err) - } - - id := createTestContainer(eng, config, t) - - job := eng.Job("containers") - 
job.SetenvBool("all", true) - outs, err := job.Stdout.AddListTable() - if err != nil { - t.Fatal(err) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - - if len(outs.Data) != 1 { - t.Errorf("Expected 1 container, %v found", len(outs.Data)) - } - - job = eng.Job("start", id) - if err := job.ImportEnv(hostConfig); err != nil { - t.Fatal(err) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - - // Test cannot remove running container - job = eng.Job("container_delete", id) - job.SetenvBool("forceRemove", false) - if err := job.Run(); err == nil { - t.Fatal("Expected container delete to fail") - } - - // Test can force removal of running container - job = eng.Job("container_delete", id) - job.SetenvBool("forceRemove", true) - if err := job.Run(); err != nil { - t.Fatal(err) - } - - job = eng.Job("containers") - job.SetenvBool("all", true) - outs, err = job.Stdout.AddListTable() - if err != nil { - t.Fatal(err) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - - if len(outs.Data) != 0 { - t.Errorf("Expected 0 container, %v found", len(outs.Data)) - } -} - func TestCommit(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() - config, _, _, err := runconfig.Parse([]string{unitTestImageID, "/bin/cat"}, nil) + config, _, _, err := parseRun([]string{unitTestImageID, "/bin/cat"}, nil) if err != nil { t.Fatal(err) } @@ -283,13 +42,13 @@ func TestMergeConfigOnCommit(t *testing.T) { eng := NewTestEngine(t) - runtime := mkRuntimeFromEngine(eng, t) + runtime := mkDaemonFromEngine(eng, t) defer runtime.Nuke() container1, _, _ := mkContainer(runtime, []string{"-e", "FOO=bar", unitTestImageID, "echo test > /tmp/foo"}, t) defer runtime.Destroy(container1) - config, _, _, err := runconfig.Parse([]string{container1.ID, "cat /tmp/foo"}, nil) + config, _, _, err := parseRun([]string{container1.ID, "cat /tmp/foo"}, nil) if err != nil { t.Error(err) } @@ -298,22 +57,22 @@ job.Setenv("repo", "testrepo") job.Setenv("tag", "testtag") job.SetenvJson("config", config) - var newId string - job.Stdout.AddString(&newId) + var outputBuffer = bytes.NewBuffer(nil) + job.Stdout.Add(outputBuffer) if err := job.Run(); err != nil { t.Error(err) } - container2, _, _ := mkContainer(runtime, []string{newId}, t) + container2, _, _ := mkContainer(runtime, []string{engine.Tail(outputBuffer, 1)}, t) defer runtime.Destroy(container2) - job = eng.Job("inspect", container1.Name, "container") + job = eng.Job("container_inspect", container1.Name) baseContainer, _ := job.Stdout.AddEnv() if err := job.Run(); err != nil { t.Error(err) } - job = eng.Job("inspect", container2.Name, "container") + job = eng.Job("container_inspect", container2.Name) commitContainer, _ := job.Stdout.AddEnv() if err := job.Run(); err != nil { t.Error(err) @@ -340,11 +99,10 @@ func TestRestartKillWait(t *testing.T) { eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - runtime := mkRuntimeFromEngine(eng, t) + runtime := mkDaemonFromEngine(eng, t) defer runtime.Nuke() - config, hostConfig, _, err := runconfig.Parse([]string{"-i", unitTestImageID, "/bin/cat"}, nil) + config, hostConfig, _, err := parseRun([]string{"-i", unitTestImageID, "/bin/cat"}, nil) if err != nil { t.Fatal(err) } @@ -377,10 +135,9 @@ t.Fatal(err) } - eng = newTestEngine(t, false, eng.Root()) - srv = mkServerFromEngine(eng, t) + eng = newTestEngine(t, false, runtime.Config().Root) - job = srv.Eng.Job("containers") + job = eng.Job("containers") job.SetenvBool("all", true) outs, err = 
job.Stdout.AddListTable() if err != nil { @@ -395,9 +152,7 @@ } setTimeout(t, "Waiting on stopped container timed out", 5*time.Second, func() { - job = srv.Eng.Job("wait", outs.Data[0].Get("Id")) - var statusStr string - job.Stdout.AddString(&statusStr) + job = eng.Job("wait", outs.Data[0].Get("Id")) if err := job.Run(); err != nil { t.Fatal(err) } @@ -406,17 +161,16 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) { eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() - config, hostConfig, _, err := runconfig.Parse([]string{"-i", unitTestImageID, "/bin/cat"}, nil) + config, hostConfig, _, err := parseRun([]string{"-i", unitTestImageID, "/bin/cat"}, nil) if err != nil { t.Fatal(err) } id := createTestContainer(eng, config, t) - job := srv.Eng.Job("containers") + job := eng.Job("containers") job.SetenvBool("all", true) outs, err := job.Stdout.AddListTable() if err != nil { @@ -439,13 +193,13 @@ } job = eng.Job("restart", id) - job.SetenvInt("t", 15) + job.SetenvInt("t", 2) if err := job.Run(); err != nil { t.Fatal(err) } job = eng.Job("stop", id) - job.SetenvInt("t", 15) + job.SetenvInt("t", 2) if err := job.Run(); err != nil { t.Fatal(err) } @@ -463,13 +217,13 @@ } // FIXME: this failed once with a race condition ("Unable to remove filesystem for xxx: directory not empty") - job = eng.Job("container_delete", id) + job = eng.Job("rm", id) job.SetenvBool("removeVolume", true) if err := job.Run(); err != nil { t.Fatal(err) } - job = srv.Eng.Job("containers") + job = eng.Job("containers") job.SetenvBool("all", true) outs, err = job.Stdout.AddListTable() if err != nil { @@ -486,7 +240,7 @@ func TestRunWithTooLowMemoryLimit(t *testing.T) { eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() + defer mkDaemonFromEngine(eng, t).Nuke() // Try to create a container with a memory limit of 1 byte less than the minimum allowed limit. job := eng.Job("create") @@ -494,102 +248,14 @@ job.Setenv("Memory", "524287") job.Setenv("CpuShares", "1000") job.SetenvList("Cmd", []string{"/bin/cat"}) - var id string - job.Stdout.AddString(&id) if err := job.Run(); err == nil { t.Errorf("Memory limit is smaller than the allowed limit.
Container creation should've failed!") } } -func TestRmi(t *testing.T) { - eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - defer mkRuntimeFromEngine(eng, t).Nuke() - - initialImages := getAllImages(eng, t) - - config, hostConfig, _, err := runconfig.Parse([]string{unitTestImageID, "echo", "test"}, nil) - if err != nil { - t.Fatal(err) - } - - containerID := createTestContainer(eng, config, t) - - //To remove - job := eng.Job("start", containerID) - if err := job.ImportEnv(hostConfig); err != nil { - t.Fatal(err) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - - if err := eng.Job("wait", containerID).Run(); err != nil { - t.Fatal(err) - } - - job = eng.Job("commit", containerID) - job.Setenv("repo", "test") - var imageID string - job.Stdout.AddString(&imageID) - if err := job.Run(); err != nil { - t.Fatal(err) - } - - if err := eng.Job("tag", imageID, "test", "0.1").Run(); err != nil { - t.Fatal(err) - } - - containerID = createTestContainer(eng, config, t) - - //To remove - job = eng.Job("start", containerID) - if err := job.ImportEnv(hostConfig); err != nil { - t.Fatal(err) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - - if err := eng.Job("wait", containerID).Run(); err != nil { - t.Fatal(err) - } - - job = eng.Job("commit", containerID) - job.Setenv("repo", "test") - if err := job.Run(); err != nil { - t.Fatal(err) - } - - images := getAllImages(eng, t) - - if images.Len()-initialImages.Len() != 2 { - t.Fatalf("Expected 2 new images, found %d.", images.Len()-initialImages.Len()) - } - - if err = srv.DeleteImage(imageID, engine.NewTable("", 0), true, false); err != nil { - t.Fatal(err) - } - - images = getAllImages(eng, t) - - if images.Len()-initialImages.Len() != 1 { - t.Fatalf("Expected 1 new image, found %d.", images.Len()-initialImages.Len()) - } - - for _, image := range images.Data { - if strings.Contains(unitTestImageID, image.Get("Id")) { - continue - } - if image.GetList("RepoTags")[0] == ":" { - t.Fatalf("Expected tagged image, got untagged one.") - } - } -} - func TestImagesFilter(t *testing.T) { eng := NewTestEngine(t) - defer nuke(mkRuntimeFromEngine(eng, t)) + defer nuke(mkDaemonFromEngine(eng, t)) if err := eng.Job("tag", unitTestImageName, "utest", "tag1").Run(); err != nil { t.Fatal(err) @@ -627,191 +293,3 @@ t.Fatal("incorrect number of matches returned") } } - -func TestImageInsert(t *testing.T) { - eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - srv := mkServerFromEngine(eng, t) - - // bad image name fails - if err := srv.Eng.Job("insert", "foo", "https://www.docker.io/static/img/docker-top-logo.png", "/foo").Run(); err == nil { - t.Fatal("expected an error and got none") - } - - // bad url fails - if err := srv.Eng.Job("insert", unitTestImageID, "http://bad_host_name_that_will_totally_fail.com/", "/foo").Run(); err == nil { - t.Fatal("expected an error and got none") - } - - // success returns nil - if err := srv.Eng.Job("insert", unitTestImageID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo").Run(); err != nil { - t.Fatalf("expected no error, but got %v", err) - } -} - -func TestListContainers(t *testing.T) { - eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - defer mkRuntimeFromEngine(eng, t).Nuke() - - config := runconfig.Config{ - Image: unitTestImageID, - Cmd: []string{"/bin/sh", "-c", "cat"}, - OpenStdin: true, - } - - firstID := createTestContainer(eng, &config, t) - secondID := createTestContainer(eng, &config, t) - thirdID := createTestContainer(eng, &config, t) - 
fourthID := createTestContainer(eng, &config, t) - defer func() { - containerKill(eng, firstID, t) - containerKill(eng, secondID, t) - containerKill(eng, fourthID, t) - containerWait(eng, firstID, t) - containerWait(eng, secondID, t) - containerWait(eng, fourthID, t) - }() - - startContainer(eng, firstID, t) - startContainer(eng, secondID, t) - startContainer(eng, fourthID, t) - - // all - if !assertContainerList(srv, true, -1, "", "", []string{fourthID, thirdID, secondID, firstID}) { - t.Error("Container list is not in the correct order") - } - - // running - if !assertContainerList(srv, false, -1, "", "", []string{fourthID, secondID, firstID}) { - t.Error("Container list is not in the correct order") - } - - // from here 'all' flag is ignored - - // limit - expected := []string{fourthID, thirdID} - if !assertContainerList(srv, true, 2, "", "", expected) || - !assertContainerList(srv, false, 2, "", "", expected) { - t.Error("Container list is not in the correct order") - } - - // since - expected = []string{fourthID, thirdID, secondID} - if !assertContainerList(srv, true, -1, firstID, "", expected) || - !assertContainerList(srv, false, -1, firstID, "", expected) { - t.Error("Container list is not in the correct order") - } - - // before - expected = []string{secondID, firstID} - if !assertContainerList(srv, true, -1, "", thirdID, expected) || - !assertContainerList(srv, false, -1, "", thirdID, expected) { - t.Error("Container list is not in the correct order") - } - - // since & before - expected = []string{thirdID, secondID} - if !assertContainerList(srv, true, -1, firstID, fourthID, expected) || - !assertContainerList(srv, false, -1, firstID, fourthID, expected) { - t.Error("Container list is not in the correct order") - } - - // since & limit - expected = []string{fourthID, thirdID} - if !assertContainerList(srv, true, 2, firstID, "", expected) || - !assertContainerList(srv, false, 2, firstID, "", expected) { - t.Error("Container list is not in the correct order") - } - - // before & limit - expected = []string{thirdID} - if !assertContainerList(srv, true, 1, "", fourthID, expected) || - !assertContainerList(srv, false, 1, "", fourthID, expected) { - t.Error("Container list is not in the correct order") - } - - // since & before & limit - expected = []string{thirdID} - if !assertContainerList(srv, true, 1, firstID, fourthID, expected) || - !assertContainerList(srv, false, 1, firstID, fourthID, expected) { - t.Error("Container list is not in the correct order") - } -} - -func assertContainerList(srv *docker.Server, all bool, limit int, since, before string, expected []string) bool { - job := srv.Eng.Job("containers") - job.SetenvBool("all", all) - job.SetenvInt("limit", limit) - job.Setenv("since", since) - job.Setenv("before", before) - outs, err := job.Stdout.AddListTable() - if err != nil { - return false - } - if err := job.Run(); err != nil { - return false - } - if len(outs.Data) != len(expected) { - return false - } - for i := 0; i < len(outs.Data); i++ { - if outs.Data[i].Get("Id") != expected[i] { - return false - } - } - return true -} - -// Regression test for being able to untag an image with an existing -// container -func TestDeleteTagWithExistingContainers(t *testing.T) { - eng := NewTestEngine(t) - defer nuke(mkRuntimeFromEngine(eng, t)) - - srv := mkServerFromEngine(eng, t) - - // Tag the image - if err := eng.Job("tag", unitTestImageID, "utest", "tag1").Run(); err != nil { - t.Fatal(err) - } - - // Create a container from the image - config, _, _, err := 
runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) - if err != nil { - t.Fatal(err) - } - - id := createNamedTestContainer(eng, config, t, "testingtags") - if id == "" { - t.Fatal("No id returned") - } - - job := srv.Eng.Job("containers") - job.SetenvBool("all", true) - outs, err := job.Stdout.AddListTable() - if err != nil { - t.Fatal(err) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - - if len(outs.Data) != 1 { - t.Fatalf("Expected 1 container got %d", len(outs.Data)) - } - - // Try to remove the tag - imgs := engine.NewTable("", 0) - if err := srv.DeleteImage("utest:tag1", imgs, true, false); err != nil { - t.Fatal(err) - } - - if len(imgs.Data) != 1 { - t.Fatalf("Should only have deleted one untag %d", len(imgs.Data)) - } - - if untag := imgs.Data[0].Get("Untagged"); untag != "utest:tag1" { - t.Fatalf("Expected %s got %s", unitTestImageID, untag) - } -} diff -Nru docker.io-0.9.1~dfsg1/integration/sorter_test.go docker.io-1.3.2~dfsg1/integration/sorter_test.go --- docker.io-0.9.1~dfsg1/integration/sorter_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/sorter_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,56 +0,0 @@ -package docker - -import ( - "github.com/dotcloud/docker/engine" - "testing" - "time" -) - -func TestServerListOrderedImagesByCreationDate(t *testing.T) { - eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - - if err := generateImage("", eng); err != nil { - t.Fatal(err) - } - - images := getImages(eng, t, true, "") - - if images.Data[0].GetInt("Created") < images.Data[1].GetInt("Created") { - t.Error("Expected images to be ordered by most recent creation date.") - } -} - -func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) { - eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - - err := generateImage("bar", eng) - if err != nil { - t.Fatal(err) - } - - time.Sleep(time.Second) - - err = generateImage("zed", eng) - if err != nil { - t.Fatal(err) - } - - images := getImages(eng, t, true, "") - - if repoTags := images.Data[0].GetList("RepoTags"); repoTags[0] != "repo:zed" && repoTags[0] != "repo:bar" { - t.Errorf("Expected Images to be ordered by most recent creation date.") - } -} - -func generateImage(name string, eng *engine.Engine) error { - archive, err := fakeTar() - if err != nil { - return err - } - job := eng.Job("import", "-", "repo", name) - job.Stdin.Add(archive) - job.SetenvBool("json", true) - return job.Run() -} diff -Nru docker.io-0.9.1~dfsg1/integration/utils_test.go docker.io-1.3.2~dfsg1/integration/utils_test.go --- docker.io-0.9.1~dfsg1/integration/utils_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/utils_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -3,7 +3,6 @@ import ( "bytes" "fmt" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "io/ioutil" "net/http" @@ -14,63 +13,69 @@ "testing" "time" - "github.com/dotcloud/docker" - "github.com/dotcloud/docker/builtins" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + + "github.com/docker/docker/builtins" + "github.com/docker/docker/daemon" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/log" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" ) // This file 
contains utility functions for docker's unit test suite. // It has to be named XXX_test.go, apparently, in order to access private functions // from other XXX_test.go functions. -// Create a temporary runtime suitable for unit testing. +// Create a temporary daemon suitable for unit testing. // Call t.Fatal() at the first error. -func mkRuntime(f utils.Fataler) *docker.Runtime { +func mkDaemon(f log.Fataler) *daemon.Daemon { eng := newTestEngine(f, false, "") - return mkRuntimeFromEngine(eng, f) + return mkDaemonFromEngine(eng, f) // FIXME: // [...] // Mtu: docker.GetDefaultNetworkMtu(), // [...] } -func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f utils.Fataler, name string) (shortId string) { +func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f log.Fataler, name string) (shortId string) { job := eng.Job("create", name) if err := job.ImportEnv(config); err != nil { f.Fatal(err) } - job.Stdout.AddString(&shortId) + var outputBuffer = bytes.NewBuffer(nil) + job.Stdout.Add(outputBuffer) if err := job.Run(); err != nil { f.Fatal(err) } - return + return engine.Tail(outputBuffer, 1) } -func createTestContainer(eng *engine.Engine, config *runconfig.Config, f utils.Fataler) (shortId string) { +func createTestContainer(eng *engine.Engine, config *runconfig.Config, f log.Fataler) (shortId string) { return createNamedTestContainer(eng, config, f, "") } -func startContainer(eng *engine.Engine, id string, t utils.Fataler) { +func startContainer(eng *engine.Engine, id string, t log.Fataler) { job := eng.Job("start", id) if err := job.Run(); err != nil { t.Fatal(err) } } -func containerRun(eng *engine.Engine, id string, t utils.Fataler) { +func containerRun(eng *engine.Engine, id string, t log.Fataler) { startContainer(eng, id, t) containerWait(eng, id, t) } -func containerFileExists(eng *engine.Engine, id, dir string, t utils.Fataler) bool { +func containerFileExists(eng *engine.Engine, id, dir string, t log.Fataler) bool { c := getContainer(eng, id, t) if err := c.Mount(); err != nil { t.Fatal(err) } defer c.Unmount() - if _, err := os.Stat(path.Join(c.BasefsPath(), dir)); err != nil { + if _, err := os.Stat(path.Join(c.RootfsPath(), dir)); err != nil { if os.IsNotExist(err) { return false } @@ -79,7 +84,7 @@ return true } -func containerAttach(eng *engine.Engine, id string, t utils.Fataler) (io.WriteCloser, io.ReadCloser) { +func containerAttach(eng *engine.Engine, id string, t log.Fataler) (io.WriteCloser, io.ReadCloser) { c := getContainer(eng, id, t) i, err := c.StdinPipe() if err != nil { @@ -92,38 +97,40 @@ return i, o } -func containerWait(eng *engine.Engine, id string, t utils.Fataler) int { - return getContainer(eng, id, t).Wait() +func containerWait(eng *engine.Engine, id string, t log.Fataler) int { + ex, _ := getContainer(eng, id, t).WaitStop(-1 * time.Second) + return ex } -func containerWaitTimeout(eng *engine.Engine, id string, t utils.Fataler) error { - return getContainer(eng, id, t).WaitTimeout(500 * time.Millisecond) +func containerWaitTimeout(eng *engine.Engine, id string, t log.Fataler) error { + _, err := getContainer(eng, id, t).WaitStop(500 * time.Millisecond) + return err } -func containerKill(eng *engine.Engine, id string, t utils.Fataler) { +func containerKill(eng *engine.Engine, id string, t log.Fataler) { if err := eng.Job("kill", id).Run(); err != nil { t.Fatal(err) } } -func containerRunning(eng *engine.Engine, id string, t utils.Fataler) bool { - return getContainer(eng, id, t).State.IsRunning() +func 
containerRunning(eng *engine.Engine, id string, t log.Fataler) bool { + return getContainer(eng, id, t).IsRunning() } -func containerAssertExists(eng *engine.Engine, id string, t utils.Fataler) { +func containerAssertExists(eng *engine.Engine, id string, t log.Fataler) { getContainer(eng, id, t) } -func containerAssertNotExists(eng *engine.Engine, id string, t utils.Fataler) { - runtime := mkRuntimeFromEngine(eng, t) - if c := runtime.Get(id); c != nil { +func containerAssertNotExists(eng *engine.Engine, id string, t log.Fataler) { + daemon := mkDaemonFromEngine(eng, t) + if c := daemon.Get(id); c != nil { t.Fatal(fmt.Errorf("Container %s should not exist", id)) } } // assertHttpNotError expects the given response to not have an error. // Otherwise it causes the test to fail. -func assertHttpNotError(r *httptest.ResponseRecorder, t utils.Fataler) { +func assertHttpNotError(r *httptest.ResponseRecorder, t log.Fataler) { // Non-error http statuses are [200, 400) if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest { t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code)) } @@ -132,47 +139,35 @@ // assertHttpError expects the given response to have an error. // Otherwise it causes the test to fail. -func assertHttpError(r *httptest.ResponseRecorder, t utils.Fataler) { +func assertHttpError(r *httptest.ResponseRecorder, t log.Fataler) { // Non-error http statuses are [200, 400) if !(r.Code < http.StatusOK || r.Code >= http.StatusBadRequest) { t.Fatal(fmt.Errorf("Unexpected http success code: %v", r.Code)) } } -func getContainer(eng *engine.Engine, id string, t utils.Fataler) *docker.Container { - runtime := mkRuntimeFromEngine(eng, t) - c := runtime.Get(id) +func getContainer(eng *engine.Engine, id string, t log.Fataler) *daemon.Container { + daemon := mkDaemonFromEngine(eng, t) + c := daemon.Get(id) if c == nil { t.Fatal(fmt.Errorf("No such container: %s", id)) } return c } -func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Server { - iSrv := eng.Hack_GetGlobalVar("httpapi.server") - if iSrv == nil { - panic("Legacy server field not set in engine") +func mkDaemonFromEngine(eng *engine.Engine, t log.Fataler) *daemon.Daemon { + iDaemon := eng.Hack_GetGlobalVar("httpapi.daemon") + if iDaemon == nil { + panic("Legacy daemon field not set in engine") } - srv, ok := iSrv.(*docker.Server) + daemon, ok := iDaemon.(*daemon.Daemon) if !ok { - panic("Legacy server field in engine does not cast to *docker.Server") + panic("Legacy daemon field in engine does not cast to *daemon.Daemon") } - return srv + return daemon } -func mkRuntimeFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Runtime { - iRuntime := eng.Hack_GetGlobalVar("httpapi.runtime") - if iRuntime == nil { - panic("Legacy runtime field not set in engine") - } - runtime, ok := iRuntime.(*docker.Runtime) - if !ok { - panic("Legacy runtime field in engine does not cast to *docker.Runtime") - } - return runtime -} - -func newTestEngine(t utils.Fataler, autorestart bool, root string) *engine.Engine { +func newTestEngine(t log.Fataler, autorestart bool, root string) *engine.Engine { if root == "" { if dir, err := newTestDirectory(unitTestStoreBase); err != nil { t.Fatal(err) @@ -180,26 +175,32 @@ root = dir } } - eng, err := engine.New(root) - if err != nil { - t.Fatal(err) - } + os.MkdirAll(root, 0700) + + eng := engine.New() + eng.Logging = false // Load default plugins builtins.Register(eng) // (This is manually copied and modified from main() until we have a more generic plugin system) - job := 
eng.Job("initserver") - job.Setenv("Root", root) - job.SetenvBool("AutoRestart", autorestart) - job.Setenv("ExecDriver", "native") - // TestGetEnabledCors and TestOptionsRoute require EnableCors=true - job.SetenvBool("EnableCors", true) - if err := job.Run(); err != nil { + cfg := &daemon.Config{ + Root: root, + AutoRestart: autorestart, + ExecDriver: "native", + // Either InterContainerCommunication or EnableIptables must be set, + // otherwise NewDaemon will fail because of conflicting settings. + InterContainerCommunication: true, + } + d, err := daemon.NewDaemon(cfg, eng) + if err != nil { + t.Fatal(err) + } + if err := d.Install(eng); err != nil { t.Fatal(err) } return eng } -func NewTestEngine(t utils.Fataler) *engine.Engine { +func NewTestEngine(t log.Fataler) *engine.Engine { return newTestEngine(t, false, "") } @@ -244,13 +245,13 @@ return string(data) } -// Create a test container from the given runtime `r` and run arguments `args`. +// Create a test container from the given daemon `r` and run arguments `args`. // If the image name is "_", (eg. []string{"-i", "-t", "_", "bash"}, it is // dynamically replaced by the current test image. // The caller is responsible for destroying the container. // Call t.Fatal() at the first error. -func mkContainer(r *docker.Runtime, args []string, t *testing.T) (*docker.Container, *runconfig.HostConfig, error) { - config, hc, _, err := runconfig.Parse(args, nil) +func mkContainer(r *daemon.Daemon, args []string, t *testing.T) (*daemon.Container, *runconfig.HostConfig, error) { + config, hc, _, err := parseRun(args, nil) defer func() { if err != nil && t != nil { t.Fatal(err) @@ -262,7 +263,7 @@ if config.Image == "_" { config.Image = GetTestImage(r).ID } - c, _, err := r.Create(config, "") + c, _, err := r.Create(config, nil, "") if err != nil { return nil, nil, err } @@ -280,7 +281,7 @@ // and return its standard output as a string. // The image name (eg. the XXX in []string{"-i", "-t", "XXX", "bash"}, is dynamically replaced by the current test image. // If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally. 
-func runContainer(eng *engine.Engine, r *docker.Runtime, args []string, t *testing.T) (output string, err error) { +func runContainer(eng *engine.Engine, r *daemon.Daemon, args []string, t *testing.T) (output string, err error) { defer func() { if err != nil && t != nil { t.Fatal(err) @@ -305,7 +306,7 @@ return "", err } - container.Wait() + container.WaitStop(-1 * time.Second) data, err := ioutil.ReadAll(stdout) if err != nil { return "", err @@ -350,3 +351,10 @@ return images } + +func parseRun(args []string, sysInfo *sysinfo.SysInfo) (*runconfig.Config, *runconfig.HostConfig, *flag.FlagSet, error) { + cmd := flag.NewFlagSet("run", flag.ContinueOnError) + cmd.SetOutput(ioutil.Discard) + cmd.Usage = nil + return runconfig.Parse(cmd, args, sysInfo) +} diff -Nru docker.io-0.9.1~dfsg1/integration/z_final_test.go docker.io-1.3.2~dfsg1/integration/z_final_test.go --- docker.io-0.9.1~dfsg1/integration/z_final_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration/z_final_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,7 +1,7 @@ package docker import ( - "github.com/dotcloud/docker/utils" + "github.com/docker/docker/utils" "runtime" "testing" ) @@ -11,7 +11,7 @@ } func TestFinal(t *testing.T) { - nuke(globalRuntime) + nuke(globalDaemon) t.Logf("Start Fds: %d, Start Goroutines: %d", startFds, startGoroutines) displayFdGoroutines(t) } diff -Nru docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/DirContentToExistDir/Dockerfile docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/DirContentToExistDir/Dockerfile --- docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/DirContentToExistDir/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/DirContentToExistDir/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,10 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] diff -Nru docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/DirContentToRoot/Dockerfile docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/DirContentToRoot/Dockerfile --- docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/DirContentToRoot/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/DirContentToRoot/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,8 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff -Nru docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/DisallowRemote/Dockerfile docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/DisallowRemote/Dockerfile --- docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/DisallowRemote/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/DisallowRemote/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,2 
@@ +FROM busybox +COPY https://index.docker.io/robots.txt / diff -Nru docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/EtcToRoot/Dockerfile docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/EtcToRoot/Dockerfile --- docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/EtcToRoot/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/EtcToRoot/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,2 @@ +FROM scratch +COPY . / diff -Nru docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/MultipleFiles/Dockerfile docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/MultipleFiles/Dockerfile --- docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/MultipleFiles/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/MultipleFiles/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,17 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file1 test_file2 /exists/ +ADD test_file3 test_file4 https://docker.com/robots.txt /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ] + +RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ] + +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff -Nru docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/MultipleFilesToFile/Dockerfile docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/MultipleFilesToFile/Dockerfile --- docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/MultipleFilesToFile/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/MultipleFilesToFile/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,7 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN chown -R dockerio.dockerio /exists +COPY test_file1 /exists/ +ADD test_file2 test_file3 /exists/test_file1 diff -Nru docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/SingleFileToExistDir/Dockerfile docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/SingleFileToExistDir/Dockerfile --- docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/SingleFileToExistDir/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/SingleFileToExistDir/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,10 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff -Nru 
docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/Dockerfile docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/Dockerfile --- docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,9 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff -Nru docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/SingleFileToRoot/Dockerfile docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/SingleFileToRoot/Dockerfile --- docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/SingleFileToRoot/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/SingleFileToRoot/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,9 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '-rw-r--r--' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff -Nru docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/SingleFileToWorkdir/Dockerfile docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/SingleFileToWorkdir/Dockerfile --- docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/SingleFileToWorkdir/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/SingleFileToWorkdir/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,2 @@ +FROM busybox +COPY test_file . 
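A pattern worth noting across these TestCopy fixtures: every ownership and permission check is just a shell test inside a RUN instruction, so a false assertion exits nonzero and aborts the whole build. Reduced to its core, the idea looks like this (an illustrative sketch, not one of the checked-in fixtures; /probe is a hypothetical file name):

# The build fails at the first RUN whose test exits nonzero.
FROM busybox
RUN touch /probe
# Columns 3 and 4 of `ls -l` are the owner and group.
RUN [ $(ls -l /probe | awk '{print $3":"$4}') = 'root:root' ]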
diff -Nru docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/WholeDirToRoot/Dockerfile docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/WholeDirToRoot/Dockerfile --- docker.io-0.9.1~dfsg1/integration-cli/build_tests/TestCopy/WholeDirToRoot/Dockerfile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/build_tests/TestCopy/WholeDirToRoot/Dockerfile 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,11 @@ +FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '-rw-r--r--' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_api_inspect_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_api_inspect_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_api_inspect_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_api_inspect_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,58 @@ +package main + +import ( + "encoding/json" + "fmt" + "os/exec" + "testing" +) + +func TestInspectApiContainerResponse(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err)) + + cleanedContainerID := stripTrailingCharacters(out) + + // test on json marshal version + // and latest version + testVersions := []string{"v1.11", "latest"} + + for _, testVersion := range testVersions { + endpoint := "/containers/" + cleanedContainerID + "/json" + if testVersion != "latest" { + endpoint = "/" + testVersion + endpoint + } + body, err := sockRequest("GET", endpoint) + if err != nil { + t.Fatalf("sockRequest failed for %s version: %v", testVersion, err) + } + + var inspectJSON map[string]interface{} + if err = json.Unmarshal(body, &inspectJSON); err != nil { + t.Fatalf("unable to unmarshal body for %s version: %v", testVersion, err) + } + + keys := []string{"State", "Created", "Path", "Args", "Config", "Image", "NetworkSettings", "ResolvConfPath", "HostnamePath", "HostsPath", "Name", "Driver", "ExecDriver", "MountLabel", "ProcessLabel", "Volumes", "VolumesRW"} + + if testVersion == "v1.11" { + keys = append(keys, "ID") + } else { + keys = append(keys, "Id") + } + + for _, key := range keys { + if _, ok := inspectJSON[key]; !ok { + t.Fatalf("%s does not exist in response for %s version", key, testVersion) + } + } + // Issue #6830: type not properly converted to JSON/back + if _, ok := inspectJSON["Path"].(bool); ok { + t.Fatalf("Path of `true` should not be converted to boolean `true` via JSON marshalling") + } + } + + deleteAllContainers() + + logDone("container json - check keys in container json response") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_attach_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_attach_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_attach_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_attach_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,89 @@ +package main + +import ( 
+ "io" + "os/exec" + "strings" + "sync" + "testing" + "time" +) + +const attachWait = 5 * time.Second + +func TestAttachMultipleAndRestart(t *testing.T) { + defer deleteAllContainers() + + endGroup := &sync.WaitGroup{} + startGroup := &sync.WaitGroup{} + endGroup.Add(3) + startGroup.Add(3) + + if err := waitForContainer("attacher", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 1; echo hello; done"); err != nil { + t.Fatal(err) + } + + startDone := make(chan struct{}) + endDone := make(chan struct{}) + + go func() { + endGroup.Wait() + close(endDone) + }() + + go func() { + startGroup.Wait() + close(startDone) + }() + + for i := 0; i < 3; i++ { + go func() { + c := exec.Command(dockerBinary, "attach", "attacher") + + defer func() { + c.Wait() + endGroup.Done() + }() + + out, err := c.StdoutPipe() + if err != nil { + t.Fatal(err) + } + + if _, err := startCommand(c); err != nil { + t.Fatal(err) + } + + buf := make([]byte, 1024) + + if _, err := out.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + + startGroup.Done() + + if !strings.Contains(string(buf), "hello") { + t.Fatalf("unexpected output %s expected hello\n", string(buf)) + } + }() + } + + select { + case <-startDone: + case <-time.After(attachWait): + t.Fatalf("Attaches did not initialize properly") + } + + cmd := exec.Command(dockerBinary, "kill", "attacher") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + select { + case <-endDone: + case <-time.After(attachWait): + t.Fatalf("Attaches did not finish properly") + } + + logDone("attach - multiple attach") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_build_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_build_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_build_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_build_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,3177 @@ +package main + +import ( + "archive/tar" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/archive" +) + +func TestBuildShCmdJSONEntrypoint(t *testing.T) { + name := "testbuildshcmdjsonentrypoint" + defer deleteImages(name) + + _, err := buildImage( + name, + ` + FROM busybox + ENTRYPOINT ["/bin/echo"] + CMD echo test + `, + true) + + if err != nil { + t.Fatal(err) + } + + out, _, err := runCommandWithOutput( + exec.Command( + dockerBinary, + "run", + name)) + + if err != nil { + t.Fatal(err) + } + + if strings.TrimSpace(out) != "/bin/sh -c echo test" { + t.Fatal("CMD did not contain /bin/sh -c") + } + + logDone("build - CMD should always contain /bin/sh -c when specified without JSON") +} + +func TestBuildEnvironmentReplacementUser(t *testing.T) { + name := "testbuildenvironmentreplacement" + defer deleteImages(name) + + _, err := buildImage(name, ` + FROM scratch + ENV user foo + USER ${user} + `, true) + if err != nil { + t.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.User") + if err != nil { + t.Fatal(err) + } + + if res != `"foo"` { + t.Fatal("User foo from environment not in Config.User on image") + } + + logDone("build - user environment replacement") +} + +func TestBuildEnvironmentReplacementVolume(t *testing.T) { + name := "testbuildenvironmentreplacement" + defer deleteImages(name) + + _, err := buildImage(name, ` + FROM scratch + ENV volume /quux + VOLUME ${volume} + `, true) + if err != nil { + t.Fatal(err) + } + + res, err := inspectFieldJSON(name, 
"Config.Volumes") + if err != nil { + t.Fatal(err) + } + + var volumes map[string]interface{} + + if err := json.Unmarshal([]byte(res), &volumes); err != nil { + t.Fatal(err) + } + + if _, ok := volumes["/quux"]; !ok { + t.Fatal("Volume /quux from environment not in Config.Volumes on image") + } + + logDone("build - volume environment replacement") +} + +func TestBuildEnvironmentReplacementExpose(t *testing.T) { + name := "testbuildenvironmentreplacement" + defer deleteImages(name) + + _, err := buildImage(name, ` + FROM scratch + ENV port 80 + EXPOSE ${port} + `, true) + if err != nil { + t.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.ExposedPorts") + if err != nil { + t.Fatal(err) + } + + var exposedPorts map[string]interface{} + + if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { + t.Fatal(err) + } + + if _, ok := exposedPorts["80/tcp"]; !ok { + t.Fatal("Exposed port 80 from environment not in Config.ExposedPorts on image") + } + + logDone("build - expose environment replacement") +} + +func TestBuildEnvironmentReplacementWorkdir(t *testing.T) { + name := "testbuildenvironmentreplacement" + defer deleteImages(name) + + _, err := buildImage(name, ` + FROM busybox + ENV MYWORKDIR /work + RUN mkdir ${MYWORKDIR} + WORKDIR ${MYWORKDIR} + `, true) + + if err != nil { + t.Fatal(err) + } + + logDone("build - workdir environment replacement") +} + +func TestBuildEnvironmentReplacementAddCopy(t *testing.T) { + name := "testbuildenvironmentreplacement" + defer deleteImages(name) + + ctx, err := fakeContext(` + FROM scratch + ENV baz foo + ENV quux bar + ENV dot . + + ADD ${baz} ${dot} + COPY ${quux} ${dot} + `, + map[string]string{ + "foo": "test1", + "bar": "test2", + }) + + if err != nil { + t.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + + logDone("build - add/copy environment replacement") +} + +func TestBuildEnvironmentReplacementEnv(t *testing.T) { + name := "testbuildenvironmentreplacement" + + defer deleteImages(name) + + _, err := buildImage(name, + ` + FROM scratch + ENV foo foo + ENV bar ${foo} + `, true) + + if err != nil { + t.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.Env") + if err != nil { + t.Fatal(err) + } + + envResult := []string{} + + if err = unmarshalJSON([]byte(res), &envResult); err != nil { + t.Fatal(err) + } + + found := false + + for _, env := range envResult { + parts := strings.SplitN(env, "=", 2) + if parts[0] == "bar" { + found = true + if parts[1] != "foo" { + t.Fatal("Could not find replaced var for env `bar`: got %q instead of `foo`", parts[1]) + } + } + } + + if !found { + t.Fatal("Never found the `bar` env variable") + } + + logDone("build - env environment replacement") +} + +func TestBuildHandleEscapes(t *testing.T) { + name := "testbuildhandleescapes" + + defer deleteImages(name) + + _, err := buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME ${FOO} + `, true) + + if err != nil { + t.Fatal(err) + } + + var result map[string]map[string]struct{} + + res, err := inspectFieldJSON(name, "Config.Volumes") + if err != nil { + t.Fatal(err) + } + + if err = unmarshalJSON([]byte(res), &result); err != nil { + t.Fatal(err) + } + + if _, ok := result["bar"]; !ok { + t.Fatal("Could not find volume bar set from env foo in volumes table") + } + + _, err = buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME \${FOO} + `, true) + + if err != nil { + t.Fatal(err) + } + + res, err = inspectFieldJSON(name, "Config.Volumes") + if err != nil { + 
+ +func TestBuildOnBuildLowercase(t *testing.T) { + name := "testbuildonbuildlowercase" + name2 := "testbuildonbuildlowercase2" + + defer deleteImages(name, name2) + + _, err := buildImage(name, + ` + FROM busybox + onbuild run echo quux + `, true) + + if err != nil { + t.Fatal(err) + } + + _, out, err := buildImageWithOut(name2, fmt.Sprintf(` + FROM %s + `, name), true) + + if err != nil { + t.Fatal(err) + } + + if !strings.Contains(out, "quux") { + t.Fatalf("Did not receive the expected echo text, got %s", out) + } + + if strings.Contains(out, "ONBUILD ONBUILD") { + t.Fatalf("Output unexpectedly contained \"ONBUILD ONBUILD\": got %s", out) + } + + logDone("build - handle case-insensitive onbuild statement") +} + +func TestBuildEnvEscapes(t *testing.T) { + name := "testbuildenvescapes" + defer deleteAllContainers() + defer deleteImages(name) + _, err := buildImage(name, + ` + FROM busybox + ENV TEST foo + CMD echo \$ + `, + true) + + if err != nil { + t.Fatal(err) + } + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name)) + + if err != nil { + t.Fatal(err) + } + + if strings.TrimSpace(out) != "$" { + t.Fatalf("Expected the container to echo a literal \"$\", got %q", strings.TrimSpace(out)) + } + + logDone("build - env should handle \\$ properly") +} + +func TestBuildEnvOverwrite(t *testing.T) { + name := "testbuildenvoverwrite" + defer deleteAllContainers() + defer deleteImages(name) + + _, err := buildImage(name, + ` + FROM busybox + ENV TEST foo + CMD echo ${TEST} + `, + true) + + if err != nil { + t.Fatal(err) + } + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-e", "TEST=bar", "-t", name)) + + if err != nil { + t.Fatal(err) + } + + if strings.TrimSpace(out) != "bar" { + t.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) + } + + logDone("build - env should overwrite builder ENV during run") +} + +func TestBuildOnBuildForbiddenMaintainerInSourceImage(t *testing.T) { + name := "testbuildonbuildforbiddenmaintainerinsourceimage" + defer deleteImages(name) + createCmd := exec.Command(dockerBinary, "create", "busybox", "true") + out, _, _, err := runCommandWithStdoutStderr(createCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + commitCmd := exec.Command(dockerBinary, "commit", "--run", "{\"OnBuild\":[\"MAINTAINER docker.io\"]}", cleanedContainerID, "onbuild") + + if _, err := runCommand(commitCmd); err != nil { + t.Fatal(err) + } + + _, err = buildImage(name, + `FROM onbuild`, + true) + if err != nil { + if 
!strings.Contains(err.Error(), "maintainer isn't allowed as an ONBUILD trigger") { + t.Fatalf("Wrong error %v, must be about MAINTAINER and ONBUILD in source image", err) + } + } else { + t.Fatal("Error must not be nil") + } + logDone("build - onbuild forbidden maintainer in source image") + +} + +func TestBuildOnBuildForbiddenFromInSourceImage(t *testing.T) { + name := "testbuildonbuildforbiddenfrominsourceimage" + defer deleteImages(name) + createCmd := exec.Command(dockerBinary, "create", "busybox", "true") + out, _, _, err := runCommandWithStdoutStderr(createCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + commitCmd := exec.Command(dockerBinary, "commit", "--run", "{\"OnBuild\":[\"FROM busybox\"]}", cleanedContainerID, "onbuild") + + if _, err := runCommand(commitCmd); err != nil { + t.Fatal(err) + } + + _, err = buildImage(name, + `FROM onbuild`, + true) + if err != nil { + if !strings.Contains(err.Error(), "from isn't allowed as an ONBUILD trigger") { + t.Fatalf("Wrong error %v, must be about FROM and ONBUILD in source image", err) + } + } else { + t.Fatal("Error must not be nil") + } + logDone("build - onbuild forbidden from in source image") + +} + +func TestBuildOnBuildForbiddenChainedInSourceImage(t *testing.T) { + name := "testbuildonbuildforbiddenchainedinsourceimage" + defer deleteImages(name) + createCmd := exec.Command(dockerBinary, "create", "busybox", "true") + out, _, _, err := runCommandWithStdoutStderr(createCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + commitCmd := exec.Command(dockerBinary, "commit", "--run", "{\"OnBuild\":[\"ONBUILD RUN ls\"]}", cleanedContainerID, "onbuild") + + if _, err := runCommand(commitCmd); err != nil { + t.Fatal(err) + } + + _, err = buildImage(name, + `FROM onbuild`, + true) + if err != nil { + if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") { + t.Fatalf("Wrong error %v, must be about chaining ONBUILD in source image", err) + } + } else { + t.Fatal("Error must not be nil") + } + logDone("build - onbuild forbidden chained in source image") + +} + +func TestBuildOnBuildCmdEntrypointJSON(t *testing.T) { + name1 := "onbuildcmd" + name2 := "onbuildgenerated" + + defer deleteAllContainers() + defer deleteImages(name2) + defer deleteImages(name1) + + _, err := buildImage(name1, ` +FROM busybox +ONBUILD CMD ["hello world"] +ONBUILD ENTRYPOINT ["echo"] +ONBUILD RUN ["true"]`, + false) + + if err != nil { + t.Fatal(err) + } + + _, err = buildImage(name2, fmt.Sprintf(`FROM %s`, name1), false) + + if err != nil { + t.Fatal(err) + } + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name2)) + if err != nil { + t.Fatal(err) + } + + if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { + t.Fatal("did not get echo output from onbuild", out) + } + + logDone("build - onbuild with json entrypoint/cmd") +} + +func TestBuildOnBuildEntrypointJSON(t *testing.T) { + name1 := "onbuildcmd" + name2 := "onbuildgenerated" + + defer deleteAllContainers() + defer deleteImages(name2) + defer deleteImages(name1) + + _, err := buildImage(name1, ` +FROM busybox +ONBUILD ENTRYPOINT ["echo"]`, + false) + + if err != nil { + t.Fatal(err) + } + + _, err = buildImage(name2, fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1), false) + + if err != nil { + t.Fatal(err) + } + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name2)) + if err != nil { + t.Fatal(err) + } + + if 
!regexp.MustCompile(`(?m)^hello world`).MatchString(out) { + t.Fatal("got malformed output from onbuild", out) + } + + logDone("build - onbuild with json entrypoint") +} + +func TestBuildCacheADD(t *testing.T) { + name := "testbuildtwoimageswithadd" + defer deleteImages(name) + server, err := fakeStorage(map[string]string{ + "robots.txt": "hello", + "index.html": "world", + }) + if err != nil { + t.Fatal(err) + } + defer server.Close() + if _, err := buildImage(name, + fmt.Sprintf(`FROM scratch + ADD %s/robots.txt /`, server.URL), + true); err != nil { + t.Fatal(err) + } + _, out, err := buildImageWithOut(name, + fmt.Sprintf(`FROM scratch + ADD %s/index.html /`, server.URL), + true) + if err != nil { + t.Fatal(err) + } + if strings.Contains(out, "Using cache") { + t.Fatal("2nd build used cache on ADD, it shouldn't") + } + + logDone("build - build two images with remote ADD") +} + +func TestBuildSixtySteps(t *testing.T) { + name := "foobuildsixtysteps" + defer deleteImages(name) + ctx, err := fakeContext("FROM scratch\n"+strings.Repeat("ADD foo /\n", 60), + map[string]string{ + "foo": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - build an image with sixty build steps") +} + +func TestBuildAddSingleFileToRoot(t *testing.T) { + name := "testaddimg" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '-rw-r--r--' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - add single file to root") +} + +// Issue #3960: "ADD src ." 
hangs +func TestBuildAddSingleFileToWorkdir(t *testing.T) { + name := "testaddsinglefiletoworkdir" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +ADD test_file .`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + done := make(chan struct{}) + go func() { + // Avoid t.Fatal in a non-test goroutine: record the error and still close(done). + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Error(err) + } + close(done) + }() + select { + case <-time.After(5 * time.Second): + t.Fatal("Build with adding to workdir timed out") + case <-done: + } + logDone("build - add single file to workdir") +} + +func TestBuildAddSingleFileToExistDir(t *testing.T) { + name := "testaddsinglefiletoexistdir" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - add single file to existing dir") +} + +func TestBuildCopyAddMultipleFiles(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") + out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testaddimg", "MultipleFiles") + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + deleteImages("testaddimg") + + logDone("build - multiple file copy/add tests") +} + +func TestBuildAddMultipleFilesToFile(t *testing.T) { + name := "testaddmultiplefilestofile" + defer deleteImages(name) + ctx, err := fakeContext(`FROM scratch + ADD file1.txt file2.txt test + `, + map[string]string{ + "file1.txt": "test1", + "file2.txt": "test1", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + + expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" + if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { + t.Fatalf("Wrong error: (should contain \"%s\") got:\n%v", expected, err) + } + + logDone("build - multiple add files to file") +} + +func TestBuildAddMultipleFilesToFileWild(t *testing.T) { + name := "testaddmultiplefilestofilewild" + defer deleteImages(name) + ctx, err := fakeContext(`FROM scratch + ADD file*.txt test + `, + map[string]string{ + "file1.txt": "test1", + "file2.txt": "test1", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + + expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" + if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { + t.Fatalf("Wrong error: (should contain \"%s\") got:\n%v", expected, err) + } + + logDone("build - multiple add files to file wild") +}
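The tests above pin down the multi-source rule that ADD and COPY share: with more than one source, the destination must be a directory and must end with a trailing slash. For contrast, a minimal passing counterpart (an illustrative sketch with hypothetical file names, not a checked-in fixture; file1.txt and file2.txt are assumed to exist in the build context):

FROM busybox
RUN mkdir /dest
# Valid multi-source form: the destination is a directory and ends with "/".
COPY file1.txt file2.txt /dest/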
"test1", + "file2.txt": "test1", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + + expected := "When using COPY with more than one source file, the destination must be a directory and end with a /" + if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { + t.Fatalf("Wrong error: (should contain \"%s\") got:\n%v", expected, err) + } + + logDone("build - multiple copy files to file") +} + +func TestBuildCopyWildcard(t *testing.T) { + name := "testcopywildcard" + defer deleteImages(name) + server, err := fakeStorage(map[string]string{ + "robots.txt": "hello", + "index.html": "world", + }) + if err != nil { + t.Fatal(err) + } + defer server.Close() + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox + COPY file*.txt /tmp/ + RUN ls /tmp/file1.txt /tmp/file2.txt + RUN mkdir /tmp1 + COPY dir* /tmp1/ + RUN ls /tmp1/dirt /tmp1/nested_file /tmp1/nested_dir/nest_nest_file + RUN mkdir /tmp2 + ADD dir/*dir %s/robots.txt /tmp2/ + RUN ls /tmp2/nest_nest_file /tmp2/robots.txt + `, server.URL), + map[string]string{ + "file1.txt": "test1", + "file2.txt": "test2", + "dir/nested_file": "nested file", + "dir/nested_dir/nest_nest_file": "2 times nested", + "dirt": "dirty", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + + // Now make sure we use a cache the 2nd time + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + + if id1 != id2 { + t.Fatal("didn't use the cache") + } + + logDone("build - copy wild card") +} + +func TestBuildCopyWildcardNoFind(t *testing.T) { + name := "testcopywildcardnofind" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox + COPY file*.txt /tmp/ + `, nil) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + + _, err = buildImageFromContext(name, ctx, true) + if err == nil { + t.Fatal("should have failed to find a file") + } + if !strings.Contains(err.Error(), "No source files were specified") { + t.Fatalf("Wrong error %v, must be about no source files", err) + } + + logDone("build - copy wild card no find") +} + +func TestBuildCopyWildcardCache(t *testing.T) { + name := "testcopywildcardcache" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox + COPY file1.txt /tmp/`, + map[string]string{ + "file1.txt": "test1", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + + // Now make sure we use a cache the 2nd time even with wild cards. 
+ // Use the same context so the file is the same and the checksum will match + ctx.Add("Dockerfile", `FROM busybox + COPY file*.txt /tmp/`) + + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + + if id1 != id2 { + t.Fatal("didn't use the cache") + } + + logDone("build - copy wild card cache") +} + +func TestBuildAddSingleFileToNonExistDir(t *testing.T) { + name := "testaddsinglefiletononexistdir" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } +} + +func TestBuildAddDirContentToRoot(t *testing.T) { + name := "testadddircontenttoroot" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - add directory contents to root") +} + +func TestBuildAddDirContentToExistDir(t *testing.T) { + name := "testadddircontenttoexistdir" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - add directory contents to existing dir") +} + +func TestBuildAddWholeDirToRoot(t *testing.T) { + name := "testaddwholedirtoroot" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '-rw-r--r--' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { 
+		t.Fatal(err)
+	}
+	logDone("build - add whole directory to root")
+}
+
+// Testing #5941
+func TestBuildAddEtcToRoot(t *testing.T) {
+	name := "testaddetctoroot"
+	defer deleteImages(name)
+	ctx, err := fakeContext(`FROM scratch
+ADD . /`,
+		map[string]string{
+			"etc/test_file": "test1",
+		})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := buildImageFromContext(name, ctx, true); err != nil {
+		t.Fatal(err)
+	}
+	logDone("build - add etc directory to root")
+}
+
+func TestBuildCopySingleFileToRoot(t *testing.T) {
+	testDirName := "SingleFileToRoot"
+	sourceDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy", testDirName)
+	buildDirectory, err := ioutil.TempDir("", "test-build-add")
+	if err != nil {
+		t.Fatalf("failed to create temporary directory: %s", err)
+	}
+	defer os.RemoveAll(buildDirectory)
+
+	err = copyWithCP(sourceDirectory, buildDirectory)
+	if err != nil {
+		t.Fatalf("failed to copy files to temporary directory: %s", err)
+	}
+
+	buildDirectory = filepath.Join(buildDirectory, testDirName)
+	f, err := os.OpenFile(filepath.Join(buildDirectory, "test_file"), os.O_CREATE, 0644)
+	if err != nil {
+		t.Fatal(err)
+	}
+	f.Close()
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", ".")
+	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
+
+	if err != nil || exitCode != 0 {
+		t.Fatal("failed to build the image")
+	}
+
+	deleteImages("testcopyimg")
+
+	logDone("build - copy single file to root")
+}
+
+// Issue #3960: "ADD src ." hangs - adapted for COPY
+func TestBuildCopySingleFileToWorkdir(t *testing.T) {
+	testDirName := "SingleFileToWorkdir"
+	sourceDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy", testDirName)
+	buildDirectory, err := ioutil.TempDir("", "test-build-add")
+	if err != nil {
+		t.Fatalf("failed to create temporary directory: %s", err)
+	}
+	defer os.RemoveAll(buildDirectory)
+
+	err = copyWithCP(sourceDirectory, buildDirectory)
+	if err != nil {
+		t.Fatalf("failed to copy files to temporary directory: %s", err)
+	}
+
+	buildDirectory = filepath.Join(buildDirectory, testDirName)
+	f, err := os.OpenFile(filepath.Join(buildDirectory, "test_file"), os.O_CREATE, 0644)
+	if err != nil {
+		t.Fatal(err)
+	}
+	f.Close()
+	_, exitCode, err := dockerCmdInDirWithTimeout(5*time.Second, buildDirectory, "build", "-t", "testcopyimg", ".")
+	if err != nil || exitCode != 0 {
+		t.Fatalf("build failed: %s", err)
+	}
+
+	deleteImages("testcopyimg")
+
+	logDone("build - copy single file to workdir")
+}
+
+func TestBuildCopySingleFileToExistDir(t *testing.T) {
+	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy")
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "SingleFileToExistDir")
+	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
+
+	if err != nil || exitCode != 0 {
+		t.Fatal("failed to build the image")
+	}
+
+	deleteImages("testcopyimg")
+
+	logDone("build - copy single file to existing dir")
+}
+
+func TestBuildCopySingleFileToNonExistDir(t *testing.T) {
+	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy")
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "SingleFileToNonExistDir")
+	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
+
+	if err != nil || exitCode != 0 {
+		t.Fatal("failed to build the image")
+	}
+
+	deleteImages("testcopyimg")
+
+	logDone("build - copy single file to non-existing dir")
+}
+
+func TestBuildCopyDirContentToRoot(t *testing.T) {
+	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy")
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "DirContentToRoot")
+	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
+
+	if err != nil || exitCode != 0 {
+		t.Fatal("failed to build the image")
+	}
+
+	deleteImages("testcopyimg")
+
+	logDone("build - copy directory contents to root")
+}
+
+func TestBuildCopyDirContentToExistDir(t *testing.T) {
+	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy")
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "DirContentToExistDir")
+	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
+
+	if err != nil || exitCode != 0 {
+		t.Fatal("failed to build the image")
+	}
+
+	deleteImages("testcopyimg")
+
+	logDone("build - copy directory contents to existing dir")
+}
+
+func TestBuildCopyWholeDirToRoot(t *testing.T) {
+	testDirName := "WholeDirToRoot"
+	sourceDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy", testDirName)
+	buildDirectory, err := ioutil.TempDir("", "test-build-add")
+	if err != nil {
+		t.Fatalf("failed to create temporary directory: %s", err)
+	}
+	defer os.RemoveAll(buildDirectory)
+
+	err = copyWithCP(sourceDirectory, buildDirectory)
+	if err != nil {
+		t.Fatalf("failed to copy files to temporary directory: %s", err)
+	}
+
+	buildDirectory = filepath.Join(buildDirectory, testDirName)
+	testDir := filepath.Join(buildDirectory, "test_dir")
+	if err := os.MkdirAll(testDir, 0755); err != nil {
+		t.Fatal(err)
+	}
+	f, err := os.OpenFile(filepath.Join(testDir, "test_file"), os.O_CREATE, 0644)
+	if err != nil {
+		t.Fatal(err)
+	}
+	f.Close()
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", ".")
+	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
+
+	if err != nil || exitCode != 0 {
+		t.Fatal("failed to build the image")
+	}
+
+	deleteImages("testcopyimg")
+
+	logDone("build - copy whole directory to root")
+}
+
+func TestBuildCopyEtcToRoot(t *testing.T) {
+	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy")
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "EtcToRoot")
+	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
+
+	if err != nil || exitCode != 0 {
+		t.Fatal("failed to build the image")
+	}
+
+	deleteImages("testcopyimg")
+	logDone("build - copy etc directory to root")
+}
+
+func TestBuildCopyDisallowRemote(t *testing.T) {
+	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy")
+	buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", "DisallowRemote")
+	buildCmd.Dir = buildDirectory
+	out, exitCode, err := runCommandWithOutput(buildCmd)
+
+	if err == nil || exitCode == 0 {
+		t.Fatalf("building the image should've failed; output: %s", out)
+	}
+
+	deleteImages("testcopyimg")
+	logDone("build - copy - disallow copy from remote")
+}
+
+// Issue #5270 - ensure we throw a better error than "unexpected EOF"
+// when we can't access files in the context.
+func TestBuildWithInaccessibleFilesInContext(t *testing.T) {
+	{
+		name := "testbuildinaccessiblefiles"
+		defer deleteImages(name)
+		ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"fileWithoutReadAccess": "foo"})
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer ctx.Close()
+		// This is used to ensure we detect inaccessible files early during build in the cli client
+		pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess")
+
+		err = os.Chown(pathToFileWithoutReadAccess, 0, 0)
+		errorOut(err, t, fmt.Sprintf("failed to chown file to root: %s", err))
+		err = os.Chmod(pathToFileWithoutReadAccess, 0700)
+		errorOut(err, t, fmt.Sprintf("failed to chmod file to 0700: %s", err))
+		buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name))
+		buildCmd.Dir = ctx.Dir
+		out, exitCode, err := runCommandWithOutput(buildCmd)
+		if err == nil || exitCode == 0 {
+			t.Fatalf("build should have failed: %s %s", err, out)
+		}
+
+		// check if we've detected the failure before we started building
+		if !strings.Contains(out, "no permission to read from ") {
+			t.Fatalf("output should've contained the string: no permission to read from, but contained: %s", out)
+		}
+
+		if !strings.Contains(out, "Error checking context is accessible") {
+			t.Fatalf("output should've contained the string: Error checking context is accessible")
+		}
+	}
+	{
+		name := "testbuildinaccessibledirectory"
+		defer deleteImages(name)
+		ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"directoryWeCantStat/bar": "foo"})
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer ctx.Close()
+		// This is used to ensure we detect inaccessible directories early during build in the cli client
+		pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat")
+		pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar")
+
+		err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0)
+		errorOut(err, t, fmt.Sprintf("failed to chown directory to root: %s", err))
+		err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444)
+		errorOut(err, t, fmt.Sprintf("failed to chmod directory to 0444: %s", err))
+		err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700)
+		errorOut(err, t, fmt.Sprintf("failed to chmod file to 0700: %s", err))
+
+		buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name))
+		buildCmd.Dir = ctx.Dir
+		out, exitCode, err := runCommandWithOutput(buildCmd)
+		if err == nil || exitCode == 0 {
+			t.Fatalf("build should have failed: %s %s", err, out)
+		}
+
+		// check if we've detected the failure before we started building
+		if !strings.Contains(out, "can't stat") {
+			t.Fatalf("output should've contained the string: can't stat, but contained: %s", out)
+		}
+
+		if !strings.Contains(out, "Error checking context is accessible") {
+			t.Fatalf("output should've contained the string: Error checking context is accessible")
+		}
+
+	}
+	{
+		name := "testlinksok"
+		defer deleteImages(name)
+		ctx, err := fakeContext("FROM scratch\nADD . /foo/", nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer ctx.Close()
+		if err := os.Symlink(filepath.Join(ctx.Dir, "g"), "../../../../../../../../../../../../../../../../../../../azA"); err != nil {
+			t.Fatal(err)
+		}
+		// This is used to ensure we don't follow links when checking if everything in the context is accessible
+		// This test doesn't require that we run commands as an unprivileged user
+		if _, err := buildImageFromContext(name, ctx, true); err != nil {
+			t.Fatal(err)
+		}
+	}
+	{
+		name := "testbuildignoredinaccessible"
+		defer deleteImages(name)
+		ctx, err := fakeContext("FROM scratch\nADD . /foo/",
+			map[string]string{
+				"directoryWeCantStat/bar": "foo",
+				".dockerignore": "directoryWeCantStat",
+			})
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer ctx.Close()
+		// This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern
+		pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat")
+		pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar")
+		err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0)
+		errorOut(err, t, fmt.Sprintf("failed to chown directory to root: %s", err))
+		err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444)
+		errorOut(err, t, fmt.Sprintf("failed to chmod directory to 0444: %s", err))
+		err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700)
+		errorOut(err, t, fmt.Sprintf("failed to chmod file to 0700: %s", err))
+
+		buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name))
+		buildCmd.Dir = ctx.Dir
+		out, exitCode, err := runCommandWithOutput(buildCmd)
+		if err != nil || exitCode != 0 {
+			t.Fatalf("build should have worked: %s %s", err, out)
+		}
+
+	}
+	logDone("build - ADD from context with inaccessible files must fail")
+	logDone("build - ADD from context with accessible links must work")
+	logDone("build - ADD from context with ignored inaccessible files must work")
+}
+
+func TestBuildForceRm(t *testing.T) {
+	containerCountBefore, err := getContainerCount()
+	if err != nil {
+		t.Fatalf("failed to get the container count: %s", err)
+	}
+	name := "testbuildforcerm"
+	defer deleteImages(name)
+	ctx, err := fakeContext("FROM scratch\nRUN true\nRUN thiswillfail", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ctx.Close()
+
+	buildCmd := exec.Command(dockerBinary, "build", "-t", name, "--force-rm", ".")
+	buildCmd.Dir = ctx.Dir
+	_, exitCode, err := runCommandWithOutput(buildCmd)
+
+	if err == nil || exitCode == 0 {
+		t.Fatal("build should have failed, but it didn't")
+	}
+
+	containerCountAfter, err := getContainerCount()
+	if err != nil {
+		t.Fatalf("failed to get the container count: %s", err)
+	}
+
+	if containerCountBefore != containerCountAfter {
+		t.Fatalf("--force-rm shouldn't have left containers behind")
+	}
+
+	logDone("build - ensure --force-rm doesn't leave containers behind")
+}
+
+func TestBuildRm(t *testing.T) {
+	name := "testbuildrm"
+	defer deleteImages(name)
+	ctx, err := fakeContext("FROM scratch\nADD foo /\nADD foo /", map[string]string{"foo": "bar"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ctx.Close()
+	{
+		containerCountBefore, err := getContainerCount()
+		if err != nil {
+			t.Fatalf("failed to get the container count: %s", err)
+		}
+
+		out, exitCode, err := dockerCmdInDir(t, ctx.Dir, "build", "--rm", "-t", name, ".")
+
+		if err != nil || exitCode != 0 {
+			t.Fatal("failed to build the image", out)
+		}
+
+		containerCountAfter, err := getContainerCount()
+		if err != nil {
+			t.Fatalf("failed to get the container count: %s", err)
+		}
+
+		if containerCountBefore != containerCountAfter {
+			t.Fatalf("--rm shouldn't have left containers behind")
+		}
+		deleteImages(name)
+	}
+
+	{
+		containerCountBefore, err := getContainerCount()
+		if err != nil {
+			t.Fatalf("failed to get the container count: 
%s", err) + } + + if containerCountBefore != containerCountAfter { + t.Fatalf("--rm shouldn't have left containers behind") + } + deleteImages(name) + } + + { + containerCountBefore, err := getContainerCount() + if err != nil { + t.Fatalf("failed to get the container count: %s", err) + } + + out, exitCode, err := dockerCmdInDir(t, ctx.Dir, "build", "--rm=false", "-t", name, ".") + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image", out) + } + + containerCountAfter, err := getContainerCount() + if err != nil { + t.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore == containerCountAfter { + t.Fatalf("--rm=false should have left containers behind") + } + deleteAllContainers() + deleteImages(name) + + } + + logDone("build - ensure --rm doesn't leave containers behind and that --rm=true is the default") + logDone("build - ensure --rm=false overrides the default") +} + +func TestBuildWithVolumes(t *testing.T) { + var ( + result map[string]map[string]struct{} + name = "testbuildvolumes" + emptyMap = make(map[string]struct{}) + expected = map[string]map[string]struct{}{ + "/test1": emptyMap, + "/test2": emptyMap, + "/test3": emptyMap, + "/test4": emptyMap, + "/test5": emptyMap, + "/test6": emptyMap, + "[/test7": emptyMap, + "/test8]": emptyMap, + } + ) + defer deleteImages(name) + _, err := buildImage(name, + `FROM scratch + VOLUME /test1 + VOLUME /test2 + VOLUME /test3 /test4 + VOLUME ["/test5", "/test6"] + VOLUME [/test7 /test8] + `, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectFieldJSON(name, "Config.Volumes") + if err != nil { + t.Fatal(err) + } + + err = unmarshalJSON([]byte(res), &result) + if err != nil { + t.Fatal(err) + } + + equal := deepEqual(&expected, &result) + + if !equal { + t.Fatalf("Volumes %s, expected %s", result, expected) + } + + logDone("build - with volumes") +} + +func TestBuildMaintainer(t *testing.T) { + name := "testbuildmaintainer" + expected := "dockerio" + defer deleteImages(name) + _, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Author") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Maintainer %s, expected %s", res, expected) + } + logDone("build - maintainer") +} + +func TestBuildUser(t *testing.T) { + name := "testbuilduser" + expected := "dockerio" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + USER dockerio + RUN [ $(whoami) = 'dockerio' ]`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.User") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("User %s, expected %s", res, expected) + } + logDone("build - user") +} + +func TestBuildRelativeWorkdir(t *testing.T) { + name := "testbuildrelativeworkdir" + expected := "/test2/test3" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + RUN [ "$PWD" = '/' ] + WORKDIR test1 + RUN [ "$PWD" = '/test1' ] + WORKDIR /test2 + RUN [ "$PWD" = '/test2' ] + WORKDIR test3 + RUN [ "$PWD" = '/test2/test3' ]`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.WorkingDir") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Workdir %s, expected %s", res, expected) + } + logDone("build - relative workdir") +} + +func TestBuildWorkdirWithEnvVariables(t *testing.T) { + name := "testbuildworkdirwithenvvariables" + 
expected := "/test1/test2/$MISSING_VAR" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + ENV DIRPATH /test1 + ENV SUBDIRNAME test2 + WORKDIR $DIRPATH + WORKDIR $SUBDIRNAME/$MISSING_VAR`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.WorkingDir") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Workdir %s, expected %s", res, expected) + } + logDone("build - workdir with env variables") +} + +func TestBuildEnv(t *testing.T) { + name := "testbuildenv" + expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + ENV PATH /test:$PATH + ENV PORT 2375 + RUN [ $(env | grep PORT) = 'PORT=2375' ]`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.Env") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Env %s, expected %s", res, expected) + } + logDone("build - env") +} + +func TestBuildContextCleanup(t *testing.T) { + name := "testbuildcontextcleanup" + defer deleteImages(name) + entries, err := ioutil.ReadDir("/var/lib/docker/tmp") + if err != nil { + t.Fatalf("failed to list contents of tmp dir: %s", err) + } + _, err = buildImage(name, + `FROM scratch + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + t.Fatal(err) + } + entriesFinal, err := ioutil.ReadDir("/var/lib/docker/tmp") + if err != nil { + t.Fatalf("failed to list contents of tmp dir: %s", err) + } + if err = compareDirectoryEntries(entries, entriesFinal); err != nil { + t.Fatalf("context should have been deleted, but wasn't") + } + + logDone("build - verify context cleanup works properly") +} + +func TestBuildContextCleanupFailedBuild(t *testing.T) { + name := "testbuildcontextcleanup" + defer deleteImages(name) + entries, err := ioutil.ReadDir("/var/lib/docker/tmp") + if err != nil { + t.Fatalf("failed to list contents of tmp dir: %s", err) + } + _, err = buildImage(name, + `FROM scratch + RUN /non/existing/command`, + true) + if err == nil { + t.Fatalf("expected build to fail, but it didn't") + } + entriesFinal, err := ioutil.ReadDir("/var/lib/docker/tmp") + if err != nil { + t.Fatalf("failed to list contents of tmp dir: %s", err) + } + if err = compareDirectoryEntries(entries, entriesFinal); err != nil { + t.Fatalf("context should have been deleted, but wasn't") + } + + logDone("build - verify context cleanup works properly after a failed build") +} + +func TestBuildCmd(t *testing.T) { + name := "testbuildcmd" + expected := "[/bin/echo Hello World]" + defer deleteImages(name) + _, err := buildImage(name, + `FROM scratch + CMD ["/bin/echo", "Hello World"]`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.Cmd") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Cmd %s, expected %s", res, expected) + } + logDone("build - cmd") +} + +func TestBuildExpose(t *testing.T) { + name := "testbuildexpose" + expected := "map[2375/tcp:map[]]" + defer deleteImages(name) + _, err := buildImage(name, + `FROM scratch + EXPOSE 2375`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.ExposedPorts") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Exposed ports %s, expected %s", res, expected) + } + logDone("build - expose") +} + +func TestBuildEmptyEntrypointInheritance(t *testing.T) { + name := "testbuildentrypointinheritance" + name2 := "testbuildentrypointinheritance2" + 
defer deleteImages(name, name2) + + _, err := buildImage(name, + `FROM busybox + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.Entrypoint") + if err != nil { + t.Fatal(err) + } + + expected := "[/bin/echo]" + if res != expected { + t.Fatalf("Entrypoint %s, expected %s", res, expected) + } + + _, err = buildImage(name2, + fmt.Sprintf(`FROM %s + ENTRYPOINT []`, name), + true) + if err != nil { + t.Fatal(err) + } + res, err = inspectField(name2, "Config.Entrypoint") + if err != nil { + t.Fatal(err) + } + + expected = "[]" + + if res != expected { + t.Fatalf("Entrypoint %s, expected %s", res, expected) + } + + logDone("build - empty entrypoint inheritance") +} + +func TestBuildEmptyEntrypoint(t *testing.T) { + name := "testbuildentrypoint" + defer deleteImages(name) + expected := "[]" + + _, err := buildImage(name, + `FROM busybox + ENTRYPOINT []`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.Entrypoint") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Entrypoint %s, expected %s", res, expected) + } + + logDone("build - empty entrypoint") +} + +func TestBuildEntrypoint(t *testing.T) { + name := "testbuildentrypoint" + expected := "[/bin/echo]" + defer deleteImages(name) + _, err := buildImage(name, + `FROM scratch + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.Entrypoint") + if err != nil { + t.Fatal(err) + } + if res != expected { + t.Fatalf("Entrypoint %s, expected %s", res, expected) + } + + logDone("build - entrypoint") +} + +// #6445 ensure ONBUILD triggers aren't committed to grandchildren +func TestBuildOnBuildLimitedInheritence(t *testing.T) { + var ( + out2, out3 string + ) + { + name1 := "testonbuildtrigger1" + dockerfile1 := ` + FROM busybox + RUN echo "GRANDPARENT" + ONBUILD RUN echo "ONBUILD PARENT" + ` + ctx, err := fakeContext(dockerfile1, nil) + if err != nil { + t.Fatal(err) + } + + out1, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name1, ".") + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out1, err)) + defer deleteImages(name1) + } + { + name2 := "testonbuildtrigger2" + dockerfile2 := ` + FROM testonbuildtrigger1 + ` + ctx, err := fakeContext(dockerfile2, nil) + if err != nil { + t.Fatal(err) + } + + out2, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name2, ".") + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out2, err)) + defer deleteImages(name2) + } + { + name3 := "testonbuildtrigger3" + dockerfile3 := ` + FROM testonbuildtrigger2 + ` + ctx, err := fakeContext(dockerfile3, nil) + if err != nil { + t.Fatal(err) + } + + out3, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name3, ".") + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out3, err)) + defer deleteImages(name3) + } + + // ONBUILD should be run in second build. + if !strings.Contains(out2, "ONBUILD PARENT") { + t.Fatalf("ONBUILD instruction did not run in child of ONBUILD parent") + } + + // ONBUILD should *not* be run in third build. 
+	if strings.Contains(out3, "ONBUILD PARENT") {
+		t.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent")
+	}
+
+	logDone("build - onbuild limited inheritance")
+}
+
+func TestBuildWithCache(t *testing.T) {
+	name := "testbuildwithcache"
+	defer deleteImages(name)
+	id1, err := buildImage(name,
+		`FROM scratch
+		MAINTAINER dockerio
+		EXPOSE 5432
+		ENTRYPOINT ["/bin/echo"]`,
+		true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id2, err := buildImage(name,
+		`FROM scratch
+		MAINTAINER dockerio
+		EXPOSE 5432
+		ENTRYPOINT ["/bin/echo"]`,
+		true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if id1 != id2 {
+		t.Fatal("The cache should have been used but hasn't.")
+	}
+	logDone("build - with cache")
+}
+
+func TestBuildWithoutCache(t *testing.T) {
+	name := "testbuildwithoutcache"
+	defer deleteImages(name)
+	id1, err := buildImage(name,
+		`FROM scratch
+		MAINTAINER dockerio
+		EXPOSE 5432
+		ENTRYPOINT ["/bin/echo"]`,
+		true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id2, err := buildImage(name,
+		`FROM scratch
+		MAINTAINER dockerio
+		EXPOSE 5432
+		ENTRYPOINT ["/bin/echo"]`,
+		false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if id1 == id2 {
+		t.Fatal("The cache should have been invalidated but hasn't.")
+	}
+	logDone("build - without cache")
+}
+
+func TestBuildADDLocalFileWithCache(t *testing.T) {
+	name := "testbuildaddlocalfilewithcache"
+	defer deleteImages(name)
+	dockerfile := `
+		FROM busybox
+		MAINTAINER dockerio
+		ADD foo /usr/lib/bla/bar
+		RUN [ "$(cat /usr/lib/bla/bar)" = "hello" ]`
+	ctx, err := fakeContext(dockerfile, map[string]string{
+		"foo": "hello",
+	})
+	defer ctx.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+	id1, err := buildImageFromContext(name, ctx, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id2, err := buildImageFromContext(name, ctx, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if id1 != id2 {
+		t.Fatal("The cache should have been used but hasn't.")
+	}
+	logDone("build - add local file with cache")
+}
+
+func TestBuildADDMultipleLocalFileWithCache(t *testing.T) {
+	name := "testbuildaddmultiplelocalfilewithcache"
+	defer deleteImages(name)
+	dockerfile := `
+		FROM busybox
+		MAINTAINER dockerio
+		ADD foo Dockerfile /usr/lib/bla/
+		RUN [ "$(cat /usr/lib/bla/foo)" = "hello" ]`
+	ctx, err := fakeContext(dockerfile, map[string]string{
+		"foo": "hello",
+	})
+	defer ctx.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+	id1, err := buildImageFromContext(name, ctx, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id2, err := buildImageFromContext(name, ctx, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if id1 != id2 {
+		t.Fatal("The cache should have been used but hasn't.")
+	}
+	logDone("build - add multiple local files with cache")
+}
+
+func TestBuildADDLocalFileWithoutCache(t *testing.T) {
+	name := "testbuildaddlocalfilewithoutcache"
+	defer deleteImages(name)
+	dockerfile := `
+		FROM busybox
+		MAINTAINER dockerio
+		ADD foo /usr/lib/bla/bar
+		RUN [ "$(cat /usr/lib/bla/bar)" = "hello" ]`
+	ctx, err := fakeContext(dockerfile, map[string]string{
+		"foo": "hello",
+	})
+	defer ctx.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+	id1, err := buildImageFromContext(name, ctx, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id2, err := buildImageFromContext(name, ctx, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if id1 == id2 {
+		t.Fatal("The cache should have been invalidated but hasn't.")
+	}
+	logDone("build - add local file without cache")
+}
+
+func TestBuildCopyDirButNotFile(t *testing.T) {
+	name := "testbuildcopydirbutnotfile"
+	defer deleteImages(name)
+	dockerfile := `
+		FROM scratch
+		COPY dir /tmp/`
+	ctx, err := fakeContext(dockerfile, map[string]string{
+		"dir/foo": "hello",
+	})
+	defer ctx.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+	id1, err := buildImageFromContext(name, ctx, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Check that adding a file with a similar name doesn't mess with the cache
+	if err := ctx.Add("dir_file", "hello2"); err != nil {
+		t.Fatal(err)
+	}
+	id2, err := buildImageFromContext(name, ctx, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if id1 != id2 {
+		t.Fatal("The cache should have been used but wasn't")
+	}
+	logDone("build - copy dir but not file")
+}
+
+func TestBuildADDCurrentDirWithCache(t *testing.T) {
+	name := "testbuildaddcurrentdirwithcache"
+	defer deleteImages(name)
+	dockerfile := `
+		FROM scratch
+		MAINTAINER dockerio
+		ADD . /usr/lib/bla`
+	ctx, err := fakeContext(dockerfile, map[string]string{
+		"foo": "hello",
+	})
+	defer ctx.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+	id1, err := buildImageFromContext(name, ctx, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Check that adding a file invalidates the cache of "ADD ."
+	if err := ctx.Add("bar", "hello2"); err != nil {
+		t.Fatal(err)
+	}
+	id2, err := buildImageFromContext(name, ctx, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if id1 == id2 {
+		t.Fatal("The cache should have been invalidated but hasn't.")
+	}
+	// Check that changing a file invalidates the cache of "ADD ."
+	if err := ctx.Add("foo", "hello1"); err != nil {
+		t.Fatal(err)
+	}
+	id3, err := buildImageFromContext(name, ctx, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if id2 == id3 {
+		t.Fatal("The cache should have been invalidated but hasn't.")
+	}
+	// Check that rewriting a file with the same content still invalidates the cache of "ADD ."
+	time.Sleep(1 * time.Second) // wait a second because of mtime precision
+	if err := ctx.Add("foo", "hello1"); err != nil {
+		t.Fatal(err)
+	}
+	id4, err := buildImageFromContext(name, ctx, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if id3 == id4 {
+		t.Fatal("The cache should have been invalidated but hasn't.")
+	}
+	id5, err := buildImageFromContext(name, ctx, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if id4 != id5 {
+		t.Fatal("The cache should have been used but hasn't.")
+	}
+	logDone("build - add current directory with cache")
+}
+
+func TestBuildADDCurrentDirWithoutCache(t *testing.T) {
+	name := "testbuildaddcurrentdirwithoutcache"
+	defer deleteImages(name)
+	dockerfile := `
+		FROM scratch
+		MAINTAINER dockerio
+		ADD . /usr/lib/bla`
+	ctx, err := fakeContext(dockerfile, map[string]string{
+		"foo": "hello",
+	})
+	defer ctx.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+	id1, err := buildImageFromContext(name, ctx, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id2, err := buildImageFromContext(name, ctx, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if id1 == id2 {
+		t.Fatal("The cache should have been invalidated but hasn't.")
+	}
+	logDone("build - add current directory without cache")
+}
+
+func TestBuildADDRemoteFileWithCache(t *testing.T) {
+	name := "testbuildaddremotefilewithcache"
+	defer deleteImages(name)
+	server, err := fakeStorage(map[string]string{
+		"baz": "hello",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Close()
+	id1, err := buildImage(name,
+		fmt.Sprintf(`FROM scratch
+		MAINTAINER dockerio
+		ADD %s/baz /usr/lib/baz/quux`, server.URL),
+		true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id2, err := buildImage(name,
+		fmt.Sprintf(`FROM scratch
+		MAINTAINER dockerio
+		ADD %s/baz /usr/lib/baz/quux`, server.URL),
+		true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if id1 != id2 {
+		t.Fatal("The cache should have been used but hasn't.")
+	}
+	logDone("build - add remote file with cache")
+}
+
+func TestBuildADDRemoteFileWithoutCache(t *testing.T) {
+	name := "testbuildaddremotefilewithoutcache"
+	defer deleteImages(name)
+	server, err := fakeStorage(map[string]string{
+		"baz": "hello",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Close()
+	id1, err := buildImage(name,
+		fmt.Sprintf(`FROM scratch
+		MAINTAINER dockerio
+		ADD %s/baz /usr/lib/baz/quux`, server.URL),
+		true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id2, err := buildImage(name,
+		fmt.Sprintf(`FROM scratch
+		MAINTAINER dockerio
+		ADD %s/baz /usr/lib/baz/quux`, server.URL),
+		false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if id1 == id2 {
+		t.Fatal("The cache should have been invalidated but hasn't.")
+	}
+	logDone("build - add remote file without cache")
+}
+
+func TestBuildADDLocalAndRemoteFilesWithCache(t *testing.T) {
+	name := "testbuildaddlocalandremotefilewithcache"
+	defer deleteImages(name)
+	server, err := fakeStorage(map[string]string{
+		"baz": "hello",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Close()
+	ctx, err := fakeContext(fmt.Sprintf(`FROM scratch
+		MAINTAINER dockerio
+		ADD foo /usr/lib/bla/bar
+		ADD %s/baz /usr/lib/baz/quux`, server.URL),
+		map[string]string{
+			"foo": "hello world",
+		})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ctx.Close()
+	id1, err := buildImageFromContext(name, ctx, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id2, err := buildImageFromContext(name, ctx, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if id1 != id2 {
+		t.Fatal("The cache should have been used but hasn't.")
+	}
+	logDone("build - add local and remote file with cache")
+}
+
+func testContextTar(t *testing.T, compression archive.Compression) {
+	ctx, err := fakeContext(
+		`FROM busybox
+ADD foo /foo
+CMD ["cat", "/foo"]`,
+		map[string]string{
+			"foo": "bar",
+		},
+	)
+	defer ctx.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+	context, err := archive.Tar(ctx.Dir, compression)
+	if err != nil {
+		t.Fatalf("failed to build context tar: %v", err)
+	}
+	name := "contexttar"
+	buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-")
+	defer deleteImages(name)
+	buildCmd.Stdin = context
+
+	out, exitCode, err := runCommandWithOutput(buildCmd)
+	if err != nil || exitCode != 0 {
+		t.Fatalf("build failed to complete: %v %v", out, err)
+	}
+	logDone(fmt.Sprintf("build - build an image with a context tar, compression: %v", compression))
+}
+
+func TestBuildContextTarGzip(t *testing.T) {
+	testContextTar(t, archive.Gzip)
+}
+
+func TestBuildContextTarNoCompression(t *testing.T) {
+	testContextTar(t, archive.Uncompressed)
+}
+
+func TestBuildNoContext(t *testing.T) {
+	buildCmd := exec.Command(dockerBinary, "build", "-t", "nocontext", "-")
+	buildCmd.Stdin = strings.NewReader("FROM busybox\nCMD echo ok\n")
+
+	out, exitCode, err := runCommandWithOutput(buildCmd)
+	if err != nil || exitCode != 0 {
+		t.Fatalf("build failed to complete: %v %v", out, err)
+	}
+
+	out, exitCode, err = cmd(t, "run", "nocontext")
+	if out != "ok\n" {
+		t.Fatalf("run produced invalid output: %q, expected %q", out, "ok\n")
+	}
+
+	deleteImages("nocontext")
+	logDone("build - build an image with no context")
+}
+
+// TODO: TestCaching
+func TestBuildADDLocalAndRemoteFilesWithoutCache(t *testing.T) {
+	name := "testbuildaddlocalandremotefilewithoutcache"
+	defer deleteImages(name)
+	server, err := fakeStorage(map[string]string{
+		"baz": "hello",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Close()
+	ctx, err := fakeContext(fmt.Sprintf(`FROM scratch
+		MAINTAINER dockerio
+		ADD foo /usr/lib/bla/bar
+		ADD %s/baz /usr/lib/baz/quux`, server.URL),
+		map[string]string{
+			"foo": "hello world",
+		})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ctx.Close()
+	id1, err := buildImageFromContext(name, ctx, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id2, err := buildImageFromContext(name, ctx, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if id1 == id2 {
+		t.Fatal("The cache should have been invalidated but hasn't.")
+	}
+	logDone("build - add local and remote file without cache")
+}
+
+func TestBuildWithVolumeOwnership(t *testing.T) {
+	name := "testbuildimg"
+	defer deleteImages(name)
+
+	_, err := buildImage(name,
+		`FROM busybox:latest
+		RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test
+		VOLUME /test`,
+		true)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cmd := exec.Command(dockerBinary, "run", "--rm", "testbuildimg", "ls", "-la", "/test")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(out, err)
+	}
+
+	if expected := "drw-------"; !strings.Contains(out, expected) {
+		t.Fatalf("expected %s received %s", expected, out)
+	}
+
+	if expected := "daemon daemon"; !strings.Contains(out, expected) {
+		t.Fatalf("expected %s received %s", expected, out)
+	}
+
+	logDone("build - volume ownership")
+}
+
+// testing #1405 - config.Cmd does not get cleaned up if
+// utilizing cache
+func TestBuildEntrypointRunCleanup(t *testing.T) {
+	name := "testbuildcmdcleanup"
+	defer deleteImages(name)
+	if _, err := buildImage(name,
+		`FROM busybox
+		RUN echo "hello"`,
+		true); err != nil {
+		t.Fatal(err)
+	}
+
+	ctx, err := fakeContext(`FROM busybox
+	RUN echo "hello"
+	ADD foo /foo
+	ENTRYPOINT ["/bin/echo"]`,
+		map[string]string{
+			"foo": "hello",
+		})
+	defer ctx.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := buildImageFromContext(name, ctx, true); err != nil {
+		t.Fatal(err)
+	}
+	res, err := inspectField(name, "Config.Cmd")
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Cmd must be cleaned up
+	if expected := ""; res != expected {
+		t.Fatalf("Cmd %s, expected %s", res, expected)
+	}
+	logDone("build - cleanup cmd after RUN")
+}
+
+func TestBuildForbiddenContextPath(t *testing.T) {
+	name := "testbuildforbidpath"
+	defer deleteImages(name)
+	ctx, err := fakeContext(`FROM scratch
+	ADD ../../ test/
+	`,
+		map[string]string{
+			"test.txt": "test1",
+			"other.txt": "other",
+		})
+	defer ctx.Close()
+	
if err != nil { + t.Fatal(err) + } + + expected := "Forbidden path outside the build context: ../../ " + if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { + t.Fatalf("Wrong error: (should contain \"%s\") got:\n%v", expected, err) + } + + logDone("build - forbidden context path") +} + +func TestBuildADDFileNotFound(t *testing.T) { + name := "testbuildaddnotfound" + defer deleteImages(name) + ctx, err := fakeContext(`FROM scratch + ADD foo /usr/local/bar`, + map[string]string{"bar": "hello"}) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + if !strings.Contains(err.Error(), "foo: no such file or directory") { + t.Fatalf("Wrong error %v, must be about missing foo file or directory", err) + } + } else { + t.Fatal("Error must not be nil") + } + logDone("build - add file not found") +} + +func TestBuildInheritance(t *testing.T) { + name := "testbuildinheritance" + defer deleteImages(name) + + _, err := buildImage(name, + `FROM scratch + EXPOSE 2375`, + true) + if err != nil { + t.Fatal(err) + } + ports1, err := inspectField(name, "Config.ExposedPorts") + if err != nil { + t.Fatal(err) + } + + _, err = buildImage(name, + fmt.Sprintf(`FROM %s + ENTRYPOINT ["/bin/echo"]`, name), + true) + if err != nil { + t.Fatal(err) + } + + res, err := inspectField(name, "Config.Entrypoint") + if err != nil { + t.Fatal(err) + } + if expected := "[/bin/echo]"; res != expected { + t.Fatalf("Entrypoint %s, expected %s", res, expected) + } + ports2, err := inspectField(name, "Config.ExposedPorts") + if err != nil { + t.Fatal(err) + } + if ports1 != ports2 { + t.Fatalf("Ports must be same: %s != %s", ports1, ports2) + } + logDone("build - inheritance") +} + +func TestBuildFails(t *testing.T) { + name := "testbuildfails" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + RUN sh -c "exit 23"`, + true) + if err != nil { + if !strings.Contains(err.Error(), "returned a non-zero code: 23") { + t.Fatalf("Wrong error %v, must be about non-zero code 23", err) + } + } else { + t.Fatal("Error must not be nil") + } + logDone("build - fails") +} + +func TestBuildFailsDockerfileEmpty(t *testing.T) { + name := "testbuildfails" + defer deleteImages(name) + _, err := buildImage(name, ``, true) + if err != nil { + if !strings.Contains(err.Error(), "Dockerfile cannot be empty") { + t.Fatalf("Wrong error %v, must be about empty Dockerfile", err) + } + } else { + t.Fatal("Error must not be nil") + } + logDone("build - fails with empty dockerfile") +} + +func TestBuildOnBuild(t *testing.T) { + name := "testbuildonbuild" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + ONBUILD RUN touch foobar`, + true) + if err != nil { + t.Fatal(err) + } + _, err = buildImage(name, + fmt.Sprintf(`FROM %s + RUN [ -f foobar ]`, name), + true) + if err != nil { + t.Fatal(err) + } + logDone("build - onbuild") +} + +func TestBuildOnBuildForbiddenChained(t *testing.T) { + name := "testbuildonbuildforbiddenchained" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + ONBUILD ONBUILD RUN touch foobar`, + true) + if err != nil { + if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") { + t.Fatalf("Wrong error %v, must be about chaining ONBUILD", err) + } + } else { + t.Fatal("Error must not be nil") + } + logDone("build - onbuild forbidden chained") +} + +func TestBuildOnBuildForbiddenFrom(t *testing.T) { + name := 
"testbuildonbuildforbiddenfrom" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + ONBUILD FROM scratch`, + true) + if err != nil { + if !strings.Contains(err.Error(), "FROM isn't allowed as an ONBUILD trigger") { + t.Fatalf("Wrong error %v, must be about FROM forbidden", err) + } + } else { + t.Fatal("Error must not be nil") + } + logDone("build - onbuild forbidden from") +} + +func TestBuildOnBuildForbiddenMaintainer(t *testing.T) { + name := "testbuildonbuildforbiddenmaintainer" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + ONBUILD MAINTAINER docker.io`, + true) + if err != nil { + if !strings.Contains(err.Error(), "MAINTAINER isn't allowed as an ONBUILD trigger") { + t.Fatalf("Wrong error %v, must be about MAINTAINER forbidden", err) + } + } else { + t.Fatal("Error must not be nil") + } + logDone("build - onbuild forbidden maintainer") +} + +// gh #2446 +func TestBuildAddToSymlinkDest(t *testing.T) { + name := "testbuildaddtosymlinkdest" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox + RUN mkdir /foo + RUN ln -s /foo /bar + ADD foo /bar/ + RUN [ -f /bar/foo ] + RUN [ -f /foo/foo ]`, + map[string]string{ + "foo": "hello", + }) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - add to symlink destination") +} + +func TestBuildEscapeWhitespace(t *testing.T) { + name := "testbuildescaping" + defer deleteImages(name) + + _, err := buildImage(name, ` + FROM busybox + MAINTAINER "Docker \ +IO " + `, true) + + res, err := inspectField(name, "Author") + + if err != nil { + t.Fatal(err) + } + + if res != "Docker IO " { + t.Fatal("Parsed string did not match the escaped string") + } + + logDone("build - validate escaping whitespace") +} + +func TestBuildDockerignore(t *testing.T) { + name := "testbuilddockerignore" + defer deleteImages(name) + dockerfile := ` + FROM busybox + ADD . /bla + RUN [[ -f /bla/src/x.go ]] + RUN [[ -f /bla/Makefile ]] + RUN [[ ! -e /bla/src/_vendor ]] + RUN [[ ! -e /bla/.gitignore ]] + RUN [[ ! -e /bla/README.md ]] + RUN [[ ! -e /bla/.git ]]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Makefile": "all:", + ".git/HEAD": "ref: foo", + "src/x.go": "package main", + "src/_vendor/v.go": "package main", + ".gitignore": "", + "README.md": "readme", + ".dockerignore": ".git\npkg\n.gitignore\nsrc/_vendor\n*.md", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - test .dockerignore") +} + +func TestBuildDockerignoringDockerfile(t *testing.T) { + name := "testbuilddockerignoredockerfile" + defer deleteImages(name) + dockerfile := ` + FROM scratch` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "FROM scratch", + ".dockerignore": "Dockerfile\n", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + if _, err = buildImageFromContext(name, ctx, true); err == nil { + t.Fatalf("Didn't get expected error from ignoring Dockerfile") + } + logDone("build - test .dockerignore of Dockerfile") +} + +func TestBuildDockerignoringWholeDir(t *testing.T) { + name := "testbuilddockerignorewholedir" + defer deleteImages(name) + dockerfile := ` + FROM busybox + COPY . / + RUN [[ ! 
-e /.gitignore ]] + RUN [[ -f /Makefile ]]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "FROM scratch", + "Makefile": "all:", + ".dockerignore": ".*\n", + }) + defer ctx.Close() + if err != nil { + t.Fatal(err) + } + if _, err = buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - test .dockerignore whole dir with .*") +} + +func TestBuildLineBreak(t *testing.T) { + name := "testbuildlinebreak" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /var/run/sshd +RUN [ "$(cat /tmp/passwd)" = "root:testpass" ] +RUN [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]`, + true) + if err != nil { + t.Fatal(err) + } + logDone("build - line break with \\") +} + +func TestBuildEOLInLine(t *testing.T) { + name := "testbuildeolinline" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox +RUN sh -c 'echo root:testpass > /tmp/passwd' +RUN echo "foo \n bar"; echo "baz" +RUN mkdir -p /var/run/sshd +RUN [ "$(cat /tmp/passwd)" = "root:testpass" ] +RUN [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]`, + true) + if err != nil { + t.Fatal(err) + } + logDone("build - end of line in dockerfile instruction") +} + +func TestBuildCommentsShebangs(t *testing.T) { + name := "testbuildcomments" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox +# This is an ordinary comment. +RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh +RUN [ ! -x /hello.sh ] +# comment with line break \ +RUN chmod +x /hello.sh +RUN [ -x /hello.sh ] +RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ] +RUN [ "$(/hello.sh)" = "hello world" ]`, + true) + if err != nil { + t.Fatal(err) + } + logDone("build - comments and shebangs") +} + +func TestBuildUsersAndGroups(t *testing.T) { + name := "testbuildusers" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + +# Make sure our defaults work +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ] + +# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0) +USER root +RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ] + +# Setup dockerio user and group +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group + +# Make sure we can switch to our user and all the information is exactly as we expect it to be +USER dockerio +RUN id -G +RUN id -Gn +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] + +# Switch back to root and double check that worked exactly as we might expect it to +USER root +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ] + +# Add a "supplementary" group for our dockerio user +RUN echo 'supplementary:x:1002:dockerio' >> /etc/group + +# ... 
and then go verify that we get it like we expect +USER dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] +USER 1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] + +# super test the new "user:group" syntax +USER dockerio:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER 1001:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER dockerio:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER 1001:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER dockerio:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER dockerio:1002 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER 1001:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER 1001:1002 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] + +# make sure unknown uid/gid still works properly +USER 1042:1043 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`, + true) + if err != nil { + t.Fatal(err) + } + logDone("build - users and groups") +} + +func TestBuildEnvUsage(t *testing.T) { + name := "testbuildenvusage" + defer deleteImages(name) + dockerfile := `FROM busybox +ENV HOME /root +ENV PATH $HOME/bin:$PATH +ENV PATH /tmp:$PATH +RUN [ "$PATH" = "/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ] +ENV FOO /foo/baz +ENV BAR /bar +ENV BAZ $BAR +ENV FOOPATH $PATH:$FOO +RUN [ "$BAR" = "$BAZ" ] +RUN [ "$FOOPATH" = "$PATH:/foo/baz" ] +ENV FROM hello/docker/world +ENV TO /docker/world/hello +ADD $FROM $TO +RUN [ "$(cat $TO)" = "hello" ] +` + ctx, err := fakeContext(dockerfile, map[string]string{ + "hello/docker/world": "hello", + }) + if err != nil { + t.Fatal(err) + } + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + logDone("build - environment variables usage") +} + +func TestBuildAddScript(t *testing.T) { + name := "testbuildaddscript" + defer deleteImages(name) + dockerfile := ` +FROM busybox +ADD test /test +RUN ["chmod","+x","/test"] +RUN ["/test"] +RUN [ "$(cat /testfile)" = 'test!' ]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "test": "#!/bin/sh\necho 'test!' 
> /testfile", + }) + if err != nil { + t.Fatal(err) + } + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + logDone("build - add and run script") +} + +func TestBuildAddTar(t *testing.T) { + name := "testbuildaddtar" + defer deleteImages(name) + + ctx := func() *FakeContext { + dockerfile := ` +FROM busybox +ADD test.tar / +RUN cat /test/foo | grep Hi +ADD test.tar /test.tar +RUN cat /test.tar/test/foo | grep Hi +ADD test.tar /unlikely-to-exist +RUN cat /unlikely-to-exist/test/foo | grep Hi +ADD test.tar /unlikely-to-exist-trailing-slash/ +RUN cat /unlikely-to-exist-trailing-slash/test/foo | grep Hi +RUN mkdir /existing-directory +ADD test.tar /existing-directory +RUN cat /existing-directory/test/foo | grep Hi +ADD test.tar /existing-directory-trailing-slash/ +RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` + tmpDir, err := ioutil.TempDir("", "fake-context") + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + t.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + t.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + t.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + t.Fatalf("failed to close tar archive: %v", err) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + t.Fatalf("failed to open destination dockerfile: %v", err) + } + return &FakeContext{Dir: tmpDir} + }() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatalf("build failed to complete for TestBuildAddTar: %v", err) + } + + logDone("build - ADD tar") +} + +func TestBuildFromGIT(t *testing.T) { + name := "testbuildfromgit" + defer deleteImages(name) + git, err := fakeGIT("repo", map[string]string{ + "Dockerfile": `FROM busybox + ADD first /first + RUN [ -f /first ] + MAINTAINER docker`, + "first": "test git data", + }) + if err != nil { + t.Fatal(err) + } + defer git.Close() + + _, err = buildImageFromPath(name, git.RepoURL, true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Author") + if err != nil { + t.Fatal(err) + } + if res != "docker" { + t.Fatalf("Maintainer should be docker, got %s", res) + } + logDone("build - build from GIT") +} + +func TestBuildCleanupCmdOnEntrypoint(t *testing.T) { + name := "testbuildcmdcleanuponentrypoint" + defer deleteImages(name) + if _, err := buildImage(name, + `FROM scratch + CMD ["test"] + ENTRYPOINT ["echo"]`, + true); err != nil { + t.Fatal(err) + } + if _, err := buildImage(name, + fmt.Sprintf(`FROM %s + ENTRYPOINT ["cat"]`, name), + true); err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.Cmd") + if err != nil { + t.Fatal(err) + } + if expected := ""; res != expected { + t.Fatalf("Cmd %s, expected %s", res, expected) + } + res, err = inspectField(name, "Config.Entrypoint") + if err != nil { + t.Fatal(err) + } + if expected := "[cat]"; res != expected { + t.Fatalf("Entrypoint %s, expected %s", res, expected) + } + logDone("build - cleanup cmd on ENTRYPOINT") +} + +func TestBuildClearCmd(t *testing.T) { + name := "testbuildclearcmd" + defer deleteImages(name) + _, err := buildImage(name, + `From scratch + ENTRYPOINT ["/bin/bash"] + CMD []`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectFieldJSON(name, 
"Config.Cmd") + if err != nil { + t.Fatal(err) + } + if res != "[]" { + t.Fatalf("Cmd %s, expected %s", res, "[]") + } + logDone("build - clearcmd") +} + +func TestBuildEmptyCmd(t *testing.T) { + name := "testbuildemptycmd" + defer deleteImages(name) + if _, err := buildImage(name, "FROM scratch\nMAINTAINER quux\n", true); err != nil { + t.Fatal(err) + } + res, err := inspectFieldJSON(name, "Config.Cmd") + if err != nil { + t.Fatal(err) + } + if res != "null" { + t.Fatalf("Cmd %s, expected %s", res, "null") + } + logDone("build - empty cmd") +} + +func TestBuildOnBuildOutput(t *testing.T) { + name := "testbuildonbuildparent" + defer deleteImages(name) + if _, err := buildImage(name, "FROM busybox\nONBUILD RUN echo foo\n", true); err != nil { + t.Fatal(err) + } + + childname := "testbuildonbuildchild" + defer deleteImages(childname) + + _, out, err := buildImageWithOut(name, "FROM "+name+"\nMAINTAINER quux\n", true) + if err != nil { + t.Fatal(err) + } + + if !strings.Contains(out, "Trigger 0, RUN echo foo") { + t.Fatal("failed to find the ONBUILD output", out) + } + + logDone("build - onbuild output") +} + +func TestBuildInvalidTag(t *testing.T) { + name := "abcd:" + makeRandomString(200) + defer deleteImages(name) + _, out, err := buildImageWithOut(name, "FROM scratch\nMAINTAINER quux\n", true) + // if the error doesnt check for illegal tag name, or the image is built + // then this should fail + if !strings.Contains(out, "Illegal tag name") || strings.Contains(out, "Sending build context to Docker daemon") { + t.Fatalf("failed to stop before building. Error: %s, Output: %s", err, out) + } + logDone("build - invalid tag") +} + +func TestBuildCmdShDashC(t *testing.T) { + name := "testbuildcmdshc" + defer deleteImages(name) + if _, err := buildImage(name, "FROM busybox\nCMD echo cmd\n", true); err != nil { + t.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.Cmd") + if err != nil { + t.Fatal(err, res) + } + + expected := `["/bin/sh","-c","echo cmd"]` + + if res != expected { + t.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) + } + + logDone("build - cmd should have sh -c for non-json") +} + +func TestBuildCmdJSONNoShDashC(t *testing.T) { + name := "testbuildcmdjson" + defer deleteImages(name) + if _, err := buildImage(name, "FROM busybox\nCMD [\"echo\", \"cmd\"]", true); err != nil { + t.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.Cmd") + if err != nil { + t.Fatal(err, res) + } + + expected := `["echo","cmd"]` + + if res != expected { + t.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) + } + + logDone("build - cmd should not have /bin/sh -c for json") +} + +func TestBuildIgnoreInvalidInstruction(t *testing.T) { + name := "testbuildignoreinvalidinstruction" + defer deleteImages(name) + + out, _, err := buildImageWithOut(name, "FROM busybox\nfoo bar", true) + if err != nil { + t.Fatal(err, out) + } + + logDone("build - ignore invalid Dockerfile instruction") +} + +func TestBuildEntrypointInheritance(t *testing.T) { + defer deleteImages("parent", "child") + + if _, err := buildImage("parent", ` + FROM busybox + ENTRYPOINT exit 130 + `, true); err != nil { + t.Fatal(err) + } + + status, _ := runCommand(exec.Command(dockerBinary, "run", "parent")) + + if status != 130 { + t.Fatalf("expected exit code 130 but received %d", status) + } + + if _, err := buildImage("child", ` + FROM parent + ENTRYPOINT exit 5 + `, true); err != nil { + t.Fatal(err) + } + + status, _ = runCommand(exec.Command(dockerBinary, "run", "child")) + + if 
status != 5 { + t.Fatal("expected exit code 5 but received %d", status) + } + + logDone("build - clear entrypoint") +} + +func TestBuildEntrypointInheritanceInspect(t *testing.T) { + var ( + name = "testbuildepinherit" + name2 = "testbuildepinherit2" + expected = `["/bin/sh","-c","echo quux"]` + ) + + defer deleteImages(name, name2) + + if _, err := buildImage(name, "FROM busybox\nENTRYPOINT /foo/bar", true); err != nil { + t.Fatal(err) + } + + if _, err := buildImage(name2, fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name), true); err != nil { + t.Fatal(err) + } + + res, err := inspectFieldJSON(name2, "Config.Entrypoint") + if err != nil { + t.Fatal(err, res) + } + + if res != expected { + t.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res) + } + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name2)) + if err != nil { + t.Fatal(err, out) + } + + expected = "quux" + + if strings.TrimSpace(out) != expected { + t.Fatalf("Expected output is %s, got %s", expected, out) + } + + logDone("build - entrypoint override inheritance properly") +} + +func TestBuildRunShEntrypoint(t *testing.T) { + name := "testbuildentrypoint" + defer deleteImages(name) + _, err := buildImage(name, + `FROM busybox + ENTRYPOINT /bin/echo`, + true) + if err != nil { + t.Fatal(err) + } + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", name)) + + if err != nil { + t.Fatal(err, out) + } + + logDone("build - entrypoint with /bin/echo running successfully") +} + +func TestBuildExoticShellInterpolation(t *testing.T) { + name := "testbuildexoticshellinterpolation" + defer deleteImages(name) + + _, err := buildImage(name, ` + FROM busybox + + ENV SOME_VAR a.b.c + + RUN [ "$SOME_VAR" = 'a.b.c' ] + RUN [ "${SOME_VAR}" = 'a.b.c' ] + RUN [ "${SOME_VAR%.*}" = 'a.b' ] + RUN [ "${SOME_VAR%%.*}" = 'a' ] + RUN [ "${SOME_VAR#*.}" = 'b.c' ] + RUN [ "${SOME_VAR##*.}" = 'c' ] + RUN [ "${SOME_VAR/c/d}" = 'a.b.d' ] + RUN [ "${#SOME_VAR}" = '5' ] + + RUN [ "${SOME_UNSET_VAR:-$SOME_VAR}" = 'a.b.c' ] + RUN [ "${SOME_VAR:+Version: ${SOME_VAR}}" = 'Version: a.b.c' ] + RUN [ "${SOME_UNSET_VAR:+${SOME_VAR}}" = '' ] + RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ] + `, false) + if err != nil { + t.Fatal(err) + } + + logDone("build - exotic shell interpolation") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_commit_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_commit_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_commit_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_commit_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,139 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +func TestCommitAfterContainerIsDone(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("failed to run container: %v %v", out, err)) + + cleanedContainerID := stripTrailingCharacters(out) + + waitCmd := exec.Command(dockerBinary, "wait", cleanedContainerID) + _, _, err = runCommandWithOutput(waitCmd) + errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + + commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID) + out, _, err = runCommandWithOutput(commitCmd) + errorOut(err, t, fmt.Sprintf("failed to commit container to image: %v %v", out, err)) + + cleanedImageID := 
stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedImageID) + out, _, err = runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("failed to inspect image: %v %v", out, err)) + + deleteContainer(cleanedContainerID) + deleteImages(cleanedImageID) + + logDone("commit - echo foo and commit the image") +} + +func TestCommitWithoutPause(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("failed to run container: %v %v", out, err)) + + cleanedContainerID := stripTrailingCharacters(out) + + waitCmd := exec.Command(dockerBinary, "wait", cleanedContainerID) + _, _, err = runCommandWithOutput(waitCmd) + errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + + commitCmd := exec.Command(dockerBinary, "commit", "-p=false", cleanedContainerID) + out, _, err = runCommandWithOutput(commitCmd) + errorOut(err, t, fmt.Sprintf("failed to commit container to image: %v %v", out, err)) + + cleanedImageID := stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedImageID) + out, _, err = runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("failed to inspect image: %v %v", out, err)) + + deleteContainer(cleanedContainerID) + deleteImages(cleanedImageID) + + logDone("commit - echo foo and commit the image with --pause=false") +} + +func TestCommitNewFile(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "commiter", "busybox", "/bin/sh", "-c", "echo koye > /foo") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "commit", "commiter") + imageID, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + imageID = strings.Trim(imageID, "\r\n") + + cmd = exec.Command(dockerBinary, "run", imageID, "cat", "/foo") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual != "koye" { + t.Fatalf("expected output koye received %s", actual) + } + + deleteAllContainers() + deleteImages(imageID) + + logDone("commit - commit file and read") +} + +func TestCommitTTY(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-t", "--name", "tty", "busybox", "/bin/ls") + + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "commit", "tty", "ttytest") + imageID, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + imageID = strings.Trim(imageID, "\r\n") + + cmd = exec.Command(dockerBinary, "run", "ttytest", "/bin/ls") + + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + logDone("commit - commit tty") +} + +func TestCommitWithHostBindMount(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "bind-commit", "-v", "/dev/null:/winning", "busybox", "true") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "commit", "bind-commit", "bindtest") + imageID, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(imageID, err) + } + imageID = strings.Trim(imageID, "\r\n") + + cmd = exec.Command(dockerBinary, "run", "bindtest", "true") + + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + deleteImages(imageID) + + logDone("commit - commit bind mounted file") +} diff -Nru 
docker.io-0.9.1~dfsg1/integration-cli/docker_cli_cp_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_cp_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_cp_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_cp_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,373 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" +) + +const ( + cpTestPathParent = "/some" + cpTestPath = "/some/path" + cpTestName = "test" + cpFullPath = "/some/path/test" + + cpContainerContents = "holla, i am the container" + cpHostContents = "hello, i am the host" +) + +// Test for #5656 +// Check that garbage paths don't escape the container's rootfs +func TestCpGarbagePath(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + t.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + t.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + if err != nil { + t.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := filepath.Join("../../../../../../../../../../../../", cpFullPath) + + _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + if err != nil { + t.Fatalf("couldn't copy from garbage path: %s:%s %s", cleanedContainerID, path, err) + } + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + if string(test) == cpHostContents { + t.Errorf("output matched host file -- garbage path can escape container rootfs") + } + + if string(test) != cpContainerContents { + t.Errorf("output doesn't match the input for garbage path") + } + + logDone("cp - garbage paths relative to container's rootfs") +} + +// Check that relative paths are relative to the container's rootfs +func TestCpRelativePath(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + t.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + t.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + if err != nil { + t.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path, _ := filepath.Rel("/", cpFullPath) + + _, _, 
err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + if err != nil { + t.Fatalf("couldn't copy from relative path: %s:%s %s", cleanedContainerID, path, err) + } + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + if string(test) == cpHostContents { + t.Errorf("output matched host file -- relative path can escape container rootfs") + } + + if string(test) != cpContainerContents { + t.Errorf("output doesn't match the input for relative path") + } + + logDone("cp - relative paths relative to container's rootfs") +} + +// Check that absolute paths are relative to the container's rootfs +func TestCpAbsolutePath(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + t.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + t.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + if err != nil { + t.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := cpFullPath + + _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + if err != nil { + t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err) + } + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + if string(test) == cpHostContents { + t.Errorf("output matched host file -- absolute path can escape container rootfs") + } + + if string(test) != cpContainerContents { + t.Errorf("output doesn't match the input for absolute path") + } + + logDone("cp - absolute paths relative to container's rootfs") +} + +// Test for #5619 +// Check that absolute symlinks are still relative to the container's rootfs +func TestCpAbsoluteSymlink(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + t.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + t.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + if err != nil { + t.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := filepath.Join("/", "container_path") + + _, _, err = cmd(t, "cp", 
cleanedContainerID+":"+path, tmpdir) + if err != nil { + t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err) + } + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + if string(test) == cpHostContents { + t.Errorf("output matched host file -- absolute symlink can escape container rootfs") + } + + if string(test) != cpContainerContents { + t.Errorf("output doesn't match the input for absolute symlink") + } + + logDone("cp - absolute symlink relative to container's rootfs") +} + +// Test for #5619 +// Check that symlinks which are part of the resource path are still relative to the container's rootfs +func TestCpSymlinkComponent(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { + t.Fatal(err) + } + + hostFile, err := os.Create(cpFullPath) + if err != nil { + t.Fatal(err) + } + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + if err != nil { + t.Fatal(err) + } + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := filepath.Join("/", "container_path", cpTestName) + + _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + if err != nil { + t.Fatalf("couldn't copy from symlink path component: %s:%s %s", cleanedContainerID, path, err) + } + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + if string(test) == cpHostContents { + t.Errorf("output matched host file -- symlink path component can escape container rootfs") + } + + if string(test) != cpContainerContents { + t.Errorf("output doesn't match the input for symlink path component") + } + + logDone("cp - symlink path components relative to container's rootfs") +} + +// Check that cp with unprivileged user doesn't return any error +func TestCpUnprivilegedUser(t *testing.T) { + out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + tmpdir, err := ioutil.TempDir("", "docker-integration") + if err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(tmpdir) + + if err = os.Chmod(tmpdir, 0777); err != nil { + t.Fatal(err) + } + + path := cpTestName + + _, _, err = runCommandWithOutput(exec.Command("su", "unprivilegeduser", "-c", dockerBinary+" cp "+cleanedContainerID+":"+path+" "+tmpdir)) + if err != nil { + t.Fatalf("couldn't copy with unprivileged user: %s:%s %s", cleanedContainerID, path, err) + } + + logDone("cp - unprivileged user") +} 
diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_create_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_create_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_create_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_create_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,116 @@ +package main + +import ( + "encoding/json" + "fmt" + "os/exec" + "testing" + "time" +) + +// Make sure we can create a simple container with some args +func TestCreateArgs(t *testing.T) { + runCmd := exec.Command(dockerBinary, "create", "busybox", "command", "arg1", "arg2", "arg with space") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + inspectOut, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err)) + + containers := []struct { + ID string + Created time.Time + Path string + Args []string + Image string + }{} + if err := json.Unmarshal([]byte(inspectOut), &containers); err != nil { + t.Fatalf("Error inspecting the container: %s", err) + } + if len(containers) != 1 { + t.Fatalf("Unexpected container count. Expected 1, received: %d", len(containers)) + } + + c := containers[0] + if c.Path != "command" { + t.Fatalf("Unexpected container path. Expected command, received: %s", c.Path) + } + + b := false + expected := []string{"arg1", "arg2", "arg with space"} + if len(c.Args) != len(expected) { + b = true + } else { + for i, arg := range expected { + if arg != c.Args[i] { + b = true + break + } + } + } + if b { + t.Fatalf("Unexpected args. Expected %v, received: %v", expected, c.Args) + } + + deleteAllContainers() + + logDone("create - args") +} + +// Make sure we can set hostconfig options too +func TestCreateHostConfig(t *testing.T) { + runCmd := exec.Command(dockerBinary, "create", "-P", "busybox", "echo") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + inspectOut, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err)) + + containers := []struct { + HostConfig *struct { + PublishAllPorts bool + } + }{} + if err := json.Unmarshal([]byte(inspectOut), &containers); err != nil { + t.Fatalf("Error inspecting the container: %s", err) + } + if len(containers) != 1 { + t.Fatalf("Unexpected container count. 
Expected 1, received: %d", len(containers)) + } + + c := containers[0] + if c.HostConfig == nil { + t.Fatalf("Expected HostConfig, got none") + } + + if !c.HostConfig.PublishAllPorts { + t.Fatalf("Expected PublishAllPorts, got false") + } + + deleteAllContainers() + + logDone("create - hostconfig") +} + +// "test123" should be printed by docker create + start +func TestCreateEchoStdout(t *testing.T) { + runCmd := exec.Command(dockerBinary, "create", "busybox", "echo", "test123") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + runCmd = exec.Command(dockerBinary, "start", "-ai", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + if out != "test123\n" { + t.Errorf("container should've printed 'test123', got '%s'", out) + } + + deleteAllContainers() + + logDone("create - echo test123") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_daemon_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_daemon_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_daemon_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_daemon_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,94 @@ +package main + +import ( + "encoding/json" + "os" + "strings" + "testing" +) + +func TestDaemonRestartWithRunningContainersPorts(t *testing.T) { + d := NewDaemon(t) + if err := d.StartWithBusybox(); err != nil { + t.Fatalf("Could not start daemon with busybox: %v", err) + } + defer d.Stop() + + if out, err := d.Cmd("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"); err != nil { + t.Fatalf("Could not run top1: err=%v\n%s", err, out) + } + // --restart=no by default + if out, err := d.Cmd("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"); err != nil { + t.Fatalf("Could not run top2: err=%v\n%s", err, out) + } + + testRun := func(m map[string]bool, prefix string) { + var format string + for c, shouldRun := range m { + out, err := d.Cmd("ps") + if err != nil { + t.Fatalf("Could not run ps: err=%v\n%q", err, out) + } + if shouldRun { + format = "%scontainer %q is not running" + } else { + format = "%scontainer %q is running" + } + if shouldRun != strings.Contains(out, c) { + t.Fatalf(format, prefix, c) + } + } + } + + testRun(map[string]bool{"top1": true, "top2": true}, "") + + if err := d.Restart(); err != nil { + t.Fatalf("Could not restart daemon: %v", err) + } + + testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") + + logDone("daemon - running containers on daemon restart") +} + +func TestDaemonRestartWithVolumesRefs(t *testing.T) { + d := NewDaemon(t) + if err := d.StartWithBusybox(); err != nil { + t.Fatal(err) + } + defer d.Stop() + + if out, err := d.Cmd("run", "-d", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil { + t.Fatal(err, out) + } + if err := d.Restart(); err != nil { + t.Fatal(err) + } + if _, err := d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox"); err != nil { + t.Fatal(err) + } + if out, err := d.Cmd("rm", "-fv", "volrestarttest2"); err != nil { + t.Fatal(err, out) + } + v, err := d.Cmd("inspect", "--format", "{{ json .Volumes }}", "volrestarttest1") + if err != nil { + t.Fatal(err) + } + volumes := make(map[string]string) + json.Unmarshal([]byte(v), &volumes) + if _, err := os.Stat(volumes["/foo"]); err != nil { + t.Fatalf("Expected volume to 
exist: %s - %s", volumes["/foo"], err) + } + + logDone("daemon - volume refs are restored") +} + +func TestDaemonStartIptablesFalse(t *testing.T) { + d := NewDaemon(t) + if err := d.Start("--iptables=false"); err != nil { + t.Fatalf("we should have been able to start the daemon with passing iptables=false: %v", err) + } + d.Stop() + + logDone("daemon - started daemon with iptables=false") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_diff_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_diff_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_diff_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_diff_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,93 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +// ensure that an added file shows up in docker diff +func TestDiffFilenameShownInOutput(t *testing.T) { + containerCmd := `echo foo > /root/bar` + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd) + cid, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err)) + + cleanCID := stripTrailingCharacters(cid) + + diffCmd := exec.Command(dockerBinary, "diff", cleanCID) + out, _, err := runCommandWithOutput(diffCmd) + errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err)) + + found := false + for _, line := range strings.Split(out, "\n") { + if strings.Contains("A /root/bar", line) { + found = true + break + } + } + if !found { + t.Errorf("couldn't find the new file in docker diff's output: %v", out) + } + deleteContainer(cleanCID) + + logDone("diff - check if created file shows up") +} + +// test to ensure GH #3840 doesn't occur any more +func TestDiffEnsureDockerinitFilesAreIgnored(t *testing.T) { + // this is a list of files which shouldn't show up in `docker diff` + dockerinitFiles := []string{"/etc/resolv.conf", "/etc/hostname", "/etc/hosts", "/.dockerinit", "/.dockerenv"} + + // we might not run into this problem from the first run, so start a few containers + for i := 0; i < 20; i++ { + containerCmd := `echo foo > /root/bar` + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd) + cid, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("%s", err)) + + cleanCID := stripTrailingCharacters(cid) + + diffCmd := exec.Command(dockerBinary, "diff", cleanCID) + out, _, err := runCommandWithOutput(diffCmd) + errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err)) + + deleteContainer(cleanCID) + + for _, filename := range dockerinitFiles { + if strings.Contains(out, filename) { + t.Errorf("found file which should've been ignored %v in diff output", filename) + } + } + } + + logDone("diff - check if ignored files show up in diff") +} + +func TestDiffEnsureOnlyKmsgAndPtmx(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sleep", "0") + cid, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("%s", err)) + cleanCID := stripTrailingCharacters(cid) + + diffCmd := exec.Command(dockerBinary, "diff", cleanCID) + out, _, err := runCommandWithOutput(diffCmd) + errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err)) + deleteContainer(cleanCID) + + expected := map[string]bool{ + "C /dev": true, + "A /dev/full": true, // busybox + "C /dev/ptmx": true, // libcontainer + "A /dev/kmsg": true, // lxc + } + + for _, line := range strings.Split(out, "\n") { + if line != "" && 
!expected[line] { + t.Errorf("'%s' is shown in the diff but shouldn't be", line) + } + } + + logDone("diff - ensure that only kmsg and ptmx appear in diff") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_events_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_events_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_events_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_events_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,217 @@ +package main + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "os/exec" + "strconv" + "strings" + "testing" + "time" + "unicode" + + "github.com/kr/pty" +) + +func TestEventsUntag(t *testing.T) { + out, _, _ := cmd(t, "images", "-q") + image := strings.Split(out, "\n")[0] + cmd(t, "tag", image, "utest:tag1") + cmd(t, "tag", image, "utest:tag2") + cmd(t, "rmi", "utest:tag1") + cmd(t, "rmi", "utest:tag2") + eventsCmd := exec.Command("timeout", "0.2", dockerBinary, "events", "--since=1") + out, _, _ = runCommandWithOutput(eventsCmd) + events := strings.Split(out, "\n") + nEvents := len(events) + // The last element after the split above will be an empty string, so we + // get the two elements before the last, which are the untags we're + // looking for. + for _, v := range events[nEvents-3 : nEvents-1] { + if !strings.Contains(v, "untag") { + t.Fatalf("event should be untag, not %#v", v) + } + } + logDone("events - untags are logged") +} + +func TestEventsPause(t *testing.T) { + out, _, _ := cmd(t, "images", "-q") + image := strings.Split(out, "\n")[0] + cmd(t, "run", "-d", "--name", "testeventpause", image, "sleep", "2") + cmd(t, "pause", "testeventpause") + cmd(t, "unpause", "testeventpause") + eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix())) + out, _, _ = runCommandWithOutput(eventsCmd) + events := strings.Split(out, "\n") + if len(events) <= 1 { + t.Fatalf("Missing expected event") + } + + pauseEvent := strings.Fields(events[len(events)-3]) + unpauseEvent := strings.Fields(events[len(events)-2]) + + if pauseEvent[len(pauseEvent)-1] != "pause" { + t.Fatalf("event should be pause, not %#v", pauseEvent) + } + if unpauseEvent[len(unpauseEvent)-1] != "unpause" { + t.Fatalf("event should be unpause, not %#v", unpauseEvent) + } + + logDone("events - pause/unpause is logged") +} + +func TestEventsContainerFailStartDie(t *testing.T) { + out, _, _ := cmd(t, "images", "-q") + image := strings.Split(out, "\n")[0] + eventsCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testeventdie", image, "blerg") + _, _, err := runCommandWithOutput(eventsCmd) + if err == nil { + t.Fatalf("Container run with command blerg should have failed, but it did not") + } + + eventsCmd = exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix())) + out, _, _ = runCommandWithOutput(eventsCmd) + events := strings.Split(out, "\n") + if len(events) <= 1 { + t.Fatalf("Missing expected event") + } + + startEvent := strings.Fields(events[len(events)-3]) + dieEvent := strings.Fields(events[len(events)-2]) + + if startEvent[len(startEvent)-1] != "start" { + t.Fatalf("event should be start, not %#v", startEvent) + } + if dieEvent[len(dieEvent)-1] != "die" { + t.Fatalf("event should be die, not %#v", dieEvent) + } + + logDone("events - container failed to start logs die") +} + +func TestEventsLimit(t *testing.T) { + for i := 0; i < 30; i++ { + cmd(t, "run", "busybox", "echo", strconv.Itoa(i)) + } + eventsCmd := 
exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix())) + out, _, _ := runCommandWithOutput(eventsCmd) + events := strings.Split(out, "\n") + nEvents := len(events) - 1 + if nEvents != 64 { + t.Fatalf("events should be limited to 64, but received %d", nEvents) + } + logDone("events - limited to 64 entries") +} + +func TestEventsContainerEvents(t *testing.T) { + cmd(t, "run", "--rm", "busybox", "true") + eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix())) + out, exitCode, err := runCommandWithOutput(eventsCmd) + if exitCode != 0 || err != nil { + t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err) + } + events := strings.Split(out, "\n") + events = events[:len(events)-1] + if len(events) < 4 { + t.Fatalf("Missing expected event") + } + createEvent := strings.Fields(events[len(events)-4]) + startEvent := strings.Fields(events[len(events)-3]) + dieEvent := strings.Fields(events[len(events)-2]) + destroyEvent := strings.Fields(events[len(events)-1]) + if createEvent[len(createEvent)-1] != "create" { + t.Fatalf("event should be create, not %#v", createEvent) + } + if startEvent[len(startEvent)-1] != "start" { + t.Fatalf("event should be pause, not %#v", startEvent) + } + if dieEvent[len(dieEvent)-1] != "die" { + t.Fatalf("event should be pause, not %#v", dieEvent) + } + if destroyEvent[len(destroyEvent)-1] != "destroy" { + t.Fatalf("event should be pause, not %#v", destroyEvent) + } + + logDone("events - container create, start, die, destroy is logged") +} + +func TestEventsImageUntagDelete(t *testing.T) { + name := "testimageevents" + defer deleteImages(name) + _, err := buildImage(name, + `FROM scratch + MAINTAINER "docker"`, + true) + if err != nil { + t.Fatal(err) + } + if err := deleteImages(name); err != nil { + t.Fatal(err) + } + eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix())) + out, exitCode, err := runCommandWithOutput(eventsCmd) + if exitCode != 0 || err != nil { + t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err) + } + events := strings.Split(out, "\n") + t.Log(events) + events = events[:len(events)-1] + if len(events) < 2 { + t.Fatalf("Missing expected event") + } + untagEvent := strings.Fields(events[len(events)-2]) + deleteEvent := strings.Fields(events[len(events)-1]) + if untagEvent[len(untagEvent)-1] != "untag" { + t.Fatalf("untag should be untag, not %#v", untagEvent) + } + if deleteEvent[len(deleteEvent)-1] != "delete" { + t.Fatalf("delete should be delete, not %#v", deleteEvent) + } + logDone("events - image untag, delete is logged") +} + +// #5979 +func TestEventsRedirectStdout(t *testing.T) { + + since := time.Now().Unix() + + cmd(t, "run", "busybox", "true") + + defer deleteAllContainers() + + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatalf("could not create temp file: %v", err) + } + defer os.Remove(file.Name()) + + command := fmt.Sprintf("%s events --since=%d --until=%d > %s", dockerBinary, since, time.Now().Unix(), file.Name()) + _, tty, err := pty.Open() + if err != nil { + t.Fatalf("Could not open pty: %v", err) + } + cmd := exec.Command("sh", "-c", command) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + if err := cmd.Run(); err != nil { + t.Fatalf("run err for command %q: %v", command, err) + } + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + for _, c := range scanner.Text() { + if unicode.IsControl(c) { + t.Fatalf("found control 
character %v", []byte(string(c))) + } + } + } + if err := scanner.Err(); err != nil { + t.Fatalf("Scan err for command %q: %v", command, err) + } + + logDone("events - redirect stdout") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_exec_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_exec_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_exec_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_exec_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,139 @@ +package main + +import ( + "bufio" + "os/exec" + "strings" + "testing" + "time" +) + +func TestExec(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && sleep 100") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + execCmd := exec.Command(dockerBinary, "exec", "testing", "cat", "/tmp/file") + + out, _, err = runCommandWithOutput(execCmd) + errorOut(err, t, out) + + out = strings.Trim(out, "\r\n") + + if expected := "test"; out != expected { + t.Errorf("container exec should've printed %q but printed %q", expected, out) + } + + deleteAllContainers() + + logDone("exec - basic test") +} + +func TestExecInteractive(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && sleep 100") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + execCmd := exec.Command(dockerBinary, "exec", "-i", "testing", "sh") + stdin, err := execCmd.StdinPipe() + if err != nil { + t.Fatal(err) + } + stdout, err := execCmd.StdoutPipe() + if err != nil { + t.Fatal(err) + } + + if err := execCmd.Start(); err != nil { + t.Fatal(err) + } + if _, err := stdin.Write([]byte("cat /tmp/file\n")); err != nil { + t.Fatal(err) + } + + r := bufio.NewReader(stdout) + line, err := r.ReadString('\n') + if err != nil { + t.Fatal(err) + } + line = strings.TrimSpace(line) + if line != "test" { + t.Fatalf("Output should be 'test', got '%q'", line) + } + if err := stdin.Close(); err != nil { + t.Fatal(err) + } + finish := make(chan struct{}) + go func() { + if err := execCmd.Wait(); err != nil { + t.Fatal(err) + } + close(finish) + }() + select { + case <-finish: + case <-time.After(1 * time.Second): + t.Fatal("docker exec failed to exit on stdin close") + } + + deleteAllContainers() + + logDone("exec - Interactive test") +} + +func TestExecAfterContainerRestart(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + runCmd = exec.Command(dockerBinary, "exec", cleanedContainerID, "echo", "hello") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + outStr := strings.TrimSpace(out) + if outStr != "hello" { + t.Errorf("container should've printed hello, instead printed %q", outStr) + } + + deleteAllContainers() + + logDone("exec - exec running container after container restart") +} + +func TestExecAfterDaemonRestart(t *testing.T) { + d := NewDaemon(t) + if err := d.StartWithBusybox(); err != nil { + t.Fatalf("Could not start daemon with busybox: %v", err) + } + defer d.Stop() + + if out, err := d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); 
err != nil { + t.Fatalf("Could not run top: err=%v\n%s", err, out) + } + + if err := d.Restart(); err != nil { + t.Fatalf("Could not restart daemon: %v", err) + } + + if out, err := d.Cmd("start", "top"); err != nil { + t.Fatalf("Could not start top after daemon restart: err=%v\n%s", err, out) + } + + out, err := d.Cmd("exec", "top", "echo", "hello") + if err != nil { + t.Fatalf("Could not exec on container top: err=%v\n%s", err, out) + } + + outStr := strings.TrimSpace(string(out)) + if outStr != "hello" { + t.Errorf("container should've printed hello, instead printed %q", outStr) + } + + logDone("exec - exec running container after daemon restart") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_export_import_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_export_import_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_export_import_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_export_import_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,50 @@ +package main + +import ( + "fmt" + "os" + "os/exec" + "testing" +) + +// export a container and import the result as a new image +func TestExportContainerAndImportImage(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + out, _, err = runCommandWithOutput(inspectCmd) + if err != nil { + t.Fatalf("output should've been a container id: %s %s ", cleanedContainerID, err) + } + + exportCmdTemplate := `%v export %v > /tmp/testexp.tar` + exportCmdFinal := fmt.Sprintf(exportCmdTemplate, dockerBinary, cleanedContainerID) + exportCmd := exec.Command("bash", "-c", exportCmdFinal) + out, _, err = runCommandWithOutput(exportCmd) + errorOut(err, t, fmt.Sprintf("failed to export container: %v %v", out, err)) + + importCmdFinal := `cat /tmp/testexp.tar | docker import - repo/testexp:v1` + importCmd := exec.Command("bash", "-c", importCmdFinal) + out, _, err = runCommandWithOutput(importCmd) + errorOut(err, t, fmt.Sprintf("failed to import image: %v %v", out, err)) + + cleanedImageID := stripTrailingCharacters(out) + + inspectCmd = exec.Command(dockerBinary, "inspect", cleanedImageID) + out, _, err = runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("output should've been an image id: %v %v", out, err)) + + deleteContainer(cleanedContainerID) + deleteImages("repo/testexp:v1") + + os.Remove("/tmp/testexp.tar") + + logDone("export - export a container") + logDone("import - import an image") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_history_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_history_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_history_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_history_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,85 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +// This is a heisen-test. Because the created timestamps of images and the behavior of +// sort are not predictable, it doesn't always fail. 
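[Editorial note on the comment above, not part of the patch: the assertion below checks that docker history lists the 26 RUN layers newest-first; since that ordering rests on per-layer creation timestamps and the sort applied to them, which the comment says are not fully predictable, an occasional mismatch here indicates flakiness rather than a regression.]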
+func TestBuildHistory(t *testing.T) { + name := "testbuildhistory" + defer deleteImages(name) + _, err := buildImage(name, `FROM busybox +RUN echo "A" +RUN echo "B" +RUN echo "C" +RUN echo "D" +RUN echo "E" +RUN echo "F" +RUN echo "G" +RUN echo "H" +RUN echo "I" +RUN echo "J" +RUN echo "K" +RUN echo "L" +RUN echo "M" +RUN echo "N" +RUN echo "O" +RUN echo "P" +RUN echo "Q" +RUN echo "R" +RUN echo "S" +RUN echo "T" +RUN echo "U" +RUN echo "V" +RUN echo "W" +RUN echo "X" +RUN echo "Y" +RUN echo "Z"`, + true) + + if err != nil { + t.Fatal(err) + } + + out, exitCode, err := runCommandWithOutput(exec.Command(dockerBinary, "history", "testbuildhistory")) + errorOut(err, t, fmt.Sprintf("image history failed: %v %v", out, err)) + if err != nil || exitCode != 0 { + t.Fatal("failed to get image history") + } + + actualValues := strings.Split(out, "\n")[1:27] + expectedValues := [26]string{"Z", "Y", "X", "W", "V", "U", "T", "S", "R", "Q", "P", "O", "N", "M", "L", "K", "J", "I", "H", "G", "F", "E", "D", "C", "B", "A"} + + for i := 0; i < 26; i++ { + echoValue := fmt.Sprintf("echo \"%s\"", expectedValues[i]) + actualValue := actualValues[i] + + if !strings.Contains(actualValue, echoValue) { + t.Fatalf("Expected layer \"%s\", but was: %s", expectedValues[i], actualValue) + } + } + + logDone("history - build history") +} + +func TestHistoryExistentImage(t *testing.T) { + historyCmd := exec.Command(dockerBinary, "history", "busybox") + _, exitCode, err := runCommandWithOutput(historyCmd) + if err != nil || exitCode != 0 { + t.Fatal("failed to get image history") + } + logDone("history - history on existent image must not fail") +} + +func TestHistoryNonExistentImage(t *testing.T) { + historyCmd := exec.Command(dockerBinary, "history", "testHistoryNonExistentImage") + _, exitCode, err := runCommandWithOutput(historyCmd) + if err == nil || exitCode == 0 { + t.Fatal("history on a non-existent image didn't result in a non-zero exit status") + } + logDone("history - history on non-existent image must fail") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_images_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_images_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_images_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_images_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,62 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" + "time" +) + +func TestImagesEnsureImageIsListed(t *testing.T) { + imagesCmd := exec.Command(dockerBinary, "images") + out, _, err := runCommandWithOutput(imagesCmd) + errorOut(err, t, fmt.Sprintf("listing images failed with errors: %v", err)) + + if !strings.Contains(out, "busybox") { + t.Fatal("images should've listed busybox") + } + + logDone("images - busybox should be listed") +} + +func TestImagesOrderedByCreationDate(t *testing.T) { + defer deleteImages("order:test_a") + defer deleteImages("order:test_c") + defer deleteImages("order:test_b") + id1, err := buildImage("order:test_a", + `FROM scratch + MAINTAINER dockerio1`, true) + if err != nil { + t.Fatal(err) + } + time.Sleep(time.Second) + id2, err := buildImage("order:test_c", + `FROM scratch + MAINTAINER dockerio2`, true) + if err != nil { + t.Fatal(err) + } + time.Sleep(time.Second) + id3, err := buildImage("order:test_b", + `FROM scratch + MAINTAINER dockerio3`, true) + if err != nil { + t.Fatal(err) + } + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "images", "-q", "--no-trunc")) + errorOut(err, 
t, fmt.Sprintf("listing images failed with errors: %v", err)) + imgs := strings.Split(out, "\n") + if imgs[0] != id3 { + t.Fatalf("First image must be %s, got %s", id3, imgs[0]) + } + if imgs[1] != id2 { + t.Fatalf("Second image must be %s, got %s", id2, imgs[1]) + } + if imgs[2] != id1 { + t.Fatalf("Third image must be %s, got %s", id1, imgs[2]) + } + + logDone("images - ordering by creation date") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_import_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_import_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_import_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_import_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,30 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +func TestImportDisplay(t *testing.T) { + server, err := fileServer(map[string]string{ + "/cirros.tar.gz": "/cirros.tar.gz", + }) + if err != nil { + t.Fatal(err) + } + defer server.Close() + fileURL := fmt.Sprintf("%s/cirros.tar.gz", server.URL) + importCmd := exec.Command(dockerBinary, "import", fileURL) + out, _, err := runCommandWithOutput(importCmd) + if err != nil { + t.Errorf("import failed with errors: %v, output: %q", err, out) + } + + if n := strings.Count(out, "\n"); n != 2 { + t.Fatalf("display is messed up: %d '\\n' instead of 2", n) + } + + logDone("import - cirros was imported and display is fine") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_info_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_info_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_info_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_info_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,29 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +// ensure docker info succeeds +func TestInfoEnsureSucceeds(t *testing.T) { + versionCmd := exec.Command(dockerBinary, "info") + out, exitCode, err := runCommandWithOutput(versionCmd) + errorOut(err, t, fmt.Sprintf("encountered error while running docker info: %v", err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to execute docker info") + } + + stringsToCheck := []string{"Containers:", "Execution Driver:", "Kernel Version:"} + + for _, linePrefix := range stringsToCheck { + if !strings.Contains(out, linePrefix) { + t.Errorf("couldn't find string %v in output", linePrefix) + } + } + + logDone("info - verify that it works") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_inspect_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_inspect_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_inspect_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_inspect_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,22 @@ +package main + +import ( + "os/exec" + "strings" + "testing" +) + +func TestInspectImage(t *testing.T) { + imageTest := "scratch" + imageTestID := "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + imagesCmd := exec.Command(dockerBinary, "inspect", "--format='{{.Id}}'", imageTest) + + out, exitCode, err := runCommandWithOutput(imagesCmd) + if exitCode != 0 || err != nil { + t.Fatalf("failed to inspect image") + } + if id := strings.TrimSuffix(out, "\n"); id != imageTestID { + t.Fatalf("Expected id: %s for image: %s but received id: %s", imageTestID, imageTest, id) + } + logDone("inspect - inspect an image") +} diff 
-Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_kill_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_kill_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_kill_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_kill_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,64 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +func TestKillContainer(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 10") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + inspectOut, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err)) + + killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) + out, _, err = runCommandWithOutput(killCmd) + errorOut(err, t, fmt.Sprintf("failed to kill container: %v %v", out, err)) + + listRunningContainersCmd := exec.Command(dockerBinary, "ps", "-q") + out, _, err = runCommandWithOutput(listRunningContainersCmd) + errorOut(err, t, fmt.Sprintf("failed to list running containers: %v", err)) + + if strings.Contains(out, cleanedContainerID) { + t.Fatal("killed container is still running") + } + + deleteContainer(cleanedContainerID) + + logDone("kill - kill container running sleep 10") +} + +func TestKillDifferentUserContainer(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-u", "daemon", "-d", "busybox", "sh", "-c", "sleep 10") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + inspectOut, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err)) + + killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) + out, _, err = runCommandWithOutput(killCmd) + errorOut(err, t, fmt.Sprintf("failed to kill container: %v %v", out, err)) + + listRunningContainersCmd := exec.Command(dockerBinary, "ps", "-q") + out, _, err = runCommandWithOutput(listRunningContainersCmd) + errorOut(err, t, fmt.Sprintf("failed to list running containers: %v", err)) + + if strings.Contains(out, cleanedContainerID) { + t.Fatal("killed container is still running") + } + + deleteContainer(cleanedContainerID) + + logDone("kill - kill container running sleep 10 from a different user") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_links_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_links_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_links_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_links_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,156 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "strings" + "testing" + + "github.com/docker/docker/pkg/iptables" +) + +func TestLinksEtcHostsRegularFile(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "ls", "-la", "/etc/hosts") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + if !strings.HasPrefix(out, "-") { + t.Errorf("/etc/hosts should be a regular file") + } + + 
deleteAllContainers() + + logDone("link - /etc/hosts is a regular file") +} + +func TestLinksEtcHostsContentMatch(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "cat", "/etc/hosts") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + hosts, err := ioutil.ReadFile("/etc/hosts") + if os.IsNotExist(err) { + t.Skip("/etc/hosts does not exist, skip this test") + } + + if out != string(hosts) { + t.Errorf("container's /etc/hosts does not match the host's /etc/hosts") + } + + deleteAllContainers() + + logDone("link - /etc/hosts matches hosts copy") +} + +func TestLinksPingUnlinkedContainers(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + exitCode, err := runCommand(runCmd) + + if exitCode == 0 { + t.Fatal("run ping did not fail") + } else if exitCode != 1 { + errorOut(err, t, fmt.Sprintf("run ping failed with errors: %v", err)) + } + + logDone("links - ping unlinked container") +} + +func TestLinksPingLinkedContainers(t *testing.T) { + var out string + out, _, _ = cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") + idA := stripTrailingCharacters(out) + out, _, _ = cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") + idB := stripTrailingCharacters(out) + cmd(t, "run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + cmd(t, "kill", idA) + cmd(t, "kill", idB) + deleteAllContainers() + + logDone("links - ping linked container") +} + +func TestLinksIpTablesRulesWhenLinkAndUnlink(t *testing.T) { + cmd(t, "run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "sleep", "10") + cmd(t, "run", "-d", "--name", "parent", "--link", "child:http", "busybox", "sleep", "10") + + childIP := findContainerIP(t, "child") + parentIP := findContainerIP(t, "parent") + + sourceRule := []string{"FORWARD", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"} + destinationRule := []string{"FORWARD", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"} + if !iptables.Exists(sourceRule...) || !iptables.Exists(destinationRule...) { + t.Fatal("Iptables rules not found") + } + + cmd(t, "rm", "--link", "parent/http") + if iptables.Exists(sourceRule...) || iptables.Exists(destinationRule...) 
{ + t.Fatal("Iptables rules should be removed when unlink") + } + + cmd(t, "kill", "child") + cmd(t, "kill", "parent") + deleteAllContainers() + + logDone("link - verify iptables when link and unlink") +} + +func TestLinksInspectLinksStarted(t *testing.T) { + var ( + expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} + result []string + ) + defer deleteAllContainers() + cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") + cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") + cmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sleep", "10") + links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links") + if err != nil { + t.Fatal(err) + } + + err = unmarshalJSON([]byte(links), &result) + if err != nil { + t.Fatal(err) + } + + output := convertSliceOfStringsToMap(result) + + equal := deepEqual(expected, output) + + if !equal { + t.Fatalf("Links %s, expected %s", result, expected) + } + logDone("link - links in started container inspect") +} + +func TestLinksInspectLinksStopped(t *testing.T) { + var ( + expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} + result []string + ) + defer deleteAllContainers() + cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") + cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") + cmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true") + links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links") + if err != nil { + t.Fatal(err) + } + + err = unmarshalJSON([]byte(links), &result) + if err != nil { + t.Fatal(err) + } + + output := convertSliceOfStringsToMap(result) + + equal := deepEqual(expected, output) + + if !equal { + t.Fatalf("Links %s, but expected %s", result, expected) + } + + logDone("link - links in stopped container inspect") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_logs_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_logs_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_logs_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_logs_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,252 @@ +package main + +import ( + "fmt" + "os/exec" + "regexp" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/timeutils" +) + +// This used to work, it test a log of PageSize-1 (gh#4851) +func TestLogsContainerSmallerThanPage(t *testing.T) { + testLen := 32767 + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + if len(out) != testLen+1 { + t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) + } + + deleteContainer(cleanedContainerID) + + logDone("logs - logs container running echo smaller than page size") +} + +// Regression test: When going over the 
PageSize, it used to panic (gh#4851) +func TestLogsContainerBiggerThanPage(t *testing.T) { + testLen := 32768 + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + if len(out) != testLen+1 { + t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) + } + + deleteContainer(cleanedContainerID) + + logDone("logs - logs container running echo bigger than page size") +} + +// Regression test: When going much over the PageSize, it used to block (gh#4851) +func TestLogsContainerMuchBiggerThanPage(t *testing.T) { + testLen := 33000 + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + if len(out) != testLen+1 { + t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) + } + + deleteContainer(cleanedContainerID) + + logDone("logs - logs container running echo much bigger than page size") +} + +func TestLogsTimestamps(t *testing.T) { + testLen := 100 + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) + + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", "-t", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + lines := strings.Split(out, "\n") + + if len(lines) != testLen+1 { + t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) + } + + ts := regexp.MustCompile(`^.* `) + + for _, l := range lines { + if l != "" { + _, err := time.Parse(timeutils.RFC3339NanoFixed+" ", ts.FindString(l)) + if err != nil { + t.Fatalf("Failed to parse timestamp from %v: %v", l, err) + } + if l[29] != 'Z' { // ensure we have padded 0's + t.Fatalf("Timestamp isn't padded properly: %s", l) + } + } + } + + deleteContainer(cleanedContainerID) + + logDone("logs - logs with timestamps") +} + +func TestLogsSeparateStderr(t *testing.T) { + msg := "stderr_log" + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) + + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", 
cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) + stdout, stderr, _, err := runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + if stdout != "" { + t.Fatalf("Expected empty stdout stream, got %v", stdout) + } + + stderr = strings.TrimSpace(stderr) + if stderr != msg { + t.Fatalf("Expected %v in stderr stream, got %v", msg, stderr) + } + + deleteContainer(cleanedContainerID) + + logDone("logs - separate stderr (without pseudo-tty)") +} + +func TestLogsStderrInStdout(t *testing.T) { + msg := "stderr_log" + runCmd := exec.Command(dockerBinary, "run", "-d", "-t", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) + + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) + stdout, stderr, _, err := runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + if stderr != "" { + t.Fatalf("Expected empty stderr stream, got %v", stderr) + } + + stdout = strings.TrimSpace(stdout) + if stdout != msg { + t.Fatalf("Expected %v in stdout stream, got %v", msg, stdout) + } + + deleteContainer(cleanedContainerID) + + logDone("logs - stderr in stdout (with pseudo-tty)") +} + +func TestLogsTail(t *testing.T) { + testLen := 100 + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) + + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", "--tail", "5", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + lines := strings.Split(out, "\n") + + if len(lines) != 6 { + t.Fatalf("Expected log %d lines, received %d\n", 6, len(lines)) + } + + logsCmd = exec.Command(dockerBinary, "logs", "--tail", "all", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + lines = strings.Split(out, "\n") + + if len(lines) != testLen+1 { + t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) + } + + logsCmd = exec.Command(dockerBinary, "logs", "--tail", "random", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + lines = strings.Split(out, "\n") + + if len(lines) != testLen+1 { + t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) + } + + deleteContainer(cleanedContainerID) + logDone("logs - logs tail") +} + +func TestLogsFollowStopped(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "hello") + + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", "-f", cleanedContainerID) + if err := 
+
+func TestLogsFollowStopped(t *testing.T) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "hello")
+
+	out, _, _, err := runCommandWithStdoutStderr(runCmd)
+	errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err))
+
+	cleanedContainerID := stripTrailingCharacters(out)
+	exec.Command(dockerBinary, "wait", cleanedContainerID).Run()
+
+	logsCmd := exec.Command(dockerBinary, "logs", "-f", cleanedContainerID)
+	if err := logsCmd.Start(); err != nil {
+		t.Fatal(err)
+	}
+
+	c := make(chan struct{})
+	go func() {
+		if err := logsCmd.Wait(); err != nil {
+			t.Fatal(err)
+		}
+		close(c)
+	}()
+
+	select {
+	case <-c:
+	case <-time.After(1 * time.Second):
+		t.Fatal("Following logs hung")
+	}
+
+	deleteContainer(cleanedContainerID)
+	logDone("logs - logs follow stopped container")
+}
diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_nat_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_nat_test.go
--- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_nat_test.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_nat_test.go	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,52 @@
+package main
+
+import (
+	"fmt"
+	"net"
+	"os/exec"
+	"strings"
+	"testing"
+)
+
+func TestNetworkNat(t *testing.T) {
+	iface, err := net.InterfaceByName("eth0")
+	if err != nil {
+		t.Skipf("Test not running with `make test`. Interface eth0 not found: %s", err)
+	}
+
+	ifaceAddrs, err := iface.Addrs()
+	if err != nil || len(ifaceAddrs) == 0 {
+		t.Fatalf("Error retrieving addresses for eth0: %v (%d addresses)", err, len(ifaceAddrs))
+	}
+
+	ifaceIP, _, err := net.ParseCIDR(ifaceAddrs[0].String())
+	if err != nil {
+		t.Fatalf("Error retrieving the IP for eth0: %s", err)
+	}
+
+	runCmd := exec.Command(dockerBinary, "run", "-dt", "-p", "8080:8080", "busybox", "nc", "-lp", "8080")
+	out, _, err := runCommandWithOutput(runCmd)
+	errorOut(err, t, fmt.Sprintf("run1 failed with errors: %v (%s)", err, out))
+
+	cleanedContainerID := stripTrailingCharacters(out)
+
+	runCmd = exec.Command(dockerBinary, "run", "busybox", "sh", "-c", fmt.Sprintf("echo hello world | nc -w 30 %s 8080", ifaceIP))
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, fmt.Sprintf("run2 failed with errors: %v (%s)", err, out))
+
+	runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, fmt.Sprintf("failed to retrieve logs for container: %v %v", cleanedContainerID, err))
+	out = strings.Trim(out, "\r\n")
+
+	if expected := "hello world"; out != expected {
+		t.Fatalf("Unexpected output. Expected: %q, received: %q for iface %s", expected, out, ifaceIP)
+	}
+
+	killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID)
+	out, _, err = runCommandWithOutput(killCmd)
+	errorOut(err, t, fmt.Sprintf("failed to kill container: %v %v", out, err))
+	deleteAllContainers()
+
+	logDone("network - make sure nat works through the host")
+}
diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_port_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_port_test.go
--- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_port_test.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_port_test.go	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,125 @@
+package main
+
+import (
+	"os/exec"
+	"sort"
+	"strings"
+	"testing"
+)
+
+func TestPortList(t *testing.T) {
+	// one port
+	runCmd := exec.Command(dockerBinary, "run", "-d", "-p", "9876:80", "busybox", "top")
+	out, _, err := runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	firstID := stripTrailingCharacters(out)
+
+	runCmd = exec.Command(dockerBinary, "port", firstID, "80")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if !assertPortList(t, out, []string{"0.0.0.0:9876"}) {
+		t.Error("Port list is not correct")
+	}
+
+	runCmd = exec.Command(dockerBinary, "port", firstID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if !assertPortList(t, out, []string{"80/tcp -> 0.0.0.0:9876"}) {
+		t.Error("Port list is not correct")
+	}
+	runCmd = exec.Command(dockerBinary, "rm", "-f", firstID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	// three ports
+	runCmd = exec.Command(dockerBinary, "run", "-d",
+		"-p", "9876:80",
+		"-p", "9877:81",
+		"-p", "9878:82",
+		"busybox", "top")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	ID := stripTrailingCharacters(out)
+
+	runCmd = exec.Command(dockerBinary, "port", ID, "80")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if !assertPortList(t, out, []string{"0.0.0.0:9876"}) {
+		t.Error("Port list is not correct")
+	}
+
+	runCmd = exec.Command(dockerBinary, "port", ID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if !assertPortList(t, out, []string{
+		"80/tcp -> 0.0.0.0:9876",
+		"81/tcp -> 0.0.0.0:9877",
+		"82/tcp -> 0.0.0.0:9878"}) {
+		t.Error("Port list is not correct")
+	}
+	runCmd = exec.Command(dockerBinary, "rm", "-f", ID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	// more than one port mapped to the same container port
+	runCmd = exec.Command(dockerBinary, "run", "-d",
+		"-p", "9876:80",
+		"-p", "9999:80",
+		"-p", "9877:81",
+		"-p", "9878:82",
+		"busybox", "top")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	ID = stripTrailingCharacters(out)
+
+	runCmd = exec.Command(dockerBinary, "port", ID, "80")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if !assertPortList(t, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) {
+		t.Error("Port list is not correct")
+	}
+
+	runCmd = exec.Command(dockerBinary, "port", ID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if !assertPortList(t, out, []string{
+		"80/tcp -> 0.0.0.0:9876",
+		"80/tcp -> 0.0.0.0:9999",
+		"81/tcp -> 0.0.0.0:9877",
+		"82/tcp -> 0.0.0.0:9878"}) {
+		t.Error("Port list is not correct\n", out)
+	}
+	runCmd = exec.Command(dockerBinary, "rm", "-f", ID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	deleteAllContainers()
+
+	logDone("port - test port list")
+}
+
+func assertPortList(t *testing.T, out string, expected []string) bool {
+	lines := strings.Split(strings.Trim(out, "\n "), "\n")
+	if len(lines) != len(expected) {
+		t.Errorf("different size lists %q: got %d lines, expected %d", out, len(lines), len(expected))
+		return false
+	}
+	sort.Strings(lines)
+	sort.Strings(expected)
+
+	for i := 0; i < len(expected); i++ {
+		if lines[i] != expected[i] {
+			t.Error("|" + lines[i] + "!=" + expected[i] + "|")
+			return false
+		}
+	}
+
+	return true
+}
diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_ps_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_ps_test.go
--- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_ps_test.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_ps_test.go	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,362 @@
+package main
+
+import (
+	"os/exec"
+	"strings"
+	"testing"
+	"time"
+)
+
+func TestPsListContainers(t *testing.T) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
+	out, _, err := runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	firstID := stripTrailingCharacters(out)
+
+	runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	secondID := stripTrailingCharacters(out)
+
+	// not long running
+	runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	thirdID := stripTrailingCharacters(out)
+
+	runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	fourthID := stripTrailingCharacters(out)
+
+	// make sure third one is not running
+	runCmd = exec.Command(dockerBinary, "wait", thirdID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	// all
+	runCmd = exec.Command(dockerBinary, "ps", "-a")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if !assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	// running
+	runCmd = exec.Command(dockerBinary, "ps")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if !assertContainerList(out, []string{fourthID, secondID, firstID}) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	// from here on, the '-a' flag is ignored
+
+	// limit
+	runCmd = exec.Command(dockerBinary, "ps", "-n=2", "-a")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	expected := []string{fourthID, thirdID}
+
+	if !assertContainerList(out, expected) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	runCmd = exec.Command(dockerBinary, "ps", "-n=2")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if !assertContainerList(out, expected) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	// since
+	runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-a")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	expected = []string{fourthID, thirdID, secondID}
+
+	if !assertContainerList(out, expected) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	runCmd = exec.Command(dockerBinary, "ps", "--since", firstID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if !assertContainerList(out, expected) {
+		t.Error("Container list is not
in the correct order") + } + + // before + runCmd = exec.Command(dockerBinary, "ps", "--before", thirdID, "-a") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + expected = []string{secondID, firstID} + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + runCmd = exec.Command(dockerBinary, "ps", "--before", thirdID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + // since & before + runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-a") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + expected = []string{thirdID, secondID} + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + // since & limit + runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-n=2", "-a") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + expected = []string{fourthID, thirdID} + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-n=2") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + // before & limit + runCmd = exec.Command(dockerBinary, "ps", "--before", fourthID, "-n=1", "-a") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + expected = []string{thirdID} + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + runCmd = exec.Command(dockerBinary, "ps", "--before", fourthID, "-n=1") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + // since & before & limit + runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-n=1", "-a") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + expected = []string{thirdID} + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-n=1") + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if !assertContainerList(out, expected) { + t.Error("Container list is not in the correct order") + } + + deleteAllContainers() + + logDone("ps - test ps options") +} + +func assertContainerList(out string, expected []string) bool { + lines := strings.Split(strings.Trim(out, "\n "), "\n") + if len(lines)-1 != len(expected) { + return false + } + + containerIDIndex := strings.Index(lines[0], "CONTAINER ID") + for i := 0; i < len(expected); i++ { + foundID := lines[i+1][containerIDIndex : containerIDIndex+12] + if foundID != expected[i][:12] { + return false + } + } + + return true +} + +func TestPsListContainersSize(t *testing.T) { + name := "test_size" + runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo 1 > test") + out, _, 
err := runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	id, err := getIDByName(name)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	runCmd = exec.Command(dockerBinary, "ps", "-s", "-n=1")
+	wait := make(chan struct{})
+	go func() {
+		out, _, err = runCommandWithOutput(runCmd)
+		close(wait)
+	}()
+	select {
+	case <-wait:
+	case <-time.After(3 * time.Second):
+		t.Fatalf("Calling \"docker ps -s\" timed out!")
+	}
+	errorOut(err, t, out)
+	lines := strings.Split(strings.Trim(out, "\n "), "\n")
+	sizeIndex := strings.Index(lines[0], "SIZE")
+	idIndex := strings.Index(lines[0], "CONTAINER ID")
+	foundID := lines[1][idIndex : idIndex+12]
+	if foundID != id[:12] {
+		t.Fatalf("Expected id %s, got %s", id[:12], foundID)
+	}
+	expectedSize := "2 B"
+	foundSize := lines[1][sizeIndex:]
+	if foundSize != expectedSize {
+		t.Fatalf("Expected size %q, got %q", expectedSize, foundSize)
+	}
+
+	deleteAllContainers()
+	logDone("ps - test ps size")
+}
+
+func TestPsListContainersFilterStatus(t *testing.T) {
+	// FIXME: this should test paused, but it makes things hang and it's wonky
+	// this is because paused containers can't be controlled by signals
+
+	// start exited container
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox")
+	out, _, err := runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	firstID := stripTrailingCharacters(out)
+
+	// make sure the exited container is not running
+	runCmd = exec.Command(dockerBinary, "wait", firstID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	// start running container
+	runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 360")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	secondID := stripTrailingCharacters(out)
+
+	// filter containers by exited
+	runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=status=exited")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	containerOut := strings.TrimSpace(out)
+	if containerOut != firstID[:12] {
+		t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out)
+	}
+
+	runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=status=running")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	containerOut = strings.TrimSpace(out)
+	if containerOut != secondID[:12] {
+		t.Fatalf("Expected id %s, got %s for running filter, output: %q", secondID[:12], containerOut, out)
+	}
+
+	deleteAllContainers()
+
+	logDone("ps - test ps filter status")
+}
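+
+// A small sketch (illustrative, not part of the upstream suite) of the
+// filter pattern exercised by the status test above and the exited test
+// below: `docker ps -a -q --filter=...` prints one (possibly truncated)
+// container ID per line, so a test can split the output and compare IDs.
+// It assumes the dockerBinary variable and runCommandWithOutput helper.
+func psWithFilter(t *testing.T, filter string) []string {
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "ps", "-a", "-q", "--filter="+filter))
+	if err != nil {
+		t.Fatalf("ps --filter=%s failed: %v %s", filter, err, out)
+	}
+	// e.g. psWithFilter(t, "exited=0") returns the IDs of cleanly exited containers
+	return strings.Split(strings.TrimSpace(out), "\n")
+}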
+
+func TestPsListContainersFilterExited(t *testing.T) {
+	deleteAllContainers()
+	defer deleteAllContainers()
+	runCmd := exec.Command(dockerBinary, "run", "--name", "zero1", "busybox", "true")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		t.Fatal(out, err)
+	}
+	firstZero, err := getIDByName("zero1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	runCmd = exec.Command(dockerBinary, "run", "--name", "zero2", "busybox", "true")
+	out, _, err = runCommandWithOutput(runCmd)
+	if err != nil {
+		t.Fatal(out, err)
+	}
+	secondZero, err := getIDByName("zero2")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	runCmd = exec.Command(dockerBinary, "run", "--name", "nonzero1", "busybox", "false")
+	out, _, err = runCommandWithOutput(runCmd)
+	if err == nil {
+		t.Fatal("Should fail.", out, err)
+	}
+	firstNonZero, err := getIDByName("nonzero1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	runCmd = exec.Command(dockerBinary, "run", "--name", "nonzero2", "busybox", "false")
+	out, _, err = runCommandWithOutput(runCmd)
+	if err == nil {
+		t.Fatal("Should fail.", out, err)
+	}
+	secondNonZero, err := getIDByName("nonzero2")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// filter containers by exited=0
+	runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=exited=0")
+	out, _, err = runCommandWithOutput(runCmd)
+	if err != nil {
+		t.Fatal(out, err)
+	}
+	ids := strings.Split(strings.TrimSpace(out), "\n")
+	if len(ids) != 2 {
+		t.Fatalf("Should be 2 zero exited containers, got %d", len(ids))
+	}
+	if ids[0] != secondZero {
+		t.Fatalf("First in list should be %q, got %q", secondZero, ids[0])
+	}
+	if ids[1] != firstZero {
+		t.Fatalf("Second in list should be %q, got %q", firstZero, ids[1])
+	}
+
+	runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=exited=1")
+	out, _, err = runCommandWithOutput(runCmd)
+	if err != nil {
+		t.Fatal(out, err)
+	}
+	ids = strings.Split(strings.TrimSpace(out), "\n")
+	if len(ids) != 2 {
+		t.Fatalf("Should be 2 non-zero exited containers, got %d", len(ids))
+	}
+	if ids[0] != secondNonZero {
+		t.Fatalf("First in list should be %q, got %q", secondNonZero, ids[0])
+	}
+	if ids[1] != firstNonZero {
+		t.Fatalf("Second in list should be %q, got %q", firstNonZero, ids[1])
+	}
+	logDone("ps - test ps filter exited")
+}
diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_pull_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_pull_test.go
--- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_pull_test.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_pull_test.go	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,32 @@
+package main
+
+import (
+	"fmt"
+	"os/exec"
+	"testing"
+)
+
+// FIXME: we need a test for pulling all aliases for an image (issue #8141)
+
+// pulling an image from the central registry should work
+func TestPullImageFromCentralRegistry(t *testing.T) {
+	pullCmd := exec.Command(dockerBinary, "pull", "scratch")
+	out, exitCode, err := runCommandWithOutput(pullCmd)
+	errorOut(err, t, fmt.Sprintf("%s %s", out, err))
+
+	if err != nil || exitCode != 0 {
+		t.Fatal("pulling the scratch image from the registry has failed")
+	}
+	logDone("pull - pull scratch")
+}
+
+// pulling a non-existing image from the central registry should return a non-zero exit code
+func TestPullNonExistingImage(t *testing.T) {
+	pullCmd := exec.Command(dockerBinary, "pull", "fooblahblah1234")
+	_, exitCode, err := runCommandWithOutput(pullCmd)
+
+	if err == nil || exitCode == 0 {
+		t.Fatal("expected non-zero exit status when pulling non-existing image")
+	}
+	logDone("pull - pull fooblahblah1234 (non-existing image)")
+}
diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_push_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_push_test.go
--- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_push_test.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_push_test.go	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,48 @@
+package main
+
+import (
+	"fmt"
+	"os/exec"
+	"testing"
+)
+
+// these tests need a freshly started empty private docker registry
+
+// pushing an image to the private registry should work
+func TestPushBusyboxImage(t *testing.T) {
+	// skip this test until we're able to use a registry
+	t.Skip()
+	// tag the image to upload it to the private registry
+	repoName := fmt.Sprintf("%v/busybox", privateRegistryURL)
+	tagCmd := exec.Command(dockerBinary, "tag", "busybox", repoName)
+	out, exitCode, err :=
runCommandWithOutput(tagCmd) + errorOut(err, t, fmt.Sprintf("%v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("image tagging failed") + } + + pushCmd := exec.Command(dockerBinary, "push", repoName) + out, exitCode, err = runCommandWithOutput(pushCmd) + errorOut(err, t, fmt.Sprintf("%v %v", out, err)) + + deleteImages(repoName) + + if err != nil || exitCode != 0 { + t.Fatal("pushing the image to the private registry has failed") + } + logDone("push - push busybox to private registry") +} + +// pushing an image without a prefix should throw an error +func TestPushUnprefixedRepo(t *testing.T) { + // skip this test until we're able to use a registry + t.Skip() + pushCmd := exec.Command(dockerBinary, "push", "busybox") + _, exitCode, err := runCommandWithOutput(pushCmd) + + if err == nil || exitCode == 0 { + t.Fatal("pushing an unprefixed repo didn't result in a non-zero exit status") + } + logDone("push - push unprefixed busybox repo --> must fail") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_restart_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_restart_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_restart_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_restart_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,127 @@ +package main + +import ( + "os/exec" + "strings" + "testing" + "time" +) + +func TestRestartStoppedContainer(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "foobar") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + runCmd = exec.Command(dockerBinary, "wait", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if out != "foobar\n" { + t.Errorf("container should've printed 'foobar'") + } + + runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if out != "foobar\nfoobar\n" { + t.Errorf("container should've printed 'foobar' twice") + } + + deleteAllContainers() + + logDone("restart - echo foobar for stopped container") +} + +func TestRestartRunningContainer(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "echo foobar && sleep 30 && echo 'should not print this'") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + time.Sleep(1 * time.Second) + + runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if out != "foobar\n" { + t.Errorf("container should've printed 'foobar'") + } + + runCmd = exec.Command(dockerBinary, "restart", "-t", "1", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + time.Sleep(1 * time.Second) + + if out != "foobar\nfoobar\n" { + t.Errorf("container should've printed 'foobar' twice") + } + + deleteAllContainers() + + logDone("restart - echo foobar for running 
container") +} + +// Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819. +func TestRestartWithVolumes(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "-v", "/test", "busybox", "top") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, out) + + cleanedContainerID := stripTrailingCharacters(out) + + runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ len .Volumes }}", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if out = strings.Trim(out, " \n\r"); out != "1" { + t.Errorf("expect 1 volume received %s", out) + } + + runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ .Volumes }}", cleanedContainerID) + volumes, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, volumes) + + runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ len .Volumes }}", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + errorOut(err, t, out) + + if out = strings.Trim(out, " \n\r"); out != "1" { + t.Errorf("expect 1 volume after restart received %s", out) + } + + runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ .Volumes }}", cleanedContainerID) + volumesAfterRestart, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, volumesAfterRestart) + + if volumes != volumesAfterRestart { + volumes = strings.Trim(volumes, " \n\r") + volumesAfterRestart = strings.Trim(volumesAfterRestart, " \n\r") + t.Errorf("expected volume path: %s Actual path: %s", volumes, volumesAfterRestart) + } + + deleteAllContainers() + + logDone("restart - does not create a new volume on restart") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_rmi_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_rmi_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_rmi_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_rmi_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,100 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +func TestRmiWithContainerFails(t *testing.T) { + errSubstr := "is using it" + + // create a container + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err)) + + cleanedContainerID := stripTrailingCharacters(out) + + // try to delete the image + runCmd = exec.Command(dockerBinary, "rmi", "busybox") + out, _, err = runCommandWithOutput(runCmd) + if err == nil { + t.Fatalf("Container %q is using image, should not be able to rmi: %q", cleanedContainerID, out) + } + if !strings.Contains(out, errSubstr) { + t.Fatalf("Container %q is using image, error message should contain %q: %v", cleanedContainerID, errSubstr, out) + } + + // make sure it didn't delete the busybox name + images, _, _ := cmd(t, "images") + if !strings.Contains(images, "busybox") { + t.Fatalf("The name 'busybox' should not have been removed from images: %q", images) + } + + deleteContainer(cleanedContainerID) + + logDone("rmi- container using image while rmi, should not remove image name") +} + +func TestRmiTag(t *testing.T) { + imagesBefore, _, _ := cmd(t, "images", "-a") + cmd(t, "tag", "busybox", "utest:tag1") + cmd(t, "tag", "busybox", "utest/docker:tag2") + cmd(t, "tag", "busybox", 
"utest:5000/docker:tag3") + { + imagesAfter, _, _ := cmd(t, "images", "-a") + if nLines(imagesAfter) != nLines(imagesBefore)+3 { + t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) + } + } + cmd(t, "rmi", "utest/docker:tag2") + { + imagesAfter, _, _ := cmd(t, "images", "-a") + if nLines(imagesAfter) != nLines(imagesBefore)+2 { + t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) + } + + } + cmd(t, "rmi", "utest:5000/docker:tag3") + { + imagesAfter, _, _ := cmd(t, "images", "-a") + if nLines(imagesAfter) != nLines(imagesBefore)+1 { + t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) + } + + } + cmd(t, "rmi", "utest:tag1") + { + imagesAfter, _, _ := cmd(t, "images", "-a") + if nLines(imagesAfter) != nLines(imagesBefore)+0 { + t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) + } + + } + logDone("tag,rmi- tagging the same images multiple times then removing tags") +} + +func TestRmiTagWithExistingContainers(t *testing.T) { + container := "test-delete-tag" + newtag := "busybox:newtag" + bb := "busybox:latest" + if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "tag", bb, newtag)); err != nil { + t.Fatalf("Could not tag busybox: %v: %s", err, out) + } + if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", container, bb, "/bin/true")); err != nil { + t.Fatalf("Could not run busybox: %v: %s", err, out) + } + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rmi", newtag)) + if err != nil { + t.Fatalf("Could not remove tag %s: %v: %s", newtag, err, out) + } + if d := strings.Count(out, "Untagged: "); d != 1 { + t.Fatalf("Expected 1 untagged entry got %d: %q", d, out) + } + + deleteAllContainers() + + logDone("rmi - delete tag with existing containers") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_rm_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_rm_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_rm_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_rm_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,128 @@ +package main + +import ( + "os" + "os/exec" + "strings" + "testing" +) + +func TestRmContainerWithRemovedVolume(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "losemyvolumes", "-v", "/tmp/testing:/test", "busybox", "true") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + if err := os.Remove("/tmp/testing"); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "rm", "-v", "losemyvolumes") + if out, _, err := runCommandWithOutput(cmd); err != nil { + t.Fatal(out, err) + } + + deleteAllContainers() + + logDone("rm - removed volume") +} + +func TestRmContainerWithVolume(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "foo", "-v", "/srv", "busybox", "true") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "rm", "-v", "foo") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + + logDone("rm - volume") +} + +func TestRmRunningContainer(t *testing.T) { + createRunningContainer(t, "foo") + + // Test cannot remove running container + cmd := exec.Command(dockerBinary, "rm", "foo") + if _, err := runCommand(cmd); err == nil { + t.Fatalf("Expected error, can't rm a running container") + } + + deleteAllContainers() + + logDone("rm - running container") +} + +func TestRmForceRemoveRunningContainer(t *testing.T) { + 
createRunningContainer(t, "foo")
+
+	// Stop then remove with -f
+	cmd := exec.Command(dockerBinary, "rm", "-f", "foo")
+	if _, err := runCommand(cmd); err != nil {
+		t.Fatal(err)
+	}
+
+	deleteAllContainers()
+
+	logDone("rm - running container with --force=true")
+}
+
+func TestRmContainerOrphaning(t *testing.T) {
+	dockerfile1 := `FROM busybox:latest
+	ENTRYPOINT ["/bin/true"]`
+	img := "test-container-orphaning"
+	dockerfile2 := `FROM busybox:latest
+	ENTRYPOINT ["/bin/true"]
+	MAINTAINER Integration Tests`
+
+	// build first dockerfile
+	img1, err := buildImage(img, dockerfile1, true)
+	if err != nil {
+		t.Fatalf("Could not build image %s: %v", img, err)
+	}
+	// run container on first image
+	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", img)); err != nil {
+		t.Fatalf("Could not run image %s: %v: %s", img, err, out)
+	}
+	// rebuild dockerfile with a small addition at the end
+	if _, err := buildImage(img, dockerfile2, true); err != nil {
+		t.Fatalf("Could not rebuild image %s: %v", img, err)
+	}
+	// try to remove the image, should error out.
+	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rmi", img)); err == nil {
+		t.Fatalf("Expected to error out removing the image, but succeeded: %s", out)
+	}
+	// check if we deleted the first image
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "images", "-q", "--no-trunc"))
+	if err != nil {
+		t.Fatalf("%v: %s", err, out)
+	}
+	if !strings.Contains(out, img1) {
+		t.Fatalf("Orphaned container (could not find '%s' in docker images): %s", img1, out)
+	}
+
+	deleteAllContainers()
+
+	logDone("rm - container orphaning")
+}
+
+func TestRmInvalidContainer(t *testing.T) {
+	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rm", "unknown")); err == nil {
+		t.Fatal("Expected error on rm unknown container, got none")
+	} else if !strings.Contains(out, "failed to remove one or more containers") {
+		t.Fatalf("Expected output to contain 'failed to remove one or more containers', got %q", out)
+	}
+
+	logDone("rm - delete unknown container")
+}
+
+func createRunningContainer(t *testing.T, name string) {
+	cmd := exec.Command(dockerBinary, "run", "-dt", "--name", name, "busybox", "top")
+	if _, err := runCommand(cmd); err != nil {
+		t.Fatal(err)
+	}
+}
diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_run_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_run_test.go
--- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_run_test.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_run_test.go	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,2442 @@
+package main
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"reflect"
+	"regexp"
+	"sort"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/networkfs/resolvconf"
+	"github.com/kr/pty"
+)
+
+// "test123" should be printed by docker run
+func TestRunEchoStdout(t *testing.T) {
+	runCmd := exec.Command(dockerBinary, "run", "busybox", "echo", "test123")
+	out, _, _, err := runCommandWithStdoutStderr(runCmd)
+	if err != nil {
+		t.Fatalf("failed to run container: %v, output: %q", err, out)
+	}
+
+	if out != "test123\n" {
+		t.Errorf("container should've printed 'test123'")
+	}
+
+	deleteAllContainers()
+
+	logDone("run - echo test123")
+}
+
+// "test" should be printed
+func TestRunEchoStdoutWithMemoryLimit(t *testing.T) {
+	runCmd := exec.Command(dockerBinary,
"run", "-m", "4m", "busybox", "echo", "test") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out = strings.Trim(out, "\r\n") + + if expected := "test"; out != expected { + t.Errorf("container should've printed %q but printed %q", expected, out) + + } + + deleteAllContainers() + + logDone("run - echo with memory limit") +} + +// "test" should be printed +func TestRunEchoStdoutWitCPULimit(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "busybox", "echo", "test") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + if out != "test\n" { + t.Errorf("container should've printed 'test'") + } + + deleteAllContainers() + + logDone("run - echo with CPU limit") +} + +// "test" should be printed +func TestRunEchoStdoutWithCPUAndMemoryLimit(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "-m", "4m", "busybox", "echo", "test") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + if out != "test\n" { + t.Errorf("container should've printed 'test', got %q instead", out) + } + + deleteAllContainers() + + logDone("run - echo with CPU and memory limit") +} + +// "test" should be printed +func TestRunEchoNamedContainer(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--name", "testfoonamedcontainer", "busybox", "echo", "test") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + if out != "test\n" { + t.Errorf("container should've printed 'test'") + } + + if err := deleteContainer("testfoonamedcontainer"); err != nil { + t.Errorf("failed to remove the named container: %v", err) + } + + deleteAllContainers() + + logDone("run - echo with named container") +} + +// docker run should not leak file descriptors +func TestRunLeakyFileDescriptors(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "busybox", "ls", "-C", "/proc/self/fd") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + // normally, we should only get 0, 1, and 2, but 3 gets created by "ls" when it does "opendir" on the "fd" directory + if out != "0 1 2 3\n" { + t.Errorf("container should've printed '0 1 2 3', not: %s", out) + } + + deleteAllContainers() + + logDone("run - check file descriptor leakage") +} + +// it should be possible to ping Google DNS resolver +// this will fail when Internet access is unavailable +func TestRunPingGoogle(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "busybox", "ping", "-c", "1", "8.8.8.8") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + errorOut(err, t, "container should've been able to ping 8.8.8.8") + + deleteAllContainers() + + logDone("run - ping 8.8.8.8") +} + +// the exit code should be 0 +// some versions of lxc might make this test fail +func TestRunExitCodeZero(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "busybox", "true") + exitCode, err := runCommand(runCmd) + errorOut(err, t, fmt.Sprintf("%s", err)) + + if exitCode != 0 { + t.Errorf("container should've exited with exit code 0") + } + + deleteAllContainers() + + logDone("run - exit with 
0") +} + +// the exit code should be 1 +// some versions of lxc might make this test fail +func TestRunExitCodeOne(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "busybox", "false") + exitCode, err := runCommand(runCmd) + if err != nil && !strings.Contains("exit status 1", fmt.Sprintf("%s", err)) { + t.Fatal(err) + } + if exitCode != 1 { + t.Errorf("container should've exited with exit code 1") + } + + deleteAllContainers() + + logDone("run - exit with 1") +} + +// it should be possible to pipe in data via stdin to a process running in a container +// some versions of lxc might make this test fail +func TestRunStdinPipe(t *testing.T) { + runCmd := exec.Command("bash", "-c", `echo "blahblah" | docker run -i -a stdin busybox cat`) + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out = stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", out) + inspectOut, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("out should've been a container id: %s %s", out, inspectOut)) + + waitCmd := exec.Command(dockerBinary, "wait", out) + _, _, err = runCommandWithOutput(waitCmd) + errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + + logsCmd := exec.Command(dockerBinary, "logs", out) + containerLogs, _, err := runCommandWithOutput(logsCmd) + errorOut(err, t, fmt.Sprintf("error thrown while trying to get container logs: %s", err)) + + containerLogs = stripTrailingCharacters(containerLogs) + + if containerLogs != "blahblah" { + t.Errorf("logs didn't print the container's logs %s", containerLogs) + } + + rmCmd := exec.Command(dockerBinary, "rm", out) + _, _, err = runCommandWithOutput(rmCmd) + errorOut(err, t, fmt.Sprintf("rm failed to remove container %s", err)) + + deleteAllContainers() + + logDone("run - pipe in with -i -a stdin") +} + +// the container's ID should be printed when starting a container in detached mode +func TestRunDetachedContainerIDPrinting(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out = stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", out) + inspectOut, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("out should've been a container id: %s %s", out, inspectOut)) + + waitCmd := exec.Command(dockerBinary, "wait", out) + _, _, err = runCommandWithOutput(waitCmd) + errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + + rmCmd := exec.Command(dockerBinary, "rm", out) + rmOut, _, err := runCommandWithOutput(rmCmd) + errorOut(err, t, "rm failed to remove container") + + rmOut = stripTrailingCharacters(rmOut) + if rmOut != out { + t.Errorf("rm didn't print the container ID %s %s", out, rmOut) + } + + deleteAllContainers() + + logDone("run - print container ID in detached mode") +} + +// the working directory should be set correctly +func TestRunWorkingDirectory(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-w", "/root", "busybox", "pwd") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out = stripTrailingCharacters(out) + + if out != "/root" { + t.Errorf("-w failed to set working directory") + } + + runCmd = 
exec.Command(dockerBinary, "run", "--workdir", "/root", "busybox", "pwd") + out, _, _, err = runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + out = stripTrailingCharacters(out) + + if out != "/root" { + t.Errorf("--workdir failed to set working directory") + } + + deleteAllContainers() + + logDone("run - run with working directory set by -w") + logDone("run - run with working directory set by --workdir") +} + +// pinging Google's DNS resolver should fail when we disable the networking +func TestRunWithoutNetworking(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ping", "-c", "1", "8.8.8.8") + out, _, exitCode, err := runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 1 { + t.Fatal(out, err) + } + if exitCode != 1 { + t.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") + } + + runCmd = exec.Command(dockerBinary, "run", "-n=false", "busybox", "ping", "-c", "1", "8.8.8.8") + out, _, exitCode, err = runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 1 { + t.Fatal(out, err) + } + if exitCode != 1 { + t.Errorf("-n=false should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") + } + + deleteAllContainers() + + logDone("run - disable networking with --net=none") + logDone("run - disable networking with -n=false") +} + +// Regression test for #4741 +func TestRunWithVolumesAsFiles(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/etc/hosts:/target-file", "busybox", "true") + out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 0 { + t.Fatal("1", out, stderr, err) + } + + runCmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-data", "busybox", "cat", "/target-file") + out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 0 { + t.Fatal("2", out, stderr, err) + } + deleteAllContainers() + + logDone("run - regression test for #4741 - volumes from as files") +} + +// Regression test for #4979 +func TestRunWithVolumesFromExited(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file") + out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 0 { + t.Fatal("1", out, stderr, err) + } + + runCmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file") + out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 0 { + t.Fatal("2", out, stderr, err) + } + deleteAllContainers() + + logDone("run - regression test for #4979 - volumes-from on exited container") +} + +// Regression test for #4830 +func TestRunWithRelativePath(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-v", "tmp:/other-tmp", "busybox", "true") + if _, _, _, err := runCommandWithStdoutStderr(runCmd); err == nil { + t.Fatalf("relative path should result in an error") + } + + deleteAllContainers() + + logDone("run - volume with relative path") +} + +func TestRunVolumesMountedAsReadonly(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile") + if code, err := runCommand(cmd); err == nil || code == 0 { + t.Fatalf("run should fail because volume is ro: exit code %d", code) + } + + deleteAllContainers() + + logDone("run - volumes as readonly mount") +} 
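+
+// A minimal sketch of a helper that could factor out the run-and-expect-
+// failure pattern used by the read-only volume test above; it assumes the
+// dockerBinary variable and the runCommand helper used throughout this
+// suite, and is illustrative only, not part of the upstream change.
+func mustFailRun(t *testing.T, args ...string) {
+	cmd := exec.Command(dockerBinary, append([]string{"run"}, args...)...)
+	if code, err := runCommand(cmd); err == nil || code == 0 {
+		t.Fatalf("expected docker run %v to fail, got exit code %d", args, code)
+	}
+	// e.g. mustFailRun(t, "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile")
+}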
+
+func TestRunVolumesFromInReadonlyMode(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "true")
+	if _, err := runCommand(cmd); err != nil {
+		t.Fatal(err)
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:ro", "busybox", "touch", "/test/file")
+	if code, err := runCommand(cmd); err == nil || code == 0 {
+		t.Fatalf("run should fail because volume is ro: exit code %d", code)
+	}
+
+	deleteAllContainers()
+
+	logDone("run - volumes from as readonly mount")
+}
+
+// Regression test for #1201
+func TestRunVolumesFromInReadWriteMode(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "true")
+	if _, err := runCommand(cmd); err != nil {
+		t.Fatal(err)
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:rw", "busybox", "touch", "/test/file")
+	if out, _, err := runCommandWithOutput(cmd); err != nil {
+		t.Fatalf("running --volumes-from parent:rw failed with output: %q\nerror: %v", out, err)
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:bar", "busybox", "touch", "/test/file")
+	if out, _, err := runCommandWithOutput(cmd); err == nil || !strings.Contains(out, "Invalid mode for volumes-from: bar") {
+		t.Fatalf("running --volumes-from foo:bar should have failed with invalid mount mode: %q", out)
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent", "busybox", "touch", "/test/file")
+	if out, _, err := runCommandWithOutput(cmd); err != nil {
+		t.Fatalf("running --volumes-from parent failed with output: %q\nerror: %v", out, err)
+	}
+
+	deleteAllContainers()
+
+	logDone("run - volumes from as read write mount")
+}
+
+func TestVolumesFromGetsProperMode(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test:/test:ro", "busybox", "true")
+	if _, err := runCommand(cmd); err != nil {
+		t.Fatal(err)
+	}
+	// Expect this "rw" mode to be ignored since the inherited volume is "ro"
+	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:rw", "busybox", "touch", "/test/file")
+	if _, err := runCommand(cmd); err == nil {
+		t.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`")
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "--name", "parent2", "-v", "/test:/test:ro", "busybox", "true")
+	if _, err := runCommand(cmd); err != nil {
+		t.Fatal(err)
+	}
+	// Expect this to be read-only since both are "ro"
+	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent2:ro", "busybox", "touch", "/test/file")
+	if _, err := runCommand(cmd); err == nil {
+		t.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`")
+	}
+
+	deleteAllContainers()
+
+	logDone("run - volumes from ignores `rw` if inherited volume is `ro`")
+}
+
+// Test for #1351
+func TestRunApplyVolumesFromBeforeVolumes(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "touch", "/test/foo")
+	if _, err := runCommand(cmd); err != nil {
+		t.Fatal(err)
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent", "-v", "/test", "busybox", "cat", "/test/foo")
+	if out, _, err := runCommandWithOutput(cmd); err != nil {
+		t.Fatal(out, err)
+	}
+
+	deleteAllContainers()
+
+	logDone("run - volumes from mounted first")
+}
+
+func TestRunMultipleVolumesFrom(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--name", "parent1", "-v", "/test", "busybox", "touch", "/test/foo")
+	if _, err := runCommand(cmd); err != nil {
+		t.Fatal(err)
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "--name", "parent2", "-v", "/other", "busybox", "touch", "/other/bar")
+	if _, err := runCommand(cmd); err != nil {
+		t.Fatal(err)
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent1", "--volumes-from", "parent2",
+		"busybox", "sh", "-c", "cat /test/foo && cat /other/bar")
+	if _, err := runCommand(cmd); err != nil {
+		t.Fatal(err)
+	}
+
+	deleteAllContainers()
+
+	logDone("run - multiple volumes from")
+}
+
+// this test verifies the ID format for the container
+func TestRunVerifyContainerID(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+	out, exit, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if exit != 0 {
+		t.Fatalf("expected exit code 0 received %d", exit)
+	}
+	match, err := regexp.MatchString("^[0-9a-f]{64}$", strings.TrimSuffix(out, "\n"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !match {
+		t.Fatalf("Invalid container ID: %s", out)
+	}
+
+	deleteAllContainers()
+
+	logDone("run - verify container ID")
+}
+
+// Test that creating a container with a volume doesn't crash. Regression test for #995.
+func TestRunCreateVolume(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "-v", "/var/lib/data", "busybox", "true")
+	if _, err := runCommand(cmd); err != nil {
+		t.Fatal(err)
+	}
+
+	deleteAllContainers()
+
+	logDone("run - create docker managed volume")
+}
+
+// Test that creating a volume with a symlink in its path works correctly. Test for #5152.
+// Note that this bug happens only with symlinks with a target that starts with '/'.
+func TestRunCreateVolumeWithSymlink(t *testing.T) {
+	buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-createvolumewithsymlink", "-")
+	buildCmd.Stdin = strings.NewReader(`FROM busybox
+		RUN mkdir /foo && ln -s /foo /bar`)
+	buildCmd.Dir = workingDirectory
+	err := buildCmd.Run()
+	if err != nil {
+		t.Fatalf("could not build 'docker-test-createvolumewithsymlink': %v", err)
+	}
+
+	cmd := exec.Command(dockerBinary, "run", "-v", "/bar/foo", "--name", "test-createvolumewithsymlink", "docker-test-createvolumewithsymlink", "sh", "-c", "mount | grep -q /foo/foo")
+	exitCode, err := runCommand(cmd)
+	if err != nil || exitCode != 0 {
+		t.Fatalf("[run] err: %v, exitcode: %d", err, exitCode)
+	}
+
+	var volPath string
+	cmd = exec.Command(dockerBinary, "inspect", "-f", "{{range .Volumes}}{{.}}{{end}}", "test-createvolumewithsymlink")
+	volPath, exitCode, err = runCommandWithOutput(cmd)
+	if err != nil || exitCode != 0 {
+		t.Fatalf("[inspect] err: %v, exitcode: %d", err, exitCode)
+	}
+
+	cmd = exec.Command(dockerBinary, "rm", "-v", "test-createvolumewithsymlink")
+	exitCode, err = runCommand(cmd)
+	if err != nil || exitCode != 0 {
+		t.Fatalf("[rm] err: %v, exitcode: %d", err, exitCode)
+	}
+
+	f, err := os.Open(volPath)
+	defer f.Close()
+	if !os.IsNotExist(err) {
+		t.Fatalf("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath)
+	}
+
+	deleteImages("docker-test-createvolumewithsymlink")
+	deleteAllContainers()
+
+	logDone("run - create volume with symlink")
+}
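+
+// Sketch of the `docker inspect -f` usage seen above (illustrative, not
+// part of the upstream change): a Go template pulls a single field out of
+// the container's JSON, which keeps assertions free of manual JSON parsing.
+// Assumes the dockerBinary variable and runCommandWithOutput helper.
+func inspectField(t *testing.T, name, format string) string {
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "inspect", "-f", format, name))
+	if err != nil {
+		t.Fatalf("failed to inspect %s: %v %s", name, err, out)
+	}
+	// e.g. inspectField(t, "test-createvolumewithsymlink", "{{range .Volumes}}{{.}}{{end}}")
+	return strings.TrimSpace(out)
+}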
+
+// Tests that a volume path that has a symlink exists in a container mounting it with `--volumes-from`.
+func TestRunVolumesFromSymlinkPath(t *testing.T) {
+	buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-volumesfromsymlinkpath", "-")
+	buildCmd.Stdin = strings.NewReader(`FROM busybox
+		RUN mkdir /baz && ln -s /baz /foo
+		VOLUME ["/foo/bar"]`)
+	buildCmd.Dir = workingDirectory
+	err := buildCmd.Run()
+	if err != nil {
+		t.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err)
+	}
+
+	cmd := exec.Command(dockerBinary, "run", "--name", "test-volumesfromsymlinkpath", "docker-test-volumesfromsymlinkpath")
+	exitCode, err := runCommand(cmd)
+	if err != nil || exitCode != 0 {
+		t.Fatalf("[run] (volume) err: %v, exitcode: %d", err, exitCode)
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-volumesfromsymlinkpath", "busybox", "sh", "-c", "ls /foo | grep -q bar")
+	exitCode, err = runCommand(cmd)
+	if err != nil || exitCode != 0 {
+		t.Fatalf("[run] err: %v, exitcode: %d", err, exitCode)
+	}
+
+	deleteImages("docker-test-volumesfromsymlinkpath")
+	deleteAllContainers()
+
+	logDone("run - volumes-from symlink path")
+}
+
+func TestRunExitCode(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "busybox", "/bin/sh", "-c", "exit 72")
+
+	exit, err := runCommand(cmd)
+	if err == nil {
+		t.Fatal("expected a non-nil error for a non-zero exit code")
+	}
+	if exit != 72 {
+		t.Fatalf("expected exit code 72 received %d", exit)
+	}
+
+	deleteAllContainers()
+
+	logDone("run - correct exit code")
+}
+
+func TestRunUserDefaultsToRoot(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "busybox", "id")
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	if !strings.Contains(out, "uid=0(root) gid=0(root)") {
+		t.Fatalf("expected root user got %s", out)
+	}
+	deleteAllContainers()
+
+	logDone("run - default user")
+}
+
+func TestRunUserByName(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "-u", "root", "busybox", "id")
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	if !strings.Contains(out, "uid=0(root) gid=0(root)") {
+		t.Fatalf("expected root user got %s", out)
+	}
+	deleteAllContainers()
+
+	logDone("run - user by name")
+}
+
+func TestRunUserByID(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "-u", "1", "busybox", "id")
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") {
+		t.Fatalf("expected daemon user got %s", out)
+	}
+	deleteAllContainers()
+
+	logDone("run - user by id")
+}
+
+func TestRunUserByIDBig(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "-u", "2147483648", "busybox", "id")
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err == nil {
+		t.Fatal("expected an error, got none:", out)
+	}
+	if !strings.Contains(out, "Uids and gids must be in range") {
+		t.Fatalf("expected error about uids range, got %s", out)
+	}
+	deleteAllContainers()
+
+	logDone("run - user by id, id too big")
+}
+
+func TestRunUserByIDNegative(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "-u", "-1", "busybox", "id")
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err == nil {
+		t.Fatal("expected an error, got none:", out)
+	}
+	if !strings.Contains(out, "Uids and gids must be in range") {
+		t.Fatalf("expected error about uids range, got %s", out)
+	}
+	deleteAllContainers()
+
+	logDone("run - user by id, id negative")
+}
+
+func TestRunUserByIDZero(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "-u", "0", "busybox", "id")
+
+	out, _, err :=
runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	if !strings.Contains(out, "uid=0(root) gid=0(root) groups=10(wheel)") {
+		t.Fatalf("expected root user with wheel group, got %s", out)
+	}
+	deleteAllContainers()
+
+	logDone("run - user by id, zero uid")
+}
+
+func TestRunUserNotFound(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "-u", "notme", "busybox", "id")
+
+	_, err := runCommand(cmd)
+	if err == nil {
+		t.Fatal("unknown user should cause container to fail")
+	}
+	deleteAllContainers()
+
+	logDone("run - user not found")
+}
+
+func TestRunTwoConcurrentContainers(t *testing.T) {
+	group := sync.WaitGroup{}
+	group.Add(2)
+
+	for i := 0; i < 2; i++ {
+		go func() {
+			defer group.Done()
+			cmd := exec.Command(dockerBinary, "run", "busybox", "sleep", "2")
+			if _, err := runCommand(cmd); err != nil {
+				t.Fatal(err)
+			}
+		}()
+	}
+
+	group.Wait()
+
+	deleteAllContainers()
+
+	logDone("run - two concurrent containers")
+}
+
+func TestRunEnvironment(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env")
+	cmd.Env = append(os.Environ(),
+		"TRUE=false",
+		"TRICKY=tri\ncky\n",
+	)
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+
+	actualEnv := strings.Split(out, "\n")
+	if actualEnv[len(actualEnv)-1] == "" {
+		actualEnv = actualEnv[:len(actualEnv)-1]
+	}
+	sort.Strings(actualEnv)
+
+	goodEnv := []string{
+		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+		"HOSTNAME=testing",
+		"FALSE=true",
+		"TRUE=false",
+		"TRICKY=tri",
+		"cky",
+		"",
+		"HOME=/root",
+	}
+	sort.Strings(goodEnv)
+	if len(goodEnv) != len(actualEnv) {
+		t.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", "))
+	}
+	for i := range goodEnv {
+		if actualEnv[i] != goodEnv[i] {
+			t.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i])
+		}
+	}
+
+	deleteAllContainers()
+
+	logDone("run - verify environment")
+}
+
+func TestRunContainerNetwork(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "busybox", "ping", "-c", "1", "127.0.0.1")
+	if _, err := runCommand(cmd); err != nil {
+		t.Fatal(err)
+	}
+
+	deleteAllContainers()
+
+	logDone("run - test container network via ping")
+}
+
+// Issue #4681
+func TestRunLoopbackWhenNetworkDisabled(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1")
+	if _, err := runCommand(cmd); err != nil {
+		t.Fatal(err)
+	}
+
+	deleteAllContainers()
+
+	logDone("run - test container loopback when networking disabled")
+}
+
+func TestRunNetHostNotAllowedWithLinks(t *testing.T) {
+	_, _, err := cmd(t, "run", "--name", "linked", "busybox", "true")
+
+	cmd := exec.Command(dockerBinary, "run", "--net=host", "--link", "linked:linked", "busybox", "true")
+	_, _, err = runCommandWithOutput(cmd)
+	if err == nil {
+		t.Fatal("Expected error")
+	}
+
+	deleteAllContainers()
+
+	logDone("run - don't allow --net=host to be used with links")
+}
+
+func TestRunLoopbackOnlyExistsWhenNetworkingDisabled(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+
+	var (
+		count = 0
+		parts = strings.Split(out, "\n")
+	)
+
+	for _, l := range parts {
+		if l != "" {
+			count++
+		}
+	}
+
+	if count != 1 {
+		t.Fatalf("Wrong interface count in container %d", count)
+	}
+
+ if !strings.HasPrefix(out, "1: lo") { + t.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out) + } + + deleteAllContainers() + + logDone("run - test loopback only exists when networking disabled") +} + +// #7851 hostname outside container shows FQDN, inside only shortname +// For testing purposes it is not required to set host's hostname directly +// and use "--net=host" (as the original issue submitter did), as the same +// codepath is executed with "docker run -h <hostname>". Both were manually +// tested, but this testcase takes the simpler path of using "run -h .." +func TestRunFullHostnameSet(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-h", "foo.bar.baz", "busybox", "hostname") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual != "foo.bar.baz" { + t.Fatalf("expected hostname 'foo.bar.baz', received %s", actual) + } + deleteAllContainers() + + logDone("run - test fully qualified hostname set with -h") +} + +func TestRunPrivilegedCanMknod(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + t.Fatalf("expected output ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test privileged can mknod") +} + +func TestRunUnPrivilegedCanMknod(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + t.Fatalf("expected output ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test un-privileged can mknod") +} + +func TestRunCapDropInvalid(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cap-drop=CHPASS", "busybox", "ls") + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal(err, out) + } + + logDone("run - test --cap-drop=CHPASS invalid") +} + +func TestRunCapDropCannotMknod(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cap-drop=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + t.Fatalf("expected output not ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test --cap-drop=MKNOD cannot mknod") +} + +func TestRunCapDropCannotMknodLowerCase(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cap-drop=mknod", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + t.Fatalf("expected output not ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test --cap-drop=mknod cannot mknod lowercase") +} + +func TestRunCapDropALLCannotMknod(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cap-drop=ALL", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + t.Fatalf("expected output not ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test --cap-drop=ALL 
cannot mknod") +} + +func TestRunCapDropALLAddMknodCannotMknod(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cap-drop=ALL", "--cap-add=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + t.Fatalf("expected output ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test --cap-drop=ALL --cap-add=MKNOD can mknod") +} + +func TestRunCapAddInvalid(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cap-add=CHPASS", "busybox", "ls") + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal(err, out) + } + + logDone("run - test --cap-add=CHPASS invalid") +} + +func TestRunCapAddCanDownInterface(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cap-add=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + t.Fatalf("expected output ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test --cap-add=NET_ADMIN can set eth0 down") +} + +func TestRunCapAddALLCanDownInterface(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cap-add=ALL", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + t.Fatalf("expected output ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test --cap-add=ALL can set eth0 down") +} + +func TestRunCapAddALLDropNetAdminCanDownInterface(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cap-add=ALL", "--cap-drop=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + t.Fatalf("expected output not ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test --cap-add=ALL --cap-drop=NET_ADMIN cannot set eth0 down") +} + +func TestRunPrivilegedCanMount(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + t.Fatalf("expected output ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test privileged can mount") +} + +func TestRunUnPrivilegedCannotMount(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") + + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + t.Fatalf("expected output not ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test un-privileged cannot mount") +} + +func TestRunSysNotWritableInNonPrivilegedContainers(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "touch", "/sys/kernel/profiling") + if code, err := runCommand(cmd); err == nil || code == 0 { + t.Fatal("sys should not be writable in a non privileged container") + } + + deleteAllContainers() + + logDone("run - sys not writable in non privileged container") +} + +func 
TestRunSysWritableInPrivilegedContainers(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "touch", "/sys/kernel/profiling") + if code, err := runCommand(cmd); err != nil || code != 0 { + t.Fatalf("sys should be writable in privileged container") + } + + deleteAllContainers() + + logDone("run - sys writable in privileged container") +} + +func TestRunProcNotWritableInNonPrivilegedContainers(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "touch", "/proc/sysrq-trigger") + if code, err := runCommand(cmd); err == nil || code == 0 { + t.Fatal("proc should not be writable in a non privileged container") + } + + deleteAllContainers() + + logDone("run - proc not writable in non privileged container") +} + +func TestRunProcWritableInPrivilegedContainers(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "touch", "/proc/sysrq-trigger") + if code, err := runCommand(cmd); err != nil || code != 0 { + t.Fatalf("proc should be writable in privileged container") + } + + deleteAllContainers() + + logDone("run - proc writable in privileged container") +} + +func TestRunWithCpuset(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--cpuset", "0", "busybox", "true") + if code, err := runCommand(cmd); err != nil || code != 0 { + t.Fatalf("container should run successfully with cpuset of 0: %s", err) + } + + deleteAllContainers() + + logDone("run - cpuset 0") +} + +func TestRunDeviceNumbers(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "ls -l /dev/null") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + deviceLineFields := strings.Fields(out) + deviceLineFields[6] = "" + deviceLineFields[7] = "" + deviceLineFields[8] = "" + expected := []string{"crw-rw-rw-", "1", "root", "root", "1,", "3", "", "", "", "/dev/null"} + + if !(reflect.DeepEqual(deviceLineFields, expected)) { + t.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out) + } + deleteAllContainers() + + logDone("run - test device numbers") +} + +func TestRunThatCharacterDevicesActLikeCharacterDevices(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "dd if=/dev/zero of=/zero bs=1k count=5 2> /dev/null ; du -h /zero") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual[0] == '0' { + t.Fatalf("expected a new file called /zero to be created that is greater than 0 bytes long, but du says: %s", actual) + } + deleteAllContainers() + + logDone("run - test that character devices work.") +} + +func TestRunUnprivilegedWithChroot(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "chroot", "/", "true") + + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + + logDone("run - unprivileged with chroot") +} + +func TestRunAddingOptionalDevices(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--device", "/dev/zero:/dev/nulo", "busybox", "sh", "-c", "ls /dev/nulo") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual != "/dev/nulo" { + t.Fatalf("expected output /dev/nulo, received %s", actual) + } + deleteAllContainers() + + logDone("run - test --device argument") +} + +func TestRunModeHostname(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-h=testhostname", "busybox", 
"cat", "/etc/hostname") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual != "testhostname" { + t.Fatalf("expected 'testhostname', but says: %q", actual) + } + + cmd = exec.Command(dockerBinary, "run", "--net=host", "busybox", "cat", "/etc/hostname") + + out, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + hostname, err := os.Hostname() + if err != nil { + t.Fatal(err) + } + if actual := strings.Trim(out, "\r\n"); actual != hostname { + t.Fatalf("expected %q, but says: '%s'", hostname, actual) + } + + deleteAllContainers() + + logDone("run - hostname and several network modes") +} + +func TestRunRootWorkdir(t *testing.T) { + s, _, err := cmd(t, "run", "--workdir", "/", "busybox", "pwd") + if err != nil { + t.Fatal(s, err) + } + if s != "/\n" { + t.Fatalf("pwd returned %q (expected /\\n)", s) + } + + deleteAllContainers() + + logDone("run - workdir /") +} + +func TestRunAllowBindMountingRoot(t *testing.T) { + s, _, err := cmd(t, "run", "-v", "/:/host", "busybox", "ls", "/host") + if err != nil { + t.Fatal(s, err) + } + + deleteAllContainers() + + logDone("run - bind mount / as volume") +} + +func TestRunDisallowBindMountingRootToRoot(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-v", "/:/", "busybox", "ls", "/host") + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal(out, err) + } + + deleteAllContainers() + + logDone("run - bind mount /:/ as volume should fail") +} + +// Test recursive bind mount works by default +func TestRunWithVolumesIsRecursive(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "docker_recursive_mount_test") + if err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(tmpDir) + + // Create a temporary tmpfs mount. + tmpfsDir := filepath.Join(tmpDir, "tmpfs") + if err := os.MkdirAll(tmpfsDir, 0777); err != nil { + t.Fatalf("failed to mkdir at %s - %s", tmpfsDir, err) + } + if err := mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""); err != nil { + t.Fatalf("failed to create a tmpfs mount at %s - %s", tmpfsDir, err) + } + + f, err := ioutil.TempFile(tmpfsDir, "touch-me") + if err != nil { + t.Fatal(err) + } + defer f.Close() + + runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox:latest", "ls", "/tmp/tmpfs") + out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 0 { + t.Fatal(out, stderr, err) + } + if !strings.Contains(out, filepath.Base(f.Name())) { + t.Fatal("Recursive bind mount test failed. 
Expected file not found") + } + + deleteAllContainers() + + logDone("run - volumes are bind mounted recursively") +} + +func TestRunDnsDefaultOptions(t *testing.T) { + // ci server has default resolv.conf + // so rewrite it for the test + origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") + if os.IsNotExist(err) { + t.Fatalf("/etc/resolv.conf does not exist") + } + + // test with file + tmpResolvConf := []byte("nameserver 127.0.0.1") + if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { + t.Fatal(err) + } + // put the old resolvconf back + defer func() { + if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { + t.Fatal(err) + } + }() + + cmd := exec.Command(dockerBinary, "run", "busybox", "cat", "/etc/resolv.conf") + + actual, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Error(err, actual) + return + } + + // check that the actual defaults are there + // if we ever change the defaults from google dns, this will break + expected := "\nnameserver 8.8.8.8\nnameserver 8.8.4.4" + if actual != expected { + t.Errorf("expected resolv.conf be: %q, but was: %q", expected, actual) + return + } + + deleteAllContainers() + + logDone("run - dns default options") +} + +func TestRunDnsOptions(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) + if actual != "nameserver 127.0.0.1 search mydomain" { + t.Fatalf("expected 'nameserver 127.0.0.1 search mydomain', but says: %q", actual) + } + + cmd = exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "--dns-search=.", "busybox", "cat", "/etc/resolv.conf") + + out, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1) + if actual != "nameserver 127.0.0.1" { + t.Fatalf("expected 'nameserver 127.0.0.1', but says: %q", actual) + } + + logDone("run - dns options") +} + +func TestRunDnsOptionsBasedOnHostResolvConf(t *testing.T) { + var out string + + origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") + if os.IsNotExist(err) { + t.Fatalf("/etc/resolv.conf does not exist") + } + + hostNameservers := resolvconf.GetNameservers(origResolvConf) + hostSearch := resolvconf.GetSearchDomains(origResolvConf) + + cmd := exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf") + + if out, _, err = runCommandWithOutput(cmd); err != nil { + t.Fatal(err, out) + } + + if actualNameservers := resolvconf.GetNameservers([]byte(out)); string(actualNameservers[0]) != "127.0.0.1" { + t.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0])) + } + + actualSearch := resolvconf.GetSearchDomains([]byte(out)) + if len(actualSearch) != len(hostSearch) { + t.Fatalf("expected %d search domain(s), but it has: %d", len(hostSearch), len(actualSearch)) + } + for i := range actualSearch { + if actualSearch[i] != hostSearch[i] { + t.Fatalf("expected %q domain, but says: %q", hostSearch[i], actualSearch[i]) + } + } + + cmd = exec.Command(dockerBinary, "run", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf") + + if out, _, err = runCommandWithOutput(cmd); err != nil { + t.Fatal(err, out) + } + + actualNameservers := resolvconf.GetNameservers([]byte(out)) + if len(actualNameservers) != len(hostNameservers) { + t.Fatalf("expected %d nameserver(s), but it has: %d", len(hostNameservers), len(actualNameservers)) + } + for i := range actualNameservers { + if actualNameservers[i] != hostNameservers[i] { + t.Fatalf("expected %q nameserver, but says: %q", hostNameservers[i], actualNameservers[i]) + } + } + + if actualSearch = resolvconf.GetSearchDomains([]byte(out)); string(actualSearch[0]) != "mydomain" { + t.Fatalf("expected 'mydomain', but says: %q", string(actualSearch[0])) + } + + // test with file + tmpResolvConf := []byte("search example.com\nnameserver 12.34.56.78\nnameserver 127.0.0.1") + if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { + t.Fatal(err) + } + // put the old resolvconf back + defer func() { + if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { + t.Fatal(err) + } + }() + + resolvConf, err := ioutil.ReadFile("/etc/resolv.conf") + if os.IsNotExist(err) { + t.Fatalf("/etc/resolv.conf does not exist") + } + + hostNameservers = resolvconf.GetNameservers(resolvConf) + hostSearch = resolvconf.GetSearchDomains(resolvConf) + + cmd = exec.Command(dockerBinary, "run", "busybox", "cat", "/etc/resolv.conf") + + if out, _, err = runCommandWithOutput(cmd); err != nil { + t.Fatal(err, out) + } + + if actualNameservers = resolvconf.GetNameservers([]byte(out)); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 { + t.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers) + } + + actualSearch = resolvconf.GetSearchDomains([]byte(out)) + if len(actualSearch) != len(hostSearch) { + t.Fatalf("expected %d search domain(s), but it has: %d", len(hostSearch), len(actualSearch)) + } + for i := range actualSearch { + if actualSearch[i] != hostSearch[i] { + t.Fatalf("expected %q domain, but says: %q", hostSearch[i], actualSearch[i]) + } + } + + deleteAllContainers() + + logDone("run - dns options based on host resolv.conf") +} + +func TestRunAddHost(t *testing.T) { + defer deleteAllContainers() + cmd := exec.Command(dockerBinary, "run", "--add-host=extra:86.75.30.9", "busybox", "grep", "extra", "/etc/hosts") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + actual := strings.Trim(out, "\r\n") + if actual != "86.75.30.9\textra" { + t.Fatalf("expected '86.75.30.9\textra', but says: %q", actual) + } + + logDone("run - add-host option") +} + +// Regression test for #6983 +func TestRunAttachStdErrOnlyTTYMode(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-t", "-a", "stderr", "busybox", "true") + + exitCode, err := runCommand(cmd) + if err != nil { + t.Fatal(err) + } else if exitCode != 0 { + t.Fatalf("Container should have exited with error code 0") + } + + deleteAllContainers() + + logDone("run - Attach stderr only with -t") +} + +// Regression test for #6983 +func TestRunAttachStdOutOnlyTTYMode(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-t", "-a", "stdout", "busybox", "true") + + exitCode, err := runCommand(cmd) + if err != nil { + t.Fatal(err) + } else if exitCode != 0 { + t.Fatalf("Container should have exited with error code 0") + } + + deleteAllContainers() + + logDone("run - Attach stdout only with -t") +} + +// Regression test for #6983 +func TestRunAttachStdOutAndErrTTYMode(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-t", "-a", "stdout", "-a", "stderr", "busybox", "true") + + exitCode, err := runCommand(cmd) + if err != nil { + t.Fatal(err) + } else if exitCode != 0 { + t.Fatalf("Container should 
have exited with error code 0") + } + + deleteAllContainers() + + logDone("run - Attach stderr and stdout with -t") +} + +func TestRunState(t *testing.T) { + defer deleteAllContainers() + cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + id := strings.TrimSpace(out) + state, err := inspectField(id, "State.Running") + if err != nil { + t.Fatal(err) + } + if state != "true" { + t.Fatal("Container state is 'not running'") + } + pid1, err := inspectField(id, "State.Pid") + if err != nil { + t.Fatal(err) + } + if pid1 == "0" { + t.Fatal("Container state Pid 0") + } + + cmd = exec.Command(dockerBinary, "stop", id) + out, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + state, err = inspectField(id, "State.Running") + if err != nil { + t.Fatal(err) + } + if state != "false" { + t.Fatal("Container state is 'running'") + } + pid2, err := inspectField(id, "State.Pid") + if err != nil { + t.Fatal(err) + } + if pid2 == pid1 { + t.Fatalf("Container state Pid %s, but expected it to differ from %s", pid2, pid1) + } + + cmd = exec.Command(dockerBinary, "start", id) + out, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + state, err = inspectField(id, "State.Running") + if err != nil { + t.Fatal(err) + } + if state != "true" { + t.Fatal("Container state is 'not running'") + } + pid3, err := inspectField(id, "State.Pid") + if err != nil { + t.Fatal(err) + } + if pid3 == pid1 { + t.Fatalf("Container state Pid %s, but expected it to differ from %s", pid3, pid1) + } + logDone("run - test container state.") +} + +// Test for #1737 +func TestRunCopyVolumeUidGid(t *testing.T) { + name := "testrunvolumesuidgid" + defer deleteImages(name) + defer deleteAllContainers() + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + RUN echo 'dockerio:x:1001:' >> /etc/group + RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`, + true) + if err != nil { + t.Fatal(err) + } + + // Test that the uid and gid is copied from the image to the volume + cmd := exec.Command(dockerBinary, "run", "--rm", "-v", "/hello", name, "sh", "-c", "ls -l / | grep hello | awk '{print $3\":\"$4}'") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + out = strings.TrimSpace(out) + if out != "dockerio:dockerio" { + t.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", out) + } + + logDone("run - copy uid/gid for volume") +} + +// Test for #1582 +func TestRunCopyVolumeContent(t *testing.T) { + name := "testruncopyvolumecontent" + defer deleteImages(name) + defer deleteAllContainers() + _, err := buildImage(name, + `FROM busybox + RUN mkdir -p /hello/local && echo hello > /hello/local/world`, + true) + if err != nil { + t.Fatal(err) + } + + // Test that the content is copied from the image to the volume + cmd := exec.Command(dockerBinary, "run", "--rm", "-v", "/hello", name, "find", "/hello") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if !(strings.Contains(out, "/hello/local/world") && strings.Contains(out, "/hello/local")) { + t.Fatal("Container failed to transfer content to volume") + } + logDone("run - copy volume content") +} + 
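+// The --entrypoint flag should replace the image's ENTRYPOINT while discarding the image's CMD arguments; the image built below sets both so the override is observable ("whoami" must run instead of "echo testingpoint"). 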
+func TestRunCleanupCmdOnEntrypoint(t *testing.T) { + name := "testruncmdcleanuponentrypoint" + defer deleteImages(name) + defer deleteAllContainers() + if _, err := buildImage(name, + `FROM busybox + ENTRYPOINT ["echo"] + CMD ["testingpoint"]`, + true); err != nil { + t.Fatal(err) + } + runCmd := exec.Command(dockerBinary, "run", "--entrypoint", "whoami", name) + out, exit, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf("Error: %v, out: %q", err, out) + } + if exit != 0 { + t.Fatalf("expected exit code 0 received %d, out: %q", exit, out) + } + out = strings.TrimSpace(out) + if out != "root" { + t.Fatalf("Expected output root, got %q", out) + } + logDone("run - cleanup cmd on --entrypoint") +} + +// TestRunWorkdirExistsAndIsFile checks that 'docker run -w' pointing at an existing file is detected +func TestRunWorkdirExistsAndIsFile(t *testing.T) { + defer deleteAllContainers() + runCmd := exec.Command(dockerBinary, "run", "-w", "/bin/cat", "busybox") + out, exit, err := runCommandWithOutput(runCmd) + if !(err != nil && exit == 1 && strings.Contains(out, "Cannot mkdir: /bin/cat is not a directory")) { + t.Fatalf("Docker must complain about being unable to create the workdir, but we got out: %s, exit: %d, err: %s", out, exit, err) + } + logDone("run - error on existing file for workdir") +} + +func TestRunExitOnStdinClose(t *testing.T) { + name := "testrunexitonstdinclose" + defer deleteAllContainers() + runCmd := exec.Command(dockerBinary, "run", "--name", name, "-i", "busybox", "/bin/cat") + + stdin, err := runCmd.StdinPipe() + if err != nil { + t.Fatal(err) + } + stdout, err := runCmd.StdoutPipe() + if err != nil { + t.Fatal(err) + } + + if err := runCmd.Start(); err != nil { + t.Fatal(err) + } + if _, err := stdin.Write([]byte("hello\n")); err != nil { + t.Fatal(err) + } + + r := bufio.NewReader(stdout) + line, err := r.ReadString('\n') + if err != nil { + t.Fatal(err) + } + line = strings.TrimSpace(line) + if line != "hello" { + t.Fatalf("Output should be 'hello', got %q", line) + } + if err := stdin.Close(); err != nil { + t.Fatal(err) + } + finish := make(chan struct{}) + go func() { + if err := runCmd.Wait(); err != nil { + t.Fatal(err) + } + close(finish) + }() + select { + case <-finish: + case <-time.After(1 * time.Second): + t.Fatal("docker run failed to exit on stdin close") + } + state, err := inspectField(name, "State.Running") + if err != nil { + t.Fatal(err) + } + if state != "false" { + t.Fatal("Container must be stopped after stdin closing") + } + logDone("run - exit on stdin closing") +} + +// Test for #2267 +func TestRunWriteHostsFileAndNotCommit(t *testing.T) { + name := "writehosts" + cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hosts && cat /etc/hosts") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if !strings.Contains(out, "test2267") { + t.Fatal("/etc/hosts should contain 'test2267'") + } + + cmd = exec.Command(dockerBinary, "diff", name) + out, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if len(strings.Trim(out, "\r\n")) != 0 { + t.Fatal("diff should be empty") + } + + logDone("run - write to /etc/hosts and not committed") +} + +// Test for #2267 +func TestRunWriteHostnameFileAndNotCommit(t *testing.T) { + name := "writehostname" + cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat /etc/hostname") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if !strings.Contains(out, "test2267") { + t.Fatal("/etc/hostname should contain 'test2267'") + } + + cmd = exec.Command(dockerBinary, "diff", name) + out, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if len(strings.Trim(out, "\r\n")) != 0 { + t.Fatal("diff should be empty") + } + + logDone("run - write to /etc/hostname and not committed") +} + +// Test for #2267 +func TestRunWriteResolvFileAndNotCommit(t *testing.T) { + name := "writeresolv" + cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/resolv.conf && cat /etc/resolv.conf") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if !strings.Contains(out, "test2267") { + t.Fatal("/etc/resolv.conf should contain 'test2267'") + } + + cmd = exec.Command(dockerBinary, "diff", name) + out, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if len(strings.Trim(out, "\r\n")) != 0 { + t.Fatal("diff should be empty") + } + + logDone("run - write to /etc/resolv.conf and not committed") +} + +func TestRunWithBadDevice(t *testing.T) { + name := "baddevice" + cmd := exec.Command(dockerBinary, "run", "--name", name, "--device", "/etc", "busybox", "true") + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal("Run should fail with bad device") + } + expected := `"/etc": not a device node` + if !strings.Contains(out, expected) { + t.Fatalf("Output should contain %q, actual out: %q", expected, out) + } + logDone("run - error with bad device") +} + +func TestRunEntrypoint(t *testing.T) { + name := "entrypoint" + cmd := exec.Command(dockerBinary, "run", "--name", name, "--entrypoint", "/bin/echo", "busybox", "-n", "foobar") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + expected := "foobar" + if out != expected { + t.Fatalf("Output should be %q, actual out: %q", expected, out) + } + logDone("run - entrypoint") +} + +func TestRunBindMounts(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "docker-test-container") + if err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(tmpDir) + writeFile(path.Join(tmpDir, "touch-me"), "", t) + + // Test reading from a read-only bind mount + cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox", "ls", "/tmp") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if !strings.Contains(out, "touch-me") { + t.Fatal("Container failed to read from bind mount") + } + + // test writing to bind mount + cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "busybox", "touch", "/tmp/holla") + out, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist + + // test mounting to an illegal destination directory + cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:.", tmpDir), "busybox", "ls", ".") + _, err = runCommand(cmd) + if err == nil { + t.Fatal("Container bind mounted illegal directory") + } + + // test mounting a file + cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "busybox", "sh", "-c", "echo -n 'yotta' > /tmp/holla") + _, err = runCommand(cmd) + if err != nil { + t.Fatal(err, out) + } + content := readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist + expected := "yotta" + if content != expected { + t.Fatalf("Output should be %q, actual out: %q", expected, content) + } + + logDone("run - bind mounts") +} + +func 
TestRunMutableNetworkFiles(t *testing.T) { + defer deleteAllContainers() + + for _, fn := range []string{"resolv.conf", "hosts"} { + deleteAllContainers() + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s; while true; do sleep 1; done", fn))) + if err != nil { + t.Fatal(err, out) + } + + time.Sleep(1 * time.Second) + + contID := strings.TrimSpace(out) + + f, err := os.Open(filepath.Join("/var/lib/docker/containers", contID, fn)) + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadAll(f) + f.Close() + + if strings.TrimSpace(string(content)) != "success" { + t.Fatal("Content was not what was modified in the container", string(content)) + } + + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "c2", "busybox", "sh", "-c", fmt.Sprintf("while true; do cat /etc/%s; sleep 1; done", fn))) + if err != nil { + t.Fatal(err) + } + + contID = strings.TrimSpace(out) + + resolvConfPath := filepath.Join("/var/lib/docker/containers", contID, fn) + + f, err = os.OpenFile(resolvConfPath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644) + if err != nil { + t.Fatal(err) + } + + if _, err := f.Seek(0, 0); err != nil { + f.Close() + t.Fatal(err) + } + + if err := f.Truncate(0); err != nil { + f.Close() + t.Fatal(err) + } + + if _, err := f.Write([]byte("success2\n")); err != nil { + f.Close() + t.Fatal(err) + } + + f.Close() + + time.Sleep(2 * time.Second) // don't race sleep + + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "logs", "c2")) + if err != nil { + t.Fatal(err) + } + + lines := strings.Split(out, "\n") + if strings.TrimSpace(lines[len(lines)-2]) != "success2" { + t.Fatalf("Did not find the correct output in /etc/%s: %s %#v", fn, out, lines) + } + } +} + +// Ensure that CIDFile gets deleted if it's empty +// Perform this test by making `docker run` fail +func TestRunCidFileCleanupIfEmpty(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "TestRunCidFile") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + tmpCidFile := path.Join(tmpDir, "cid") + cmd := exec.Command(dockerBinary, "run", "--cidfile", tmpCidFile, "scratch") + out, _, err := runCommandWithOutput(cmd) + t.Log(out) + if err == nil { + t.Fatal("Run without command must fail") + } + + if _, err := os.Stat(tmpCidFile); err == nil { + t.Fatalf("empty CIDFile %q should've been deleted", tmpCidFile) + } + deleteAllContainers() + logDone("run - cleanup empty cidfile on fail") +} + +// #2098 - Docker cidFiles only contain short version of the containerId +//sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" +// TestRunCidFile tests that run --cidfile returns the longid +func TestRunCidFileCheckIDLength(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "TestRunCidFile") + if err != nil { + t.Fatal(err) + } + tmpCidFile := path.Join(tmpDir, "cid") + defer os.RemoveAll(tmpDir) + cmd := exec.Command(dockerBinary, "run", "-d", "--cidfile", tmpCidFile, "busybox", "true") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + id := strings.TrimSpace(out) + buffer, err := ioutil.ReadFile(tmpCidFile) + if err != nil { + t.Fatal(err) + } + cid := string(buffer) + if len(cid) != 64 { + t.Fatalf("--cidfile should be a long id, not %q", id) + } + if cid != id { + t.Fatalf("cid must be equal to %s, got %s", id, cid) + } + deleteAllContainers() + logDone("run - cidfile contains long id") +} + +func TestRunNetworkNotInitializedNoneMode(t 
*testing.T) { + cmd := exec.Command(dockerBinary, "run", "-d", "--net=none", "busybox", "top") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + id := strings.TrimSpace(out) + res, err := inspectField(id, "NetworkSettings.IPAddress") + if err != nil { + t.Fatal(err) + } + if res != "" { + t.Fatalf("For 'none' mode network must not be initialized, but container got IP: %s", res) + } + deleteAllContainers() + logDone("run - network must not be initialized in 'none' mode") +} + +func TestRunDeallocatePortOnMissingIptablesRule(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-d", "-p", "23:23", "busybox", "top") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + id := strings.TrimSpace(out) + ip, err := inspectField(id, "NetworkSettings.IPAddress") + if err != nil { + t.Fatal(err) + } + iptCmd := exec.Command("iptables", "-D", "FORWARD", "-d", fmt.Sprintf("%s/32", ip), + "!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT") + out, _, err = runCommandWithOutput(iptCmd) + if err != nil { + t.Fatal(err, out) + } + if err := deleteContainer(id); err != nil { + t.Fatal(err) + } + cmd = exec.Command(dockerBinary, "run", "-d", "-p", "23:23", "busybox", "top") + out, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + deleteAllContainers() + logDone("run - port should be deallocated even on iptables error") +} + +func TestRunPortInUse(t *testing.T) { + port := "1234" + l, err := net.Listen("tcp", ":"+port) + if err != nil { + t.Fatal(err) + } + defer l.Close() + cmd := exec.Command(dockerBinary, "run", "-d", "-p", port+":80", "busybox", "top") + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatalf("Binding on used port must fail") + } + if !strings.Contains(out, "address already in use") { + t.Fatalf("Output should mention \"address already in use\", got %s", out) + } + + deleteAllContainers() + logDone("run - fail if port already in use") +} + +// https://github.com/docker/docker/issues/8428 +func TestRunPortProxy(t *testing.T) { + defer deleteAllContainers() + + port := "12345" + cmd := exec.Command(dockerBinary, "run", "-d", "-p", port+":80", "busybox", "top") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatalf("Failed to run and bind port %s, output: %s, error: %s", port, out, err) + } + + // connect 10 times here. This will trigger 10 EPIPEs in the child + // process and kill it when it writes to a closed stdout/stderr + for i := 0; i < 10; i++ { + net.Dial("tcp", fmt.Sprintf("0.0.0.0:%s", port)) + } + + listPs := exec.Command("sh", "-c", "ps ax | grep docker") + out, _, err = runCommandWithOutput(listPs) + if err != nil { + t.Errorf("listing docker processes failed with output %s, error %s", out, err) + } + if strings.Contains(out, "docker ") { + t.Errorf("Unexpected defunct docker process") + } + if !strings.Contains(out, "docker-proxy -proto tcp -host-ip 0.0.0.0 -host-port 12345") { + t.Errorf("Failed to find docker-proxy process, got %s", out) + } + + logDone("run - proxy should work with unavailable port") +} + +// Regression test for #7792 +func TestRunMountOrdering(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "docker_nested_mount_test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + tmpDir2, err := ioutil.TempDir("", "docker_nested_mount_test2") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir2) + + // Create a nested directory layout with marker files. 
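+ // Ordering is the point of #7792: if a parent volume were mounted after a child nested inside it, + // the parent would shadow the child's contents. The layout below gives the container a touch-me marker file to check at each mount point. 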
+ fooDir := filepath.Join(tmpDir, "foo") + if err := os.MkdirAll(filepath.Join(tmpDir, "foo"), 0755); err != nil { + t.Fatalf("failed to mkdir at %s - %s", fooDir, err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", fooDir), []byte{}, 0644); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir), []byte{}, 0644); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir2), []byte{}, 0644); err != nil { + t.Fatal(err) + } + + cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp", tmpDir), "-v", fmt.Sprintf("%s:/tmp/foo", fooDir), "-v", fmt.Sprintf("%s:/tmp/tmp2", tmpDir2), "-v", fmt.Sprintf("%s:/tmp/tmp2/foo", fooDir), "busybox:latest", "sh", "-c", "ls /tmp/touch-me && ls /tmp/foo/touch-me && ls /tmp/tmp2/touch-me && ls /tmp/tmp2/foo/touch-me") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(out, err) + } + + deleteAllContainers() + logDone("run - volumes are mounted in the correct order") +} + +func TestRunExecDir(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + id := strings.TrimSpace(out) + execDir := filepath.Join(execDriverPath, id) + stateFile := filepath.Join(execDir, "state.json") + contFile := filepath.Join(execDir, "container.json") + + { + fi, err := os.Stat(execDir) + if err != nil { + t.Fatal(err) + } + if !fi.IsDir() { + t.Fatalf("%q must be a directory", execDir) + } + fi, err = os.Stat(stateFile) + if err != nil { + t.Fatal(err) + } + fi, err = os.Stat(contFile) + if err != nil { + t.Fatal(err) + } + } + + stopCmd := exec.Command(dockerBinary, "stop", id) + out, _, err = runCommandWithOutput(stopCmd) + if err != nil { + t.Fatal(err, out) + } + { + fi, err := os.Stat(execDir) + if err != nil { + t.Fatal(err) + } + if !fi.IsDir() { + t.Fatalf("%q must be a directory", execDir) + } + fi, err = os.Stat(stateFile) + if err == nil { + t.Fatalf("State file %q still exists for stopped container!", stateFile) + } + if !os.IsNotExist(err) { + t.Fatalf("Error should be about non-existing, got %s", err) + } + fi, err = os.Stat(contFile) + if err == nil { + t.Fatalf("Container file %q still exists for stopped container!", contFile) + } + if !os.IsNotExist(err) { + t.Fatalf("Error should be about non-existing, got %s", err) + } + } + startCmd := exec.Command(dockerBinary, "start", id) + out, _, err = runCommandWithOutput(startCmd) + if err != nil { + t.Fatal(err, out) + } + { + fi, err := os.Stat(execDir) + if err != nil { + t.Fatal(err) + } + if !fi.IsDir() { + t.Fatalf("%q must be a directory", execDir) + } + fi, err = os.Stat(stateFile) + if err != nil { + t.Fatal(err) + } + fi, err = os.Stat(contFile) + if err != nil { + t.Fatal(err) + } + } + rmCmd := exec.Command(dockerBinary, "rm", "-f", id) + out, _, err = runCommandWithOutput(rmCmd) + if err != nil { + t.Fatal(err, out) + } + { + _, err := os.Stat(execDir) + if err == nil { + t.Fatalf("Exec directory %q still exists for removed container!", execDir) + } + if !os.IsNotExist(err) { + t.Fatalf("Error should be about non-existing, got %s", err) + } + } + + logDone("run - check execdriver dir behavior") +} + +// #6509 +func TestRunRedirectStdout(t *testing.T) { + + defer deleteAllContainers() + + checkRedirect := func(command string) { + _, tty, err := pty.Open() + if err != nil { + t.Fatalf("Could not open pty: %v", err) + } + cmd := exec.Command("sh", 
"-c", command) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + ch := make(chan struct{}) + if err := cmd.Start(); err != nil { + t.Fatalf("start err: %v", err) + } + go func() { + if err := cmd.Wait(); err != nil { + t.Fatalf("wait err=%v", err) + } + close(ch) + }() + + select { + case <-time.After(time.Second): + t.Fatal("command timeout") + case <-ch: + } + } + + checkRedirect(dockerBinary + " run -i busybox cat /etc/passwd | grep -q root") + checkRedirect(dockerBinary + " run busybox cat /etc/passwd | grep -q root") + + logDone("run - redirect stdout") +} + +// Regression test for https://github.com/docker/docker/issues/8259 +func TestRunReuseBindVolumeThatIsSymlink(t *testing.T) { + tmpDir, err := ioutil.TempDir(os.TempDir(), "testlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + linkPath := os.TempDir() + "/testlink2" + if err := os.Symlink(tmpDir, linkPath); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(linkPath) + + // Create first container + cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp/test", linkPath), "busybox", "ls", "-lh", "/tmp/test") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + // Create second container with same symlinked path + // This will fail if the referenced issue is hit with a "Volume exists" error + cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp/test", linkPath), "busybox", "ls", "-lh", "/tmp/test") + if out, _, err := runCommandWithOutput(cmd); err != nil { + t.Fatal(err, out) + } + + deleteAllContainers() + logDone("run - can remount old bindmount volume") +} + +func TestVolumesNoCopyData(t *testing.T) { + defer deleteImages("dataimage") + defer deleteAllContainers() + if _, err := buildImage("dataimage", + `FROM busybox + RUN mkdir -p /foo + RUN touch /foo/bar`, + true); err != nil { + t.Fatal(err) + } + + cmd := exec.Command(dockerBinary, "run", "--name", "test", "-v", "/foo", "busybox") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "run", "--volumes-from", "test", "dataimage", "ls", "-lh", "/foo/bar") + if out, _, err := runCommandWithOutput(cmd); err == nil || !strings.Contains(out, "No such file or directory") { + t.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out) + } + + tmpDir, err := ioutil.TempDir("", "docker_test_bind_mount_copy_data") + if err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(tmpDir) + + cmd = exec.Command(dockerBinary, "run", "-v", tmpDir+":/foo", "dataimage", "ls", "-lh", "/foo/bar") + if out, _, err := runCommandWithOutput(cmd); err == nil || !strings.Contains(out, "No such file or directory") { + t.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out) + } + + logDone("run - volumes do not copy data for volumes-from and bindmounts") +} + +func TestRunVolumesNotRecreatedOnStart(t *testing.T) { + // Clear out any remnants from other tests + deleteAllContainers() + info, err := ioutil.ReadDir(volumesConfigPath) + if err != nil { + t.Fatal(err) + } + if len(info) > 0 { + for _, f := range info { + if err := os.RemoveAll(volumesConfigPath + "/" + f.Name()); err != nil { + t.Fatal(err) + } + } + } + + defer deleteAllContainers() + cmd := exec.Command(dockerBinary, "run", "-v", "/foo", "--name", "lone_starr", "busybox") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "start", "lone_starr") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + info, err = 
ioutil.ReadDir(volumesConfigPath) + if err != nil { + t.Fatal(err) + } + if len(info) != 1 { + t.Fatalf("Expected only 1 volume have %v", len(info)) + } + + logDone("run - volumes not recreated on start") +} + +func TestRunNoOutputFromPullInStdout(t *testing.T) { + defer deleteAllContainers() + // just run with unknown image + cmd := exec.Command(dockerBinary, "run", "asdfsg") + stdout := bytes.NewBuffer(nil) + cmd.Stdout = stdout + if err := cmd.Run(); err == nil { + t.Fatal("Run with unknown image should fail") + } + if stdout.Len() != 0 { + t.Fatalf("Stdout contains output from pull: %s", stdout) + } + logDone("run - no output from pull in stdout") +} + +func TestRunVolumesCleanPaths(t *testing.T) { + defer deleteAllContainers() + + if _, err := buildImage("run_volumes_clean_paths", + `FROM busybox + VOLUME /foo/`, + true); err != nil { + t.Fatal(err) + } + defer deleteImages("run_volumes_clean_paths") + + cmd := exec.Command(dockerBinary, "run", "-v", "/foo", "-v", "/bar/", "--name", "dark_helmet", "run_volumes_clean_paths") + if out, _, err := runCommandWithOutput(cmd); err != nil { + t.Fatal(err, out) + } + + out, err := inspectFieldMap("dark_helmet", "Volumes", "/foo/") + if err != nil { + t.Fatal(err) + } + if out != "" { + t.Fatalf("Found unexpected volume entry for '/foo/' in volumes\n%q", out) + } + + out, err = inspectFieldMap("dark_helmet", "Volumes", "/foo") + if err != nil { + t.Fatal(err) + } + if !strings.Contains(out, volumesStoragePath) { + t.Fatalf("Volume was not defined for /foo\n%q", out) + } + + out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar/") + if err != nil { + t.Fatal(err) + } + if out != "" { + t.Fatalf("Found unexpected volume entry for '/bar/' in volumes\n%q", out) + } + out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar") + if err != nil { + t.Fatal(err) + } + if !strings.Contains(out, volumesStoragePath) { + t.Fatalf("Volume was not defined for /bar\n%q", out) + } + + logDone("run - volume paths are cleaned") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_save_load_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_save_load_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_save_load_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_save_load_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,261 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "testing" +) + +// save a repo and try to load it using stdout +func TestSaveAndLoadRepoStdout(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err)) + + cleanedContainerID := stripTrailingCharacters(out) + + repoName := "foobar-save-load-test" + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + out, _, err = runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("output should've been a container id: %v %v", cleanedContainerID, err)) + + commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) + out, _, err = runCommandWithOutput(commitCmd) + errorOut(err, t, fmt.Sprintf("failed to commit container: %v %v", out, err)) + + inspectCmd = exec.Command(dockerBinary, "inspect", repoName) + before, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("the repo should exist before saving it: %v %v", before, err)) + + saveCmdTemplate := 
`%v save %v > /tmp/foobar-save-load-test.tar` + saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName) + saveCmd := exec.Command("bash", "-c", saveCmdFinal) + out, _, err = runCommandWithOutput(saveCmd) + errorOut(err, t, fmt.Sprintf("failed to save repo: %v %v", out, err)) + + deleteImages(repoName) + + loadCmdFinal := fmt.Sprintf("cat /tmp/foobar-save-load-test.tar | %v load", dockerBinary) + loadCmd := exec.Command("bash", "-c", loadCmdFinal) + out, _, err = runCommandWithOutput(loadCmd) + errorOut(err, t, fmt.Sprintf("failed to load repo: %v %v", out, err)) + + inspectCmd = exec.Command(dockerBinary, "inspect", repoName) + after, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("the repo should exist after loading it: %v %v", after, err)) + + if before != after { + t.Fatalf("inspect is not the same after a save / load") + } + + deleteContainer(cleanedContainerID) + deleteImages(repoName) + + os.Remove("/tmp/foobar-save-load-test.tar") + + logDone("save - save a repo using stdout") + logDone("load - load a repo using stdout") +} + +func TestSaveSingleTag(t *testing.T) { + repoName := "foobar-save-single-tag-test" + + tagCmdFinal := fmt.Sprintf("%v tag busybox:latest %v:latest", dockerBinary, repoName) + tagCmd := exec.Command("bash", "-c", tagCmdFinal) + out, _, err := runCommandWithOutput(tagCmd) + errorOut(err, t, fmt.Sprintf("failed to tag repo: %v %v", out, err)) + + idCmdFinal := fmt.Sprintf("%v images -q --no-trunc %v", dockerBinary, repoName) + idCmd := exec.Command("bash", "-c", idCmdFinal) + out, _, err = runCommandWithOutput(idCmd) + errorOut(err, t, fmt.Sprintf("failed to get repo ID: %v %v", out, err)) + + cleanedImageID := stripTrailingCharacters(out) + + saveCmdFinal := fmt.Sprintf("%v save %v:latest | tar t | grep -E '(^repositories$|%v)'", dockerBinary, repoName, cleanedImageID) + saveCmd := exec.Command("bash", "-c", saveCmdFinal) + out, _, err = runCommandWithOutput(saveCmd) + errorOut(err, t, fmt.Sprintf("failed to save repo with image ID and 'repositories' file: %v %v", out, err)) + + deleteImages(repoName) + + logDone("save - save a specific image:tag") +} + +func TestSaveImageId(t *testing.T) { + repoName := "foobar-save-image-id-test" + + tagCmdFinal := fmt.Sprintf("%v tag scratch:latest %v:latest", dockerBinary, repoName) + tagCmd := exec.Command("bash", "-c", tagCmdFinal) + out, _, err := runCommandWithOutput(tagCmd) + errorOut(err, t, fmt.Sprintf("failed to tag repo: %v %v", out, err)) + + idLongCmdFinal := fmt.Sprintf("%v images -q --no-trunc %v", dockerBinary, repoName) + idLongCmd := exec.Command("bash", "-c", idLongCmdFinal) + out, _, err = runCommandWithOutput(idLongCmd) + errorOut(err, t, fmt.Sprintf("failed to get repo ID: %v %v", out, err)) + + cleanedLongImageID := stripTrailingCharacters(out) + + idShortCmdFinal := fmt.Sprintf("%v images -q %v", dockerBinary, repoName) + idShortCmd := exec.Command("bash", "-c", idShortCmdFinal) + out, _, err = runCommandWithOutput(idShortCmd) + errorOut(err, t, fmt.Sprintf("failed to get repo short ID: %v %v", out, err)) + + cleanedShortImageID := stripTrailingCharacters(out) + + saveCmdFinal := fmt.Sprintf("%v save %v | tar t | grep %v", dockerBinary, cleanedShortImageID, cleanedLongImageID) + saveCmd := exec.Command("bash", "-c", saveCmdFinal) + out, _, err = runCommandWithOutput(saveCmd) + errorOut(err, t, fmt.Sprintf("failed to save repo with image ID: %v %v", out, err)) + + deleteImages(repoName) + + logDone("save - save an image by ID") +} + +// save a repo and try to load it using flags +func 
TestSaveAndLoadRepoFlags(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err)) + + cleanedContainerID := stripTrailingCharacters(out) + + repoName := "foobar-save-load-test" + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + out, _, err = runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("output should've been a container id: %v %v", cleanedContainerID, err)) + + commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) + out, _, err = runCommandWithOutput(commitCmd) + errorOut(err, t, fmt.Sprintf("failed to commit container: %v %v", out, err)) + + inspectCmd = exec.Command(dockerBinary, "inspect", repoName) + before, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("the repo should exist before saving it: %v %v", before, err)) + + saveCmdTemplate := `%v save -o /tmp/foobar-save-load-test.tar %v` + saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName) + saveCmd := exec.Command("bash", "-c", saveCmdFinal) + out, _, err = runCommandWithOutput(saveCmd) + errorOut(err, t, fmt.Sprintf("failed to save repo: %v %v", out, err)) + + deleteImages(repoName) + + loadCmdFinal := `docker load -i /tmp/foobar-save-load-test.tar` + loadCmd := exec.Command("bash", "-c", loadCmdFinal) + out, _, err = runCommandWithOutput(loadCmd) + errorOut(err, t, fmt.Sprintf("failed to load repo: %v %v", out, err)) + + inspectCmd = exec.Command(dockerBinary, "inspect", repoName) + after, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("the repo should exist after loading it: %v %v", after, err)) + + if before != after { + t.Fatalf("inspect is not the same after a save / load") + } + + deleteContainer(cleanedContainerID) + deleteImages(repoName) + + os.Remove("/tmp/foobar-save-load-test.tar") + + logDone("save - save a repo using -o") + logDone("load - load a repo using -i") +} + +func TestSaveMultipleNames(t *testing.T) { + repoName := "foobar-save-multi-name-test" + + // Make one image + tagCmdFinal := fmt.Sprintf("%v tag scratch:latest %v-one:latest", dockerBinary, repoName) + tagCmd := exec.Command("bash", "-c", tagCmdFinal) + out, _, err := runCommandWithOutput(tagCmd) + errorOut(err, t, fmt.Sprintf("failed to tag repo: %v %v", out, err)) + // Make two images + tagCmdFinal = fmt.Sprintf("%v tag scratch:latest %v-two:latest", dockerBinary, repoName) + tagCmd = exec.Command("bash", "-c", tagCmdFinal) + out, _, err = runCommandWithOutput(tagCmd) + errorOut(err, t, fmt.Sprintf("failed to tag repo: %v %v", out, err)) + + saveCmdFinal := fmt.Sprintf("%v save %v-one %v-two:latest | tar xO repositories | grep -q -E '(-one|-two)'", dockerBinary, repoName, repoName) + saveCmd := exec.Command("bash", "-c", saveCmdFinal) + out, _, err = runCommandWithOutput(saveCmd) + errorOut(err, t, fmt.Sprintf("failed to save multiple repos: %v %v", out, err)) + + deleteImages(repoName) + + logDone("save - save by multiple names") +} + +// Issue #6722 #5892 ensure directories are included in changes +func TestSaveDirectoryPermissions(t *testing.T) { + layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} + layerEntriesAUFS := []string{"./", ".wh..wh.aufs", ".wh..wh.orph/", ".wh..wh.plnk/", "opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} + + name := "save-directory-permissions" + tmpDir, err := ioutil.TempDir("", "save-layers-with-directories") + 
if err != nil { + t.Fatalf("failed to create temporary directory: %s", err) + } + extractionDirectory := filepath.Join(tmpDir, "image-extraction-dir") + if err := os.Mkdir(extractionDirectory, 0777); err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(tmpDir) + defer deleteImages(name) + _, err = buildImage(name, + `FROM busybox + RUN adduser -D user && mkdir -p /opt/a/b && chown -R user:user /opt/a + RUN touch /opt/a/b/c && chown user:user /opt/a/b/c`, + true) + if err != nil { + t.Fatal(err) + } + + saveCmdFinal := fmt.Sprintf("%s save %s | tar -xf - -C %s", dockerBinary, name, extractionDirectory) + saveCmd := exec.Command("bash", "-c", saveCmdFinal) + out, _, err := runCommandWithOutput(saveCmd) + if err != nil { + t.Errorf("failed to save and extract image: %s", out) + } + + dirs, err := ioutil.ReadDir(extractionDirectory) + if err != nil { + t.Errorf("failed to get a listing of the layer directories: %s", err) + } + + found := false + for _, entry := range dirs { + if entry.IsDir() { + layerPath := filepath.Join(extractionDirectory, entry.Name(), "layer.tar") + + f, err := os.Open(layerPath) + if err != nil { + t.Fatalf("failed to open %s: %s", layerPath, err) + } + + entries, err := ListTar(f) + if err != nil { + t.Fatalf("encountered error while listing tar entries: %s", err) + } + + if reflect.DeepEqual(entries, layerEntries) || reflect.DeepEqual(entries, layerEntriesAUFS) { + found = true + break + } + } + } + + if !found { + t.Fatalf("failed to find the layer with the right content listing") + } + + logDone("save - ensure directories exist in exported layers") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_search_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_search_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_search_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_search_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,25 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +// search for repos named "busybox" on the central registry +func TestSearchOnCentralRegistry(t *testing.T) { + searchCmd := exec.Command(dockerBinary, "search", "busybox") + out, exitCode, err := runCommandWithOutput(searchCmd) + errorOut(err, t, fmt.Sprintf("encountered error while searching: %v", err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to search on the central registry") + } + + if !strings.Contains(out, "Busybox base image.") { + t.Fatal("expected the search results to contain the description 'Busybox base image.'") + } + + logDone("search - search for repositories containing 'Busybox base image.'") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_start_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_start_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_start_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_start_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,67 @@ +package main + +import ( + "os/exec" + "strings" + "testing" + "time" +) + +// Regression test for https://github.com/docker/docker/issues/7843 +func TestStartAttachReturnsOnError(t *testing.T) { + defer deleteAllContainers() + + cmd(t, "run", "-d", "--name", "test", "busybox") + cmd(t, "stop", "test") + + // Expect this to fail because the above container is stopped, which is what we want + if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "test2", "--link", "test:test", "busybox")); err == nil { + t.Fatal("Expected error but got none") + } + 
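+ // The one-second timeout below is what actually catches the #7843 regression: a broken "start -a" on a container that cannot start blocks indefinitely instead of returning the start error. 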
but got none") + } + + ch := make(chan struct{}) + go func() { + // Attempt to start attached to the container that won't start + // This should return an error immediately since the container can't be started + if _, err := runCommand(exec.Command(dockerBinary, "start", "-a", "test2")); err == nil { + t.Fatal("Expected error but got none") + } + close(ch) + }() + + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("Attach did not exit properly") + } + + logDone("start - error on start with attach exits") +} + +// gh#8726: a failed Start() breaks --volumes-from on subsequent Start()'s +func TestStartVolumesFromFailsCleanly(t *testing.T) { + defer deleteAllContainers() + + // Create the first data volume + cmd(t, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox") + + // Expect this to fail because the data test after contaienr doesn't exist yet + if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "consumer", "--volumes-from", "data_before", "--volumes-from", "data_after", "busybox")); err == nil { + t.Fatal("Expected error but got none") + } + + // Create the second data volume + cmd(t, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox") + + // Now, all the volumes should be there + cmd(t, "start", "consumer") + + // Check that we have the volumes we want + out, _, _ := cmd(t, "inspect", "--format='{{ len .Volumes }}'", "consumer") + n_volumes := strings.Trim(out, " \r\n'") + if n_volumes != "2" { + t.Fatalf("Missing volumes: expected 2, got %s", n_volumes) + } + + logDone("start - missing containers in --volumes-from did not affect subsequent runs") +} diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_tag_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_tag_test.go --- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_tag_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_tag_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,90 @@ +package main + +import ( + "fmt" + "os/exec" + "testing" +) + +// tagging a named image in a new unprefixed repo should work +func TestTagUnprefixedRepoByName(t *testing.T) { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + t.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + + tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "testfoobarbaz") + out, _, err := runCommandWithOutput(tagCmd) + errorOut(err, t, fmt.Sprintf("%v %v", out, err)) + + deleteImages("testfoobarbaz") + + logDone("tag - busybox -> testfoobarbaz") +} + +// tagging an image by ID in a new unprefixed repo should work +func TestTagUnprefixedRepoByID(t *testing.T) { + getIDCmd := exec.Command(dockerBinary, "inspect", "-f", "{{.Id}}", "busybox") + out, _, err := runCommandWithOutput(getIDCmd) + errorOut(err, t, fmt.Sprintf("failed to get the image ID of busybox: %v", err)) + + cleanedImageID := stripTrailingCharacters(out) + tagCmd := exec.Command(dockerBinary, "tag", cleanedImageID, "testfoobarbaz") + out, _, err = runCommandWithOutput(tagCmd) + errorOut(err, t, fmt.Sprintf("%s %s", out, err)) + + deleteImages("testfoobarbaz") + + logDone("tag - busybox's image ID -> testfoobarbaz") +} + +// ensure we don't allow the use of invalid repository names; these tag operations should fail +func TestTagInvalidUnprefixedRepo(t *testing.T) { + + invalidRepos := []string{"fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo%asd"} + + for _, repo := range invalidRepos { + tagCmd := 
diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_tag_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_tag_test.go
--- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_tag_test.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_tag_test.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,90 @@
+package main
+
+import (
+    "fmt"
+    "os/exec"
+    "testing"
+)
+
+// tagging a named image in a new unprefixed repo should work
+func TestTagUnprefixedRepoByName(t *testing.T) {
+    if err := pullImageIfNotExist("busybox:latest"); err != nil {
+        t.Fatal("couldn't find the busybox:latest image locally and failed to pull it")
+    }
+
+    tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "testfoobarbaz")
+    out, _, err := runCommandWithOutput(tagCmd)
+    errorOut(err, t, fmt.Sprintf("%v %v", out, err))
+
+    deleteImages("testfoobarbaz")
+
+    logDone("tag - busybox -> testfoobarbaz")
+}
+
+// tagging an image by ID in a new unprefixed repo should work
+func TestTagUnprefixedRepoByID(t *testing.T) {
+    getIDCmd := exec.Command(dockerBinary, "inspect", "-f", "{{.Id}}", "busybox")
+    out, _, err := runCommandWithOutput(getIDCmd)
+    errorOut(err, t, fmt.Sprintf("failed to get the image ID of busybox: %v", err))
+
+    cleanedImageID := stripTrailingCharacters(out)
+    tagCmd := exec.Command(dockerBinary, "tag", cleanedImageID, "testfoobarbaz")
+    out, _, err = runCommandWithOutput(tagCmd)
+    errorOut(err, t, fmt.Sprintf("%s %s", out, err))
+
+    deleteImages("testfoobarbaz")
+
+    logDone("tag - busybox's image ID -> testfoobarbaz")
+}
+
+// ensure we don't allow the use of invalid repository names; these tag operations should fail
+func TestTagInvalidUnprefixedRepo(t *testing.T) {
+    invalidRepos := []string{"fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo%asd"}
+
+    for _, repo := range invalidRepos {
+        tagCmd := exec.Command(dockerBinary, "tag", "busybox", repo)
+        _, _, err := runCommandWithOutput(tagCmd)
+        if err == nil {
+            t.Fatalf("tag busybox %v should have failed", repo)
+        }
+    }
+    logDone("tag - busybox invalid repo names --> must fail")
+}
+
+// ensure we don't allow the use of invalid tags; these tag operations should fail
+func TestTagInvalidPrefixedRepo(t *testing.T) {
+    longTag := makeRandomString(121)
+
+    invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag}
+
+    for _, repotag := range invalidTags {
+        tagCmd := exec.Command(dockerBinary, "tag", "busybox", repotag)
+        _, _, err := runCommandWithOutput(tagCmd)
+        if err == nil {
+            t.Fatalf("tag busybox %v should have failed", repotag)
+        }
+    }
+    logDone("tag - busybox with invalid repo:tagnames --> must fail")
+}
+
+// ensure we allow the use of valid tags
+func TestTagValidPrefixedRepo(t *testing.T) {
+    if err := pullImageIfNotExist("busybox:latest"); err != nil {
+        t.Fatal("couldn't find the busybox:latest image locally and failed to pull it")
+    }
+
+    validRepos := []string{"fooo/bar", "fooaa/test", "foooo:t"}
+
+    for _, repo := range validRepos {
+        tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", repo)
+        _, _, err := runCommandWithOutput(tagCmd)
+        if err != nil {
+            t.Errorf("tag busybox %v should have worked: %s", repo, err)
+            continue
+        }
+        deleteImages(repo)
+        logMessage := fmt.Sprintf("tag - busybox %v", repo)
+        logDone(logMessage)
+    }
+}
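For context on why these names are rejected: repository and tag names are validated daemon-side against a word-character pattern with a length cap. The sketch below only illustrates the shape of such a check; the exact regexp and limit here are assumptions, not Docker's authoritative rule:

    package main

    import (
        "fmt"
        "regexp"
    )

    // isPlausibleTag sketches the kind of validation the daemon applies:
    // a tag must start with a word character and may continue with word
    // characters, dots and dashes, up to a length cap.
    var tagPattern = regexp.MustCompile(`^[\w][\w.-]*$`)

    func isPlausibleTag(tag string) bool {
        return len(tag) > 0 && len(tag) <= 30 && tagPattern.MatchString(tag)
    }

    func main() {
        for _, tag := range []string{"t", "-foo", "..", "%goodbye"} {
            fmt.Println(tag, isPlausibleTag(tag)) // only "t" passes
        }
    }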
diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_top_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_top_test.go
--- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_top_test.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_top_test.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,91 @@
+package main
+
+import (
+    "fmt"
+    "os/exec"
+    "strings"
+    "testing"
+)
+
+func TestTopMultipleArgs(t *testing.T) {
+    runCmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox", "sleep", "20")
+    out, _, err := runCommandWithOutput(runCmd)
+    errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err))
+
+    cleanedContainerID := stripTrailingCharacters(out)
+    defer deleteContainer(cleanedContainerID)
+
+    topCmd := exec.Command(dockerBinary, "top", cleanedContainerID, "-o", "pid")
+    out, _, err = runCommandWithOutput(topCmd)
+    errorOut(err, t, fmt.Sprintf("failed to run top: %v %v", out, err))
+
+    if !strings.Contains(out, "PID") {
+        t.Fatal("did not see PID after top -o pid")
+    }
+
+    logDone("top - multiple arguments")
+}
+
+func TestTopNonPrivileged(t *testing.T) {
+    runCmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox", "sleep", "20")
+    out, _, err := runCommandWithOutput(runCmd)
+    errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err))
+
+    cleanedContainerID := stripTrailingCharacters(out)
+
+    topCmd := exec.Command(dockerBinary, "top", cleanedContainerID)
+    out, _, err = runCommandWithOutput(topCmd)
+    errorOut(err, t, fmt.Sprintf("failed to run top: %v %v", out, err))
+
+    topCmd = exec.Command(dockerBinary, "top", cleanedContainerID)
+    out2, _, err2 := runCommandWithOutput(topCmd)
+    errorOut(err2, t, fmt.Sprintf("failed to run top: %v %v", out2, err2))
+
+    killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID)
+    _, err = runCommand(killCmd)
+    errorOut(err, t, fmt.Sprintf("failed to kill container: %v", err))
+
+    deleteContainer(cleanedContainerID)
+
+    if !strings.Contains(out, "sleep 20") && !strings.Contains(out2, "sleep 20") {
+        t.Fatal("top should've listed `sleep 20` in the process list, but failed twice")
+    } else if !strings.Contains(out, "sleep 20") {
+        t.Fatal("top should've listed `sleep 20` in the process list, but failed the first time")
+    } else if !strings.Contains(out2, "sleep 20") {
+        t.Fatal("top should've listed `sleep 20` in the process list, but failed the second time")
+    }
+
+    logDone("top - sleep process should be listed in non-privileged mode")
+}
+
+func TestTopPrivileged(t *testing.T) {
+    runCmd := exec.Command(dockerBinary, "run", "--privileged", "-i", "-d", "busybox", "sleep", "20")
+    out, _, err := runCommandWithOutput(runCmd)
+    errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err))
+
+    cleanedContainerID := stripTrailingCharacters(out)
+
+    topCmd := exec.Command(dockerBinary, "top", cleanedContainerID)
+    out, _, err = runCommandWithOutput(topCmd)
+    errorOut(err, t, fmt.Sprintf("failed to run top: %v %v", out, err))
+
+    topCmd = exec.Command(dockerBinary, "top", cleanedContainerID)
+    out2, _, err2 := runCommandWithOutput(topCmd)
+    errorOut(err2, t, fmt.Sprintf("failed to run top: %v %v", out2, err2))
+
+    killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID)
+    _, err = runCommand(killCmd)
+    errorOut(err, t, fmt.Sprintf("failed to kill container: %v", err))
+
+    deleteContainer(cleanedContainerID)
+
+    if !strings.Contains(out, "sleep 20") && !strings.Contains(out2, "sleep 20") {
+        t.Fatal("top should've listed `sleep 20` in the process list, but failed twice")
+    } else if !strings.Contains(out, "sleep 20") {
+        t.Fatal("top should've listed `sleep 20` in the process list, but failed the first time")
+    } else if !strings.Contains(out2, "sleep 20") {
+        t.Fatal("top should've listed `sleep 20` in the process list, but failed the second time")
+    }
+
+    logDone("top - sleep process should be listed in privileged mode")
+}
diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_cli_version_test.go docker.io-1.3.2~dfsg1/integration-cli/docker_cli_version_test.go
--- docker.io-0.9.1~dfsg1/integration-cli/docker_cli_version_test.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/integration-cli/docker_cli_version_test.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,38 @@
+package main
+
+import (
+    "fmt"
+    "os/exec"
+    "strings"
+    "testing"
+)
+
+// ensure docker version works
+func TestVersionEnsureSucceeds(t *testing.T) {
+    versionCmd := exec.Command(dockerBinary, "version")
+    out, exitCode, err := runCommandWithOutput(versionCmd)
+    errorOut(err, t, fmt.Sprintf("encountered error while running docker version: %v", err))
+
+    if err != nil || exitCode != 0 {
+        t.Fatal("failed to execute docker version")
+    }
+
+    stringsToCheck := []string{
+        "Client version:",
+        "Client API version:",
+        "Go version (client):",
+        "Git commit (client):",
+        "Server version:",
+        "Server API version:",
+        "Go version (server):",
+        "Git commit (server):",
+    }
+
+    for _, linePrefix := range stringsToCheck {
+        if !strings.Contains(out, linePrefix) {
+            t.Errorf("couldn't find string %v in output", linePrefix)
+        }
+    }
+
+    logDone("version - verify that it works and that the output is properly formatted")
+}
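docker_test_vars.go, next in this diff, resolves its defaults through environment overrides (DOCKER_BINARY, REGISTRY_IMAGE, REGISTRY_URL). The pattern in isolation, as a runnable sketch with an invented variable name:

    package main

    import (
        "fmt"
        "os"
    )

    // envOr returns the environment value for key, or def when key is unset.
    func envOr(key, def string) string {
        if v := os.Getenv(key); v != "" {
            return v
        }
        return def
    }

    func main() {
        fmt.Println(envOr("DOCKER_BINARY", "docker"))
    }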
diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_test_vars.go docker.io-1.3.2~dfsg1/integration-cli/docker_test_vars.go
--- docker.io-0.9.1~dfsg1/integration-cli/docker_test_vars.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/integration-cli/docker_test_vars.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,47 @@
+package main
+
+import (
+    "fmt"
+    "os"
+    "os/exec"
+)
+
+var (
+    // the docker binary to use
+    dockerBinary = "docker"
+
+    // the private registry image to use for tests involving the registry
+    registryImageName = "registry"
+
+    // the private registry to use for tests
+    privateRegistryURL = "127.0.0.1:5000"
+
+    dockerBasePath     = "/var/lib/docker"
+    execDriverPath     = dockerBasePath + "/execdriver/native"
+    volumesConfigPath  = dockerBasePath + "/volumes"
+    volumesStoragePath = dockerBasePath + "/vfs/dir"
+
+    workingDirectory string
+)
+
+func init() {
+    if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" {
+        dockerBinary = dockerBin
+    } else {
+        whichCmd := exec.Command("which", "docker")
+        out, _, err := runCommandWithOutput(whichCmd)
+        if err == nil {
+            dockerBinary = stripTrailingCharacters(out)
+        } else {
+            fmt.Printf("ERROR: couldn't resolve full path to the Docker binary\n")
+            os.Exit(1)
+        }
+    }
+    if registryImage := os.Getenv("REGISTRY_IMAGE"); registryImage != "" {
+        registryImageName = registryImage
+    }
+    if registry := os.Getenv("REGISTRY_URL"); registry != "" {
+        privateRegistryURL = registry
+    }
+    workingDirectory, _ = os.Getwd()
+}
diff -Nru docker.io-0.9.1~dfsg1/integration-cli/docker_utils.go docker.io-1.3.2~dfsg1/integration-cli/docker_utils.go
--- docker.io-0.9.1~dfsg1/integration-cli/docker_utils.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/integration-cli/docker_utils.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,714 @@
+package main
+
+import (
+    "errors"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "net"
+    "net/http"
+    "net/http/httptest"
+    "net/http/httputil"
+    "os"
+    "os/exec"
+    "path"
+    "path/filepath"
+    "strconv"
+    "strings"
+    "testing"
+    "time"
+)
+
+// Daemon represents a Docker daemon for the testing framework.
+type Daemon struct {
+    t              *testing.T
+    logFile        *os.File
+    folder         string
+    stdin          io.WriteCloser
+    stdout, stderr io.ReadCloser
+    cmd            *exec.Cmd
+    storageDriver  string
+    execDriver     string
+    wait           chan error
+}
+
+// NewDaemon returns a Daemon instance to be used for testing.
+// This will create a directory such as daemon123456789 in the folder specified by $DEST.
+// The daemon will not automatically start.
+func NewDaemon(t *testing.T) *Daemon {
+    dest := os.Getenv("DEST")
+    if dest == "" {
+        t.Fatal("Please set the DEST environment variable")
+    }
+
+    dir := filepath.Join(dest, fmt.Sprintf("daemon%d", time.Now().Unix()))
+    daemonFolder, err := filepath.Abs(dir)
+    if err != nil {
+        t.Fatalf("Could not make '%s' an absolute path: %v", dir, err)
+    }
+
+    if err := os.MkdirAll(filepath.Join(daemonFolder, "graph"), 0600); err != nil {
+        t.Fatalf("Could not create %s/graph directory", daemonFolder)
+    }
+
+    return &Daemon{
+        t:             t,
+        folder:        daemonFolder,
+        storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"),
+        execDriver:    os.Getenv("DOCKER_EXECDRIVER"),
+    }
+}
+
+// Start will start the daemon and return once it is ready to receive requests.
+// You can specify additional daemon flags.
+func (d *Daemon) Start(arg ...string) error { + dockerBinary, err := exec.LookPath(dockerBinary) + if err != nil { + d.t.Fatalf("could not find docker binary in $PATH: %v", err) + } + + args := []string{ + "--host", d.sock(), + "--daemon", "--debug", + "--graph", fmt.Sprintf("%s/graph", d.folder), + "--pidfile", fmt.Sprintf("%s/docker.pid", d.folder), + } + if d.storageDriver != "" { + args = append(args, "--storage-driver", d.storageDriver) + } + if d.execDriver != "" { + args = append(args, "--exec-driver", d.execDriver) + } + + args = append(args, arg...) + d.cmd = exec.Command(dockerBinary, args...) + + d.logFile, err = os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600) + if err != nil { + d.t.Fatalf("Could not create %s/docker.log: %v", d.folder, err) + } + + d.cmd.Stdout = d.logFile + d.cmd.Stderr = d.logFile + + if err := d.cmd.Start(); err != nil { + return fmt.Errorf("could not start daemon container: %v", err) + } + + wait := make(chan error) + + go func() { + wait <- d.cmd.Wait() + d.t.Log("exiting daemon") + close(wait) + }() + + d.wait = wait + + tick := time.Tick(500 * time.Millisecond) + // make sure daemon is ready to receive requests + for { + d.t.Log("waiting for daemon to start") + select { + case <-time.After(2 * time.Second): + return errors.New("timeout: daemon does not respond") + case <-tick: + c, err := net.Dial("unix", filepath.Join(d.folder, "docker.sock")) + if err != nil { + continue + } + + client := httputil.NewClientConn(c, nil) + defer client.Close() + + req, err := http.NewRequest("GET", "/_ping", nil) + if err != nil { + d.t.Fatalf("could not create new request: %v", err) + } + + resp, err := client.Do(req) + if err != nil { + continue + } + if resp.StatusCode != http.StatusOK { + d.t.Logf("received status != 200 OK: %s", resp.Status) + } + + d.t.Log("daemon started") + return nil + } + } +} + +// StartWithBusybox will first start the daemon with Daemon.Start() +// then save the busybox image from the main daemon and load it into this Daemon instance. +func (d *Daemon) StartWithBusybox(arg ...string) error { + if err := d.Start(arg...); err != nil { + return err + } + bb := filepath.Join(d.folder, "busybox.tar") + if _, err := os.Stat(bb); err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("unexpected error on busybox.tar stat: %v", err) + } + // saving busybox image from main daemon + if err := exec.Command(dockerBinary, "save", "--output", bb, "busybox:latest").Run(); err != nil { + return fmt.Errorf("could not save busybox image: %v", err) + } + } + // loading busybox image to this daemon + if _, err := d.Cmd("load", "--input", bb); err != nil { + return fmt.Errorf("could not load busybox image: %v", err) + } + if err := os.Remove(bb); err != nil { + d.t.Logf("Could not remove %s: %v", bb, err) + } + return nil +} + +// Stop will send a SIGINT every second and wait for the daemon to stop. +// If it timeouts, a SIGKILL is sent. +// Stop will not delete the daemon directory. If a purged daemon is needed, +// instantiate a new one with NewDaemon. 
+func (d *Daemon) Stop() error { + if d.cmd == nil || d.wait == nil { + return errors.New("daemon not started") + } + + defer func() { + d.logFile.Close() + d.cmd = nil + }() + + i := 1 + tick := time.Tick(time.Second) + + if err := d.cmd.Process.Signal(os.Interrupt); err != nil { + return fmt.Errorf("could not send signal: %v", err) + } +out: + for { + select { + case err := <-d.wait: + return err + case <-time.After(20 * time.Second): + d.t.Log("timeout") + break out + case <-tick: + d.t.Logf("Attempt #%d: daemon is still running with pid %d", i+1, d.cmd.Process.Pid) + if err := d.cmd.Process.Signal(os.Interrupt); err != nil { + return fmt.Errorf("could not send signal: %v", err) + } + i++ + } + } + + if err := d.cmd.Process.Kill(); err != nil { + d.t.Logf("Could not kill daemon: %v", err) + return err + } + + return nil +} + +// Restart will restart the daemon by first stopping it and then starting it. +func (d *Daemon) Restart(arg ...string) error { + d.Stop() + return d.Start(arg...) +} + +func (d *Daemon) sock() string { + return fmt.Sprintf("unix://%s/docker.sock", d.folder) +} + +// Cmd will execute a docker CLI command against this Daemon. +// Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version +func (d *Daemon) Cmd(name string, arg ...string) (string, error) { + args := []string{"--host", d.sock(), name} + args = append(args, arg...) + c := exec.Command(dockerBinary, args...) + b, err := c.CombinedOutput() + return string(b), err +} + +func sockRequest(method, endpoint string) ([]byte, error) { + // FIX: the path to sock should not be hardcoded + sock := filepath.Join("/", "var", "run", "docker.sock") + c, err := net.DialTimeout("unix", sock, time.Duration(10*time.Second)) + if err != nil { + return nil, fmt.Errorf("could not dial docker sock at %s: %v", sock, err) + } + + client := httputil.NewClientConn(c, nil) + defer client.Close() + + req, err := http.NewRequest(method, endpoint, nil) + req.Header.Set("Content-Type", "application/json") + if err != nil { + return nil, fmt.Errorf("could not create new request: %v", err) + } + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("could not perform request: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("received status != 200 OK: %s", resp.Status) + } + + return ioutil.ReadAll(resp.Body) +} + +func deleteContainer(container string) error { + container = strings.Replace(container, "\n", " ", -1) + container = strings.Trim(container, " ") + killArgs := fmt.Sprintf("kill %v", container) + killSplitArgs := strings.Split(killArgs, " ") + killCmd := exec.Command(dockerBinary, killSplitArgs...) + runCommand(killCmd) + rmArgs := fmt.Sprintf("rm -v %v", container) + rmSplitArgs := strings.Split(rmArgs, " ") + rmCmd := exec.Command(dockerBinary, rmSplitArgs...) 
+    exitCode, err := runCommand(rmCmd)
+    // set error manually if not set
+    if exitCode != 0 && err == nil {
+        err = fmt.Errorf("failed to remove container: `docker rm` exit is non-zero")
+    }
+
+    return err
+}
+
+func getAllContainers() (string, error) {
+    getContainersCmd := exec.Command(dockerBinary, "ps", "-q", "-a")
+    out, exitCode, err := runCommandWithOutput(getContainersCmd)
+    if exitCode != 0 && err == nil {
+        err = fmt.Errorf("failed to get a list of containers: %v", out)
+    }
+
+    return out, err
+}
+
+func deleteAllContainers() error {
+    containers, err := getAllContainers()
+    if err != nil {
+        fmt.Println(containers)
+        return err
+    }
+
+    if err = deleteContainer(containers); err != nil {
+        return err
+    }
+    return nil
+}
+
+func deleteImages(images ...string) error {
+    // pass each image as a separate argument to `docker rmi`
+    args := append([]string{"rmi"}, images...)
+    rmiCmd := exec.Command(dockerBinary, args...)
+    exitCode, err := runCommand(rmiCmd)
+    // set error manually if not set
+    if exitCode != 0 && err == nil {
+        err = fmt.Errorf("failed to remove image: `docker rmi` exit is non-zero")
+    }
+
+    return err
+}
+
+func imageExists(image string) error {
+    inspectCmd := exec.Command(dockerBinary, "inspect", image)
+    exitCode, err := runCommand(inspectCmd)
+    if exitCode != 0 && err == nil {
+        err = fmt.Errorf("couldn't find image '%s'", image)
+    }
+    return err
+}
+
+func pullImageIfNotExist(image string) error {
+    if err := imageExists(image); err != nil {
+        pullCmd := exec.Command(dockerBinary, "pull", image)
+        out, exitCode, err := runCommandWithOutput(pullCmd)
+        if err != nil || exitCode != 0 {
+            return fmt.Errorf("image '%s' wasn't found locally and it couldn't be pulled: %s", image, out)
+        }
+    }
+    return nil
+}
+
+// deprecated, use dockerCmd instead
+func cmd(t *testing.T, args ...string) (string, int, error) {
+    return dockerCmd(t, args...)
+}
+
+func dockerCmd(t *testing.T, args ...string) (string, int, error) {
+    out, status, err := runCommandWithOutput(exec.Command(dockerBinary, args...))
+    errorOut(err, t, fmt.Sprintf("'%s' failed with errors: %v (%v)", strings.Join(args, " "), err, out))
+    return out, status, err
+}
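The timeout-aware variants just below delegate to runCommandWithOutputAndTimeout (defined later in utils.go), which is built on the classic goroutine-plus-select pattern. A condensed, self-contained sketch of that pattern; the names and the example command are illustrative:

    package main

    import (
        "fmt"
        "os/exec"
        "time"
    )

    // runWithTimeout runs cmd and kills it if it has not finished within d.
    func runWithTimeout(cmd *exec.Cmd, d time.Duration) ([]byte, error) {
        type result struct {
            out []byte
            err error
        }
        done := make(chan result, 1) // buffered so the goroutine never blocks forever
        go func() {
            out, err := cmd.CombinedOutput()
            done <- result{out, err}
        }()
        select {
        case r := <-done:
            return r.out, r.err
        case <-time.After(d):
            if cmd.Process != nil {
                cmd.Process.Kill() // best effort; the goroutine then returns too
            }
            return nil, fmt.Errorf("command timed out after %s", d)
        }
    }

    func main() {
        out, err := runWithTimeout(exec.Command("sleep", "5"), time.Second)
        fmt.Println(string(out), err)
    }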
+// execute a docker command with a timeout
+func dockerCmdWithTimeout(timeout time.Duration, args ...string) (string, int, error) {
+    out, status, err := runCommandWithOutputAndTimeout(exec.Command(dockerBinary, args...), timeout)
+    if err != nil {
+        return out, status, fmt.Errorf("'%s' failed with errors: %v : %q", strings.Join(args, " "), err, out)
+    }
+    return out, status, err
+}
+
+// execute a docker command in a directory
+func dockerCmdInDir(t *testing.T, path string, args ...string) (string, int, error) {
+    dockerCommand := exec.Command(dockerBinary, args...)
+    dockerCommand.Dir = path
+    out, status, err := runCommandWithOutput(dockerCommand)
+    if err != nil {
+        return out, status, fmt.Errorf("'%s' failed with errors: %v : %q", strings.Join(args, " "), err, out)
+    }
+    return out, status, err
+}
+
+// execute a docker command in a directory with a timeout
+func dockerCmdInDirWithTimeout(timeout time.Duration, path string, args ...string) (string, int, error) {
+    dockerCommand := exec.Command(dockerBinary, args...)
+    dockerCommand.Dir = path
+    out, status, err := runCommandWithOutputAndTimeout(dockerCommand, timeout)
+    if err != nil {
+        return out, status, fmt.Errorf("'%s' failed with errors: %v : %q", strings.Join(args, " "), err, out)
+    }
+    return out, status, err
+}
+
+func findContainerIP(t *testing.T, id string) string {
+    cmd := exec.Command(dockerBinary, "inspect", "--format='{{ .NetworkSettings.IPAddress }}'", id)
+    out, _, err := runCommandWithOutput(cmd)
+    if err != nil {
+        t.Fatal(err, out)
+    }
+
+    return strings.Trim(out, " \r\n'")
+}
+
+func getContainerCount() (int, error) {
+    const containers = "Containers:"
+
+    cmd := exec.Command(dockerBinary, "info")
+    out, _, err := runCommandWithOutput(cmd)
+    if err != nil {
+        return 0, err
+    }
+
+    lines := strings.Split(out, "\n")
+    for _, line := range lines {
+        if strings.Contains(line, containers) {
+            output := stripTrailingCharacters(line)
+            output = strings.TrimPrefix(output, containers)
+            output = strings.Trim(output, " ")
+            containerCount, err := strconv.Atoi(output)
+            if err != nil {
+                return 0, err
+            }
+            return containerCount, nil
+        }
+    }
+    return 0, fmt.Errorf("couldn't find the Container count in the output")
+}
+
+type FakeContext struct {
+    Dir string
+}
+
+func (f *FakeContext) Add(file, content string) error {
+    filepath := path.Join(f.Dir, file)
+    dirpath := path.Dir(filepath)
+    if dirpath != "." {
+        if err := os.MkdirAll(dirpath, 0755); err != nil {
+            return err
+        }
+    }
+    return ioutil.WriteFile(filepath, []byte(content), 0644)
+}
+
+func (f *FakeContext) Delete(file string) error {
+    filepath := path.Join(f.Dir, file)
+    return os.RemoveAll(filepath)
+}
+
+func (f *FakeContext) Close() error {
+    return os.RemoveAll(f.Dir)
+}
+
+func fakeContext(dockerfile string, files map[string]string) (*FakeContext, error) {
+    tmp, err := ioutil.TempDir("", "fake-context")
+    if err != nil {
+        return nil, err
+    }
+    if err := os.Chmod(tmp, 0755); err != nil {
+        return nil, err
+    }
+    ctx := &FakeContext{tmp}
+    for file, content := range files {
+        if err := ctx.Add(file, content); err != nil {
+            ctx.Close()
+            return nil, err
+        }
+    }
+    if err := ctx.Add("Dockerfile", dockerfile); err != nil {
+        ctx.Close()
+        return nil, err
+    }
+    return ctx, nil
+}
+
+type FakeStorage struct {
+    *FakeContext
+    *httptest.Server
+}
+
+func (f *FakeStorage) Close() error {
+    f.Server.Close()
+    return f.FakeContext.Close()
+}
+
+func fakeStorage(files map[string]string) (*FakeStorage, error) {
+    tmp, err := ioutil.TempDir("", "fake-storage")
+    if err != nil {
+        return nil, err
+    }
+    ctx := &FakeContext{tmp}
+    for file, content := range files {
+        if err := ctx.Add(file, content); err != nil {
+            ctx.Close()
+            return nil, err
+        }
+    }
+    handler := http.FileServer(http.Dir(ctx.Dir))
+    server := httptest.NewServer(handler)
+    return &FakeStorage{
+        FakeContext: ctx,
+        Server:      server,
+    }, nil
+}
+
+func inspectField(name, field string) (string, error) {
+    format := fmt.Sprintf("{{.%s}}", field)
+    inspectCmd := exec.Command(dockerBinary, "inspect", "-f", format, name)
+    out, exitCode, err := runCommandWithOutput(inspectCmd)
+    if err != nil || exitCode != 0 {
+        return "", fmt.Errorf("failed to inspect %s: %s", name, out)
+    }
+    return strings.TrimSpace(out), nil
+}
+
+func inspectFieldJSON(name, field string) (string, error) {
+    format := fmt.Sprintf("{{json .%s}}", field)
+    inspectCmd := exec.Command(dockerBinary, "inspect", "-f", format, name)
+    out, exitCode, err := runCommandWithOutput(inspectCmd)
+    if err != nil || exitCode != 0 {
+        return "", fmt.Errorf("failed to inspect 
%s: %s", name, out) + } + return strings.TrimSpace(out), nil +} + +func inspectFieldMap(name, path, field string) (string, error) { + format := fmt.Sprintf("{{index .%s %q}}", path, field) + inspectCmd := exec.Command(dockerBinary, "inspect", "-f", format, name) + out, exitCode, err := runCommandWithOutput(inspectCmd) + if err != nil || exitCode != 0 { + return "", fmt.Errorf("failed to inspect %s: %s", name, out) + } + return strings.TrimSpace(out), nil +} + +func getIDByName(name string) (string, error) { + return inspectField(name, "Id") +} + +// getContainerState returns the exit code of the container +// and true if it's running +// the exit code should be ignored if it's running +func getContainerState(t *testing.T, id string) (int, bool, error) { + var ( + exitStatus int + running bool + ) + out, exitCode, err := dockerCmd(t, "inspect", "--format={{.State.Running}} {{.State.ExitCode}}", id) + if err != nil || exitCode != 0 { + return 0, false, fmt.Errorf("'%s' doesn't exist: %s", id, err) + } + + out = strings.Trim(out, "\n") + splitOutput := strings.Split(out, " ") + if len(splitOutput) != 2 { + return 0, false, fmt.Errorf("failed to get container state: output is broken") + } + if splitOutput[0] == "true" { + running = true + } + if n, err := strconv.Atoi(splitOutput[1]); err == nil { + exitStatus = n + } else { + return 0, false, fmt.Errorf("failed to get container state: couldn't parse integer") + } + + return exitStatus, running, nil +} + +func buildImageWithOut(name, dockerfile string, useCache bool) (string, string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, "-") + buildCmd := exec.Command(dockerBinary, args...) + buildCmd.Stdin = strings.NewReader(dockerfile) + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return "", out, fmt.Errorf("failed to build the image: %s", out) + } + id, err := getIDByName(name) + if err != nil { + return "", out, err + } + return id, out, nil +} + +func buildImage(name, dockerfile string, useCache bool) (string, error) { + id, _, err := buildImageWithOut(name, dockerfile, useCache) + return id, err +} + +func buildImageFromContext(name string, ctx *FakeContext, useCache bool) (string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, ".") + buildCmd := exec.Command(dockerBinary, args...) + buildCmd.Dir = ctx.Dir + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return "", fmt.Errorf("failed to build the image: %s", out) + } + return getIDByName(name) +} + +func buildImageFromPath(name, path string, useCache bool) (string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, path) + buildCmd := exec.Command(dockerBinary, args...) 
+ out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return "", fmt.Errorf("failed to build the image: %s", out) + } + return getIDByName(name) +} + +type FakeGIT struct { + *httptest.Server + Root string + RepoURL string +} + +func (g *FakeGIT) Close() { + g.Server.Close() + os.RemoveAll(g.Root) +} + +func fakeGIT(name string, files map[string]string) (*FakeGIT, error) { + tmp, err := ioutil.TempDir("", "fake-git-repo") + if err != nil { + return nil, err + } + ctx := &FakeContext{tmp} + for file, content := range files { + if err := ctx.Add(file, content); err != nil { + ctx.Close() + return nil, err + } + } + defer ctx.Close() + curdir, err := os.Getwd() + if err != nil { + return nil, err + } + defer os.Chdir(curdir) + + if output, err := exec.Command("git", "init", ctx.Dir).CombinedOutput(); err != nil { + return nil, fmt.Errorf("error trying to init repo: %s (%s)", err, output) + } + err = os.Chdir(ctx.Dir) + if err != nil { + return nil, err + } + if output, err := exec.Command("git", "add", "*").CombinedOutput(); err != nil { + return nil, fmt.Errorf("error trying to add files to repo: %s (%s)", err, output) + } + if output, err := exec.Command("git", "commit", "-a", "-m", "Initial commit").CombinedOutput(); err != nil { + return nil, fmt.Errorf("error trying to commit to repo: %s (%s)", err, output) + } + + root, err := ioutil.TempDir("", "docker-test-git-repo") + if err != nil { + return nil, err + } + repoPath := filepath.Join(root, name+".git") + if output, err := exec.Command("git", "clone", "--bare", ctx.Dir, repoPath).CombinedOutput(); err != nil { + os.RemoveAll(root) + return nil, fmt.Errorf("error trying to clone --bare: %s (%s)", err, output) + } + err = os.Chdir(repoPath) + if err != nil { + os.RemoveAll(root) + return nil, err + } + if output, err := exec.Command("git", "update-server-info").CombinedOutput(); err != nil { + os.RemoveAll(root) + return nil, fmt.Errorf("error trying to git update-server-info: %s (%s)", err, output) + } + err = os.Chdir(curdir) + if err != nil { + os.RemoveAll(root) + return nil, err + } + handler := http.FileServer(http.Dir(root)) + server := httptest.NewServer(handler) + return &FakeGIT{ + Server: server, + Root: root, + RepoURL: fmt.Sprintf("%s/%s.git", server.URL, name), + }, nil +} + +// Write `content` to the file at path `dst`, creating it if necessary, +// as well as any missing directories. +// The file is truncated if it already exists. +// Call t.Fatal() at the first error. +func writeFile(dst, content string, t *testing.T) { + // Create subdirectories if necessary + if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) { + t.Fatal(err) + } + f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) + if err != nil { + t.Fatal(err) + } + // Write content (truncate if it exists) + if _, err := io.Copy(f, strings.NewReader(content)); err != nil { + t.Fatal(err) + } +} + +// Return the contents of file at path `src`. 
+// Call t.Fatal() at the first error (including if the file doesn't exist).
+func readFile(src string, t *testing.T) (content string) {
+    f, err := os.Open(src)
+    if err != nil {
+        t.Fatal(err)
+    }
+    defer f.Close()
+    data, err := ioutil.ReadAll(f)
+    if err != nil {
+        t.Fatal(err)
+    }
+    return string(data)
+}
diff -Nru docker.io-0.9.1~dfsg1/integration-cli/MAINTAINERS docker.io-1.3.2~dfsg1/integration-cli/MAINTAINERS
--- docker.io-0.9.1~dfsg1/integration-cli/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/integration-cli/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1 @@
+Cristian Staretu (@unclejack)
diff -Nru docker.io-0.9.1~dfsg1/integration-cli/utils.go docker.io-1.3.2~dfsg1/integration-cli/utils.go
--- docker.io-0.9.1~dfsg1/integration-cli/utils.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/integration-cli/utils.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,268 @@
+package main
+
+import (
+    "bytes"
+    "encoding/json"
+    "fmt"
+    "io"
+    "math/rand"
+    "net/http"
+    "net/http/httptest"
+    "os"
+    "os/exec"
+    "reflect"
+    "strings"
+    "syscall"
+    "testing"
+    "time"
+
+    "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+)
+
+func getExitCode(err error) (int, error) {
+    if exiterr, ok := err.(*exec.ExitError); ok {
+        if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+            return procExit.ExitStatus(), nil
+        }
+    }
+    return 0, fmt.Errorf("failed to get exit code")
+}
+
+func processExitCode(err error) (exitCode int) {
+    if err != nil {
+        var exiterr error
+        if exitCode, exiterr = getExitCode(err); exiterr != nil {
+            // TODO: Fix this so we check the error's text.
+            // we've failed to retrieve exit code, so we set it to 127
+            exitCode = 127
+        }
+    }
+    return
+}
+
+func runCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) {
+    out, err := cmd.CombinedOutput()
+    exitCode = processExitCode(err)
+    output = string(out)
+    return
+}
+
+func runCommandWithStdoutStderr(cmd *exec.Cmd) (stdout string, stderr string, exitCode int, err error) {
+    var (
+        stderrBuffer, stdoutBuffer bytes.Buffer
+    )
+    cmd.Stderr = &stderrBuffer
+    cmd.Stdout = &stdoutBuffer
+    err = cmd.Run()
+    exitCode = processExitCode(err)
+
+    stdout = stdoutBuffer.String()
+    stderr = stderrBuffer.String()
+    return
+}
+
+var ErrCmdTimeout = fmt.Errorf("command timed out")
+
+func runCommandWithOutputAndTimeout(cmd *exec.Cmd, timeout time.Duration) (output string, exitCode int, err error) {
+    done := make(chan error)
+    go func() {
+        output, exitCode, err = runCommandWithOutput(cmd)
+        if err != nil || exitCode != 0 {
+            done <- fmt.Errorf("failed to run command: %s", err)
+            return
+        }
+        done <- nil
+    }()
+    select {
+    case <-time.After(timeout):
+        // the command took too long: kill it and report the timeout
+        if killErr := cmd.Process.Kill(); killErr != nil {
+            fmt.Printf("failed to kill (pid=%d): %v\n", cmd.Process.Pid, killErr)
+        }
+        err = ErrCmdTimeout
+    case <-done:
+    }
+    return
+}
+
+func runCommand(cmd *exec.Cmd) (exitCode int, err error) {
+    err = cmd.Run()
+    exitCode = processExitCode(err)
+    return
+}
+
+func startCommand(cmd *exec.Cmd) (exitCode int, err error) {
+    err = cmd.Start()
+    exitCode = processExitCode(err)
+    return
+}
+
+func logDone(message string) {
+    fmt.Printf("[PASSED]: %s\n", message)
+}
+
+func stripTrailingCharacters(target string) string {
+    return strings.TrimSpace(target)
+}
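A quick usage sketch of the exit-status plumbing above, assuming it is compiled together with this file; `sh -c "exit 3"` is just an arbitrary failing command:

    package main

    import (
        "fmt"
        "os/exec"
    )

    func main() {
        // Run returns an *exec.ExitError wrapping a syscall.WaitStatus,
        // which is exactly what getExitCode unpacks.
        err := exec.Command("sh", "-c", "exit 3").Run()
        fmt.Println(processExitCode(err)) // prints 3
    }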
+func errorOut(err error, t *testing.T, message string) {
+    if err != nil {
+        t.Fatal(message)
+    }
+}
+
+// errorOutOnNonNilError fails the test when err is nil, i.e. when an error
+// was expected but none occurred
+func errorOutOnNonNilError(err error, t *testing.T, message string) {
+    if err == nil {
+        t.Fatal(message)
+    }
+}
+
+func nLines(s string) int {
+    return strings.Count(s, "\n")
+}
+
+func unmarshalJSON(data []byte, result interface{}) error {
+    return json.Unmarshal(data, result)
+}
+
+func deepEqual(expected interface{}, result interface{}) bool {
+    return reflect.DeepEqual(result, expected)
+}
+
+func convertSliceOfStringsToMap(input []string) map[string]struct{} {
+    output := make(map[string]struct{})
+    for _, v := range input {
+        output[v] = struct{}{}
+    }
+    return output
+}
+
+func waitForContainer(contID string, args ...string) error {
+    args = append([]string{"run", "--name", contID}, args...)
+    cmd := exec.Command(dockerBinary, args...)
+    if _, err := runCommand(cmd); err != nil {
+        return err
+    }
+
+    return waitRun(contID)
+}
+
+func waitRun(contID string) error {
+    after := time.After(5 * time.Second)
+
+    for {
+        cmd := exec.Command(dockerBinary, "inspect", "-f", "{{.State.Running}}", contID)
+        out, _, err := runCommandWithOutput(cmd)
+        if err != nil {
+            return fmt.Errorf("error executing docker inspect: %v", err)
+        }
+
+        if strings.Contains(out, "true") {
+            break
+        }
+
+        select {
+        case <-after:
+            return fmt.Errorf("container did not come up in time")
+        default:
+        }
+
+        time.Sleep(100 * time.Millisecond)
+    }
+
+    return nil
+}
+
+func compareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error {
+    var (
+        e1Entries = make(map[string]struct{})
+        e2Entries = make(map[string]struct{})
+    )
+    for _, e := range e1 {
+        e1Entries[e.Name()] = struct{}{}
+    }
+    for _, e := range e2 {
+        e2Entries[e.Name()] = struct{}{}
+    }
+    if !reflect.DeepEqual(e1Entries, e2Entries) {
+        return fmt.Errorf("entries differ")
+    }
+    return nil
+}
+
+func ListTar(f io.Reader) ([]string, error) {
+    tr := tar.NewReader(f)
+    var entries []string
+
+    for {
+        th, err := tr.Next()
+        if err == io.EOF {
+            // end of tar archive
+            return entries, nil
+        }
+        if err != nil {
+            return entries, err
+        }
+        entries = append(entries, th.Name)
+    }
+}
+
+type FileServer struct {
+    *httptest.Server
+}
+
+func fileServer(files map[string]string) (*FileServer, error) {
+    var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
+        if filePath, found := files[r.URL.Path]; found {
+            http.ServeFile(w, r, filePath)
+        } else {
+            http.Error(w, http.StatusText(404), 404)
+        }
+    }
+
+    for _, file := range files {
+        if _, err := os.Stat(file); err != nil {
+            return nil, err
+        }
+    }
+    server := httptest.NewServer(handler)
+    return &FileServer{
+        Server: server,
+    }, nil
+}
+
+func copyWithCP(source, target string) error {
+    copyCmd := exec.Command("cp", "-rp", source, target)
+    out, exitCode, err := runCommandWithOutput(copyCmd)
+    if err != nil || exitCode != 0 {
+        return fmt.Errorf("failed to copy: error: %q ,output: %q", err, out)
+    }
+    return nil
+}
+
+// makeRandomString returns a random alphabetic string of length n
+func makeRandomString(n int) string {
+    letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+    b := make([]byte, n)
+    for i := range b {
+        b[i] = letters[rand.Intn(len(letters))]
+    }
+    return string(b)
+}
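A hypothetical test showing how fileServer composes with buildImage from docker_utils.go; the fixture path and image name are invented for illustration:

    // Hypothetical usage of the helpers above (fixture path and names invented):
    func TestBuildAddFromFakeServer(t *testing.T) {
        server, err := fileServer(map[string]string{
            "/robots.txt": "./fixtures/robots.txt", // URL path -> local file
        })
        if err != nil {
            t.Fatal(err)
        }
        defer server.Close()

        // server.URL looks like http://127.0.0.1:PORT, so a Dockerfile can
        // ADD the served file by URL.
        dockerfile := fmt.Sprintf("FROM busybox\nADD %s/robots.txt /", server.URL)
        if _, err := buildImage("test-add-url", dockerfile, true); err != nil {
            t.Fatal(err)
        }
        defer deleteImages("test-add-url")

        logDone("build - ADD a file served by httptest")
    }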
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/api_temp.go docker.io-1.3.2~dfsg1/libcontainer/api_temp.go
--- docker.io-0.9.1~dfsg1/libcontainer/api_temp.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/api_temp.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,34 @@
+/*
+Temporary API endpoint for libcontainer while the full API is finalized (api.go).
+*/
+package libcontainer
+
+import (
+    "github.com/docker/libcontainer/cgroups/fs"
+    "github.com/docker/libcontainer/cgroups/systemd"
+    "github.com/docker/libcontainer/network"
+)
+
+// TODO(vmarmol): Complete Stats() in final libcontainer API and move users to that.
+// DEPRECATED: The below portions are only to be used during the transition to the official API.
+// Returns all available stats for the given container.
+func GetStats(container *Config, state *State) (*ContainerStats, error) {
+    var (
+        err   error
+        stats = &ContainerStats{}
+    )
+
+    if systemd.UseSystemd() {
+        stats.CgroupStats, err = systemd.GetStats(container.Cgroups)
+    } else {
+        stats.CgroupStats, err = fs.GetStats(container.Cgroups)
+    }
+
+    if err != nil {
+        return stats, err
+    }
+
+    stats.NetworkStats, err = network.GetStats(&state.NetworkState)
+
+    return stats, err
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/apparmor/apparmor_disabled.go docker.io-1.3.2~dfsg1/libcontainer/apparmor/apparmor_disabled.go
--- docker.io-0.9.1~dfsg1/libcontainer/apparmor/apparmor_disabled.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/apparmor/apparmor_disabled.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,11 @@
+// +build !apparmor !linux
+
+package apparmor
+
+func IsEnabled() bool {
+    return false
+}
+
+func ApplyProfile(name string) error {
+    return nil
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/apparmor/apparmor.go docker.io-1.3.2~dfsg1/libcontainer/apparmor/apparmor.go
--- docker.io-0.9.1~dfsg1/libcontainer/apparmor/apparmor.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/apparmor/apparmor.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,35 @@
+// +build apparmor,linux
+
+package apparmor
+
+// #cgo LDFLAGS: -lapparmor
+// #include <sys/apparmor.h>
+// #include <stdlib.h>
+import "C"
+import (
+    "io/ioutil"
+    "os"
+    "unsafe"
+)
+
+func IsEnabled() bool {
+    if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil && os.Getenv("container") == "" {
+        buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
+        return err == nil && len(buf) > 1 && buf[0] == 'Y'
+    }
+    return false
+}
+
+func ApplyProfile(name string) error {
+    if name == "" {
+        return nil
+    }
+
+    cName := C.CString(name)
+    defer C.free(unsafe.Pointer(cName))
+
+    if _, err := C.aa_change_onexec(cName); err != nil {
+        return err
+    }
+    return nil
+}
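The two apparmor files above are mutually exclusive build variants: in old-style `// +build` lines, a comma is AND and a space is OR, so `apparmor,linux` compiles only when both tags hold and `!apparmor !linux` covers every other combination. A minimal pair illustrating the pattern; the package and tag names are invented:

    // file: feature_enabled.go
    // +build featurex,linux

    package feature

    // Supported is true only in builds with the featurex tag on Linux.
    func Supported() bool { return true }

    // file: feature_disabled.go
    // +build !featurex !linux

    package feature

    // Supported is false in every other build.
    func Supported() bool { return false }

Exactly one of the two files is compiled into any given build, so callers can use feature.Supported() unconditionally.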
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/apparmor/gen.go docker.io-1.3.2~dfsg1/libcontainer/apparmor/gen.go
--- docker.io-0.9.1~dfsg1/libcontainer/apparmor/gen.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/apparmor/gen.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,94 @@
+package apparmor
+
+import (
+    "io"
+    "os"
+    "text/template"
+)
+
+type data struct {
+    Name         string
+    Imports      []string
+    InnerImports []string
+}
+
+const baseTemplate = `
+{{range $value := .Imports}}
+{{$value}}
+{{end}}
+
+profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
+{{range $value := .InnerImports}}
+  {{$value}}
+{{end}}
+
+  network,
+  capability,
+  file,
+  umount,
+
+  mount fstype=tmpfs,
+  mount fstype=mqueue,
+  mount fstype=fuse.*,
+  mount fstype=binfmt_misc -> /proc/sys/fs/binfmt_misc/,
+  mount fstype=efivarfs -> /sys/firmware/efi/efivars/,
+  mount fstype=fusectl -> /sys/fs/fuse/connections/,
+  mount fstype=securityfs -> /sys/kernel/security/,
+  mount fstype=debugfs -> /sys/kernel/debug/,
+  mount fstype=proc -> /proc/,
+  mount fstype=sysfs -> /sys/,
+
+  deny @{PROC}/sys/fs/** wklx,
+  deny @{PROC}/sysrq-trigger rwklx,
+  deny @{PROC}/mem rwklx,
+  deny @{PROC}/kmem rwklx,
+  deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
+  deny @{PROC}/sys/kernel/*/** wklx,
+
+  deny mount options=(ro, remount) -> /,
+  deny mount fstype=debugfs -> /var/lib/ureadahead/debugfs/,
+  deny mount fstype=devpts,
+
+  deny /sys/[^f]*/** wklx,
+  deny /sys/f[^s]*/** wklx,
+  deny /sys/fs/[^c]*/** wklx,
+  deny /sys/fs/c[^g]*/** wklx,
+  deny /sys/fs/cg[^r]*/** wklx,
+  deny /sys/firmware/efi/efivars/** rwklx,
+  deny /sys/kernel/security/** rwklx,
+}
+`
+
+func generateProfile(out io.Writer) error {
+    compiled, err := template.New("apparmor_profile").Parse(baseTemplate)
+    if err != nil {
+        return err
+    }
+    data := &data{
+        Name: "docker-default",
+    }
+    if tunablesExists() {
+        data.Imports = append(data.Imports, "#include <tunables/global>")
+    } else {
+        data.Imports = append(data.Imports, "@{PROC}=/proc/")
+    }
+    if abstractionsExists() {
+        data.InnerImports = append(data.InnerImports, "#include <abstractions/base>")
+    }
+    if err := compiled.Execute(out, data); err != nil {
+        return err
+    }
+    return nil
+}
+
+// check whether /etc/apparmor.d/tunables/global exists
+func tunablesExists() bool {
+    _, err := os.Stat("/etc/apparmor.d/tunables/global")
+    return err == nil
+}
+
+// check whether /etc/apparmor.d/abstractions/base exists
+func abstractionsExists() bool {
+    _, err := os.Stat("/etc/apparmor.d/abstractions/base")
+    return err == nil
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/apparmor/setup.go docker.io-1.3.2~dfsg1/libcontainer/apparmor/setup.go
--- docker.io-0.9.1~dfsg1/libcontainer/apparmor/setup.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/apparmor/setup.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,44 @@
+package apparmor
+
+import (
+    "fmt"
+    "os"
+    "os/exec"
+    "path"
+)
+
+const (
+    DefaultProfilePath = "/etc/apparmor.d/docker"
+)
+
+func InstallDefaultProfile() error {
+    if !IsEnabled() {
+        return nil
+    }
+
+    // Make sure /etc/apparmor.d exists
+    if err := os.MkdirAll(path.Dir(DefaultProfilePath), 0755); err != nil {
+        return err
+    }
+
+    f, err := os.OpenFile(DefaultProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+    if err != nil {
+        return err
+    }
+    if err := generateProfile(f); err != nil {
+        f.Close()
+        return err
+    }
+    f.Close()
+
+    cmd := exec.Command("/sbin/apparmor_parser", "-r", "-W", "docker")
+    // to use the parser directly we have to make sure we are in the correct
+    // dir with the profile
+    cmd.Dir = "/etc/apparmor.d"
+
+    output, err := cmd.CombinedOutput()
+    if err != nil {
+        return fmt.Errorf("Error loading docker apparmor profile: %s (%s)", err, output)
+    }
+    return nil
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/cgroups.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/cgroups.go
--- docker.io-0.9.1~dfsg1/libcontainer/cgroups/cgroups.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/cgroups.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,60 @@
+package cgroups
+
+import (
+    "fmt"
+
+    "github.com/docker/libcontainer/devices"
+)
+
+type FreezerState string
+
+const (
+    Undefined FreezerState = ""
+    Frozen    FreezerState = "FROZEN"
+    Thawed    FreezerState = "THAWED"
+)
+
+type NotFoundError struct {
+    Subsystem string
+}
+
+func (e *NotFoundError) Error() string {
+    return fmt.Sprintf("mountpoint for %s not found", e.Subsystem)
+}
+
+func NewNotFoundError(sub string) error {
+    return &NotFoundError{
+        Subsystem: sub,
+    }
+}
+
+func IsNotFound(err error) bool {
+    if err == nil {
+        return false
+    }
+
+    _, ok := 
err.(*NotFoundError) + return ok +} + +type Cgroup struct { + Name string `json:"name,omitempty"` + Parent string `json:"parent,omitempty"` // name of parent cgroup or slice + + AllowAllDevices bool `json:"allow_all_devices,omitempty"` // If this is true allow access to any kind of device within the container. If false, allow access only to devices explicitly listed in the allowed_devices list. + AllowedDevices []*devices.Device `json:"allowed_devices,omitempty"` + Memory int64 `json:"memory,omitempty"` // Memory limit (in bytes) + MemoryReservation int64 `json:"memory_reservation,omitempty"` // Memory reservation or soft_limit (in bytes) + MemorySwap int64 `json:"memory_swap,omitempty"` // Total memory usage (memory + swap); set `-1' to disable swap + CpuShares int64 `json:"cpu_shares,omitempty"` // CPU shares (relative weight vs. other containers) + CpuQuota int64 `json:"cpu_quota,omitempty"` // CPU hardcap limit (in usecs). Allowed cpu time in a given period. + CpuPeriod int64 `json:"cpu_period,omitempty"` // CPU period to be used for hardcapping (in usecs). 0 to use system default. + CpusetCpus string `json:"cpuset_cpus,omitempty"` // CPU to use + Freezer FreezerState `json:"freezer,omitempty"` // set the freeze value for the process + Slice string `json:"slice,omitempty"` // Parent slice to use for systemd +} + +type ActiveCgroup interface { + Cleanup() error + Paths() (map[string]string, error) +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/cgroups_test.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/cgroups_test.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/cgroups_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/cgroups_test.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,27 @@ +package cgroups + +import ( + "bytes" + "testing" +) + +const ( + cgroupsContents = `11:hugetlb:/ +10:perf_event:/ +9:blkio:/ +8:net_cls:/ +7:freezer:/ +6:devices:/ +5:memory:/ +4:cpuacct,cpu:/ +3:cpuset:/ +2:name=systemd:/user.slice/user-1000.slice/session-16.scope` +) + +func TestParseCgroups(t *testing.T) { + r := bytes.NewBuffer([]byte(cgroupsContents)) + _, err := ParseCgroupFile("blkio", r) + if err != nil { + t.Fatal(err) + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/cgutil/cgutil.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/cgutil/cgutil.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/cgutil/cgutil.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/cgutil/cgutil.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,264 @@ +package main + +import ( + "encoding/json" + "fmt" + "log" + "os" + "syscall" + "time" + + "github.com/codegangsta/cli" + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/cgroups/fs" + "github.com/docker/libcontainer/cgroups/systemd" +) + +var createCommand = cli.Command{ + Name: "create", + Usage: "Create a cgroup container using the supplied configuration and initial process.", + Flags: []cli.Flag{ + cli.StringFlag{Name: "config, c", Value: "cgroup.json", Usage: "path to container configuration (cgroups.Cgroup object)"}, + cli.IntFlag{Name: "pid, p", Value: 0, Usage: "pid of the initial process in the container"}, + }, + Action: createAction, +} + +var destroyCommand = cli.Command{ + Name: "destroy", + Usage: "Destroy an existing cgroup container.", + Flags: []cli.Flag{ + cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"}, + cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"}, + }, + Action: destroyAction, +} + +var 
statsCommand = cli.Command{ + Name: "stats", + Usage: "Get stats for cgroup", + Flags: []cli.Flag{ + cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"}, + cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"}, + }, + Action: statsAction, +} + +var pauseCommand = cli.Command{ + Name: "pause", + Usage: "Pause cgroup", + Flags: []cli.Flag{ + cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"}, + cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"}, + }, + Action: pauseAction, +} + +var resumeCommand = cli.Command{ + Name: "resume", + Usage: "Resume a paused cgroup", + Flags: []cli.Flag{ + cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"}, + cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"}, + }, + Action: resumeAction, +} + +var psCommand = cli.Command{ + Name: "ps", + Usage: "Get list of pids for a cgroup", + Flags: []cli.Flag{ + cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"}, + cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"}, + }, + Action: psAction, +} + +func getConfigFromFile(c *cli.Context) (*cgroups.Cgroup, error) { + f, err := os.Open(c.String("config")) + if err != nil { + return nil, err + } + defer f.Close() + + var config *cgroups.Cgroup + if err := json.NewDecoder(f).Decode(&config); err != nil { + log.Fatal(err) + } + return config, nil +} + +func openLog(name string) error { + f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0755) + if err != nil { + return err + } + + log.SetOutput(f) + return nil +} + +func getConfig(context *cli.Context) (*cgroups.Cgroup, error) { + name := context.String("name") + if name == "" { + log.Fatal(fmt.Errorf("Missing container name")) + } + parent := context.String("parent") + return &cgroups.Cgroup{ + Name: name, + Parent: parent, + }, nil +} + +func killAll(config *cgroups.Cgroup) { + // We could use freezer here to prevent process spawning while we are trying + // to kill everything. But going with more portable solution of retrying for + // now. + pids := getPids(config) + retry := 10 + for len(pids) != 0 || retry > 0 { + killPids(pids) + time.Sleep(100 * time.Millisecond) + retry-- + pids = getPids(config) + } + if len(pids) != 0 { + log.Fatal(fmt.Errorf("Could not kill existing processes in the container.")) + } +} + +func getPids(config *cgroups.Cgroup) []int { + pids, err := fs.GetPids(config) + if err != nil { + log.Fatal(err) + } + return pids +} + +func killPids(pids []int) { + for _, pid := range pids { + // pids might go away on their own. Ignore errors. 
+ syscall.Kill(pid, syscall.SIGKILL) + } +} + +func setFreezerState(context *cli.Context, state cgroups.FreezerState) { + config, err := getConfig(context) + if err != nil { + log.Fatal(err) + } + + if systemd.UseSystemd() { + err = systemd.Freeze(config, state) + } else { + err = fs.Freeze(config, state) + } + if err != nil { + log.Fatal(err) + } +} + +func createAction(context *cli.Context) { + config, err := getConfigFromFile(context) + if err != nil { + log.Fatal(err) + } + pid := context.Int("pid") + if pid <= 0 { + log.Fatal(fmt.Errorf("Invalid pid : %d", pid)) + } + if systemd.UseSystemd() { + _, err := systemd.Apply(config, pid) + if err != nil { + log.Fatal(err) + } + } else { + _, err := fs.Apply(config, pid) + if err != nil { + log.Fatal(err) + } + } +} + +func destroyAction(context *cli.Context) { + config, err := getConfig(context) + if err != nil { + log.Fatal(err) + } + + killAll(config) + // Systemd will clean up cgroup state for empty container. + if !systemd.UseSystemd() { + err := fs.Cleanup(config) + if err != nil { + log.Fatal(err) + } + } +} + +func statsAction(context *cli.Context) { + config, err := getConfig(context) + if err != nil { + log.Fatal(err) + } + stats, err := fs.GetStats(config) + if err != nil { + log.Fatal(err) + } + + out, err := json.MarshalIndent(stats, "", "\t") + if err != nil { + log.Fatal(err) + } + fmt.Printf("Usage stats for '%s':\n %v\n", config.Name, string(out)) +} + +func pauseAction(context *cli.Context) { + setFreezerState(context, cgroups.Frozen) +} + +func resumeAction(context *cli.Context) { + setFreezerState(context, cgroups.Thawed) +} + +func psAction(context *cli.Context) { + config, err := getConfig(context) + if err != nil { + log.Fatal(err) + } + + pids, err := fs.GetPids(config) + if err != nil { + log.Fatal(err) + } + + fmt.Printf("Pids in '%s':\n", config.Name) + fmt.Println(pids) +} + +func main() { + logPath := os.Getenv("log") + if logPath != "" { + if err := openLog(logPath); err != nil { + log.Fatal(err) + } + } + + app := cli.NewApp() + app.Name = "cgutil" + app.Usage = "Test utility for libcontainer cgroups package" + app.Version = "0.1" + + app.Commands = []cli.Command{ + createCommand, + destroyCommand, + statsCommand, + pauseCommand, + resumeCommand, + psCommand, + } + + if err := app.Run(os.Args); err != nil { + log.Fatal(err) + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/cgutil/sample_cgroup.json docker.io-1.3.2~dfsg1/libcontainer/cgroups/cgutil/sample_cgroup.json --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/cgutil/sample_cgroup.json 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/cgutil/sample_cgroup.json 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "name": "luke", + "parent": "darth", + "allow_all_devices": true, + "memory": 1073741824, + "memory_swap": -1, + "cpu_shares": 2048, + "cpu_quota": 500000, + "cpu_period": 250000 +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/apply_raw.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/apply_raw.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/apply_raw.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/apply_raw.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,248 @@ +package fs + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + + "github.com/docker/libcontainer/cgroups" +) + +var ( + subsystems = map[string]subsystem{ + "devices": &DevicesGroup{}, + "memory": &MemoryGroup{}, + "cpu": &CpuGroup{}, + "cpuset": &CpusetGroup{}, + 
"cpuacct": &CpuacctGroup{}, + "blkio": &BlkioGroup{}, + "perf_event": &PerfEventGroup{}, + "freezer": &FreezerGroup{}, + } + CgroupProcesses = "cgroup.procs" +) + +// The absolute path to the root of the cgroup hierarchies. +var cgroupRoot string + +// TODO(vmarmol): Report error here, we'll probably need to wait for the new API. +func init() { + // we can pick any subsystem to find the root + cpuRoot, err := cgroups.FindCgroupMountpoint("cpu") + if err != nil { + return + } + cgroupRoot = filepath.Dir(cpuRoot) + + if _, err := os.Stat(cgroupRoot); err != nil { + return + } +} + +type subsystem interface { + // Returns the stats, as 'stats', corresponding to the cgroup under 'path'. + GetStats(path string, stats *cgroups.Stats) error + // Removes the cgroup represented by 'data'. + Remove(*data) error + // Creates and joins the cgroup represented by data. + Set(*data) error +} + +type data struct { + root string + cgroup string + c *cgroups.Cgroup + pid int +} + +func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { + d, err := getCgroupData(c, pid) + if err != nil { + return nil, err + } + + for _, sys := range subsystems { + if err := sys.Set(d); err != nil { + d.Cleanup() + return nil, err + } + } + + return d, nil +} + +func Cleanup(c *cgroups.Cgroup) error { + d, err := getCgroupData(c, 0) + if err != nil { + return fmt.Errorf("Could not get Cgroup data %s", err) + } + return d.Cleanup() +} + +func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) { + stats := cgroups.NewStats() + + d, err := getCgroupData(c, 0) + if err != nil { + return nil, fmt.Errorf("getting CgroupData %s", err) + } + + for sysname, sys := range subsystems { + path, err := d.path(sysname) + if err != nil { + // Don't fail if a cgroup hierarchy was not found, just skip this subsystem + if cgroups.IsNotFound(err) { + continue + } + + return nil, err + } + + if err := sys.GetStats(path, stats); err != nil { + return nil, err + } + } + + return stats, nil +} + +// Freeze toggles the container's freezer cgroup depending on the state +// provided +func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error { + d, err := getCgroupData(c, 0) + if err != nil { + return err + } + + c.Freezer = state + + freezer := subsystems["freezer"] + + return freezer.Set(d) +} + +func GetPids(c *cgroups.Cgroup) ([]int, error) { + d, err := getCgroupData(c, 0) + if err != nil { + return nil, err + } + + dir, err := d.path("devices") + if err != nil { + return nil, err + } + + return cgroups.ReadProcsFile(dir) +} + +func getCgroupData(c *cgroups.Cgroup, pid int) (*data, error) { + if cgroupRoot == "" { + return nil, fmt.Errorf("failed to find the cgroup root") + } + + cgroup := c.Name + if c.Parent != "" { + cgroup = filepath.Join(c.Parent, cgroup) + } + + return &data{ + root: cgroupRoot, + cgroup: cgroup, + c: c, + pid: pid, + }, nil +} + +func (raw *data) parent(subsystem string) (string, error) { + initPath, err := cgroups.GetInitCgroupDir(subsystem) + if err != nil { + return "", err + } + return filepath.Join(raw.root, subsystem, initPath), nil +} + +func (raw *data) Paths() (map[string]string, error) { + paths := make(map[string]string) + + for sysname := range subsystems { + path, err := raw.path(sysname) + if err != nil { + // Don't fail if a cgroup hierarchy was not found, just skip this subsystem + if cgroups.IsNotFound(err) { + continue + } + + return nil, err + } + + paths[sysname] = path + } + + return paths, nil +} + +func (raw *data) path(subsystem string) (string, error) { + // If the cgroup 
name/path is absolute do not look relative to the cgroup of the init process. + if filepath.IsAbs(raw.cgroup) { + path := filepath.Join(raw.root, subsystem, raw.cgroup) + + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + return "", cgroups.NewNotFoundError(subsystem) + } + + return "", err + } + + return path, nil + } + + parent, err := raw.parent(subsystem) + if err != nil { + return "", err + } + + return filepath.Join(parent, raw.cgroup), nil +} + +func (raw *data) join(subsystem string) (string, error) { + path, err := raw.path(subsystem) + if err != nil { + return "", err + } + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return "", err + } + if err := writeFile(path, CgroupProcesses, strconv.Itoa(raw.pid)); err != nil { + return "", err + } + return path, nil +} + +func (raw *data) Cleanup() error { + for _, sys := range subsystems { + sys.Remove(raw) + } + return nil +} + +func writeFile(dir, file, data string) error { + return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700) +} + +func readFile(dir, file string) (string, error) { + data, err := ioutil.ReadFile(filepath.Join(dir, file)) + return string(data), err +} + +func removePath(p string, err error) error { + if err != nil { + return err + } + if p != "" { + return os.RemoveAll(p) + } + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/blkio.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/blkio.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/blkio.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/blkio.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,167 @@ +package fs + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/libcontainer/cgroups" +) + +type BlkioGroup struct { +} + +func (s *BlkioGroup) Set(d *data) error { + // we just want to join this group even though we don't set anything + if _, err := d.join("blkio"); err != nil && !cgroups.IsNotFound(err) { + return err + } + + return nil +} + +func (s *BlkioGroup) Remove(d *data) error { + return removePath(d.path("blkio")) +} + +/* +examples: + + blkio.sectors + 8:0 6792 + + blkio.io_service_bytes + 8:0 Read 1282048 + 8:0 Write 2195456 + 8:0 Sync 2195456 + 8:0 Async 1282048 + 8:0 Total 3477504 + Total 3477504 + + blkio.io_serviced + 8:0 Read 124 + 8:0 Write 104 + 8:0 Sync 104 + 8:0 Async 124 + 8:0 Total 228 + Total 228 + + blkio.io_queued + 8:0 Read 0 + 8:0 Write 0 + 8:0 Sync 0 + 8:0 Async 0 + 8:0 Total 0 + Total 0 +*/ + +func splitBlkioStatLine(r rune) bool { + return r == ' ' || r == ':' +} + +func getBlkioStat(path string) ([]cgroups.BlkioStatEntry, error) { + var blkioStats []cgroups.BlkioStatEntry + f, err := os.Open(path) + if err != nil { + if os.IsNotExist(err) { + return blkioStats, nil + } + return nil, err + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + // format: dev type amount + fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine) + if len(fields) < 3 { + if len(fields) == 2 && fields[0] == "Total" { + // skip total line + continue + } else { + return nil, fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text()) + } + } + + v, err := strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, err + } + major := v + + v, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + minor := v + + op := "" + valueField := 2 + if len(fields) == 4 { + op = fields[2] + valueField = 3 + } + v, err = 
strconv.ParseUint(fields[valueField], 10, 64) + if err != nil { + return nil, err + } + blkioStats = append(blkioStats, cgroups.BlkioStatEntry{Major: major, Minor: minor, Op: op, Value: v}) + } + + return blkioStats, nil +} + +func (s *BlkioGroup) GetStats(path string, stats *cgroups.Stats) error { + // Try to read CFQ stats available on all CFQ enabled kernels first + if blkioStats, err := getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err == nil && blkioStats != nil { + return getCFQStats(path, stats) + } + return getStats(path, stats) // Use generic stats as fallback +} + +func getCFQStats(path string, stats *cgroups.Stats) error { + var blkioStats []cgroups.BlkioStatEntry + var err error + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.sectors_recursive")); err != nil { + return err + } + stats.BlkioStats.SectorsRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_bytes_recursive")); err != nil { + return err + } + stats.BlkioStats.IoServiceBytesRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err != nil { + return err + } + stats.BlkioStats.IoServicedRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_queued_recursive")); err != nil { + return err + } + stats.BlkioStats.IoQueuedRecursive = blkioStats + + return nil +} + +func getStats(path string, stats *cgroups.Stats) error { + var blkioStats []cgroups.BlkioStatEntry + var err error + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_service_bytes")); err != nil { + return err + } + stats.BlkioStats.IoServiceBytesRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_serviced")); err != nil { + return err + } + stats.BlkioStats.IoServicedRecursive = blkioStats + + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/blkio_test.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/blkio_test.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/blkio_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/blkio_test.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,248 @@ +package fs + +import ( + "testing" + + "github.com/docker/libcontainer/cgroups" +) + +const ( + sectorsRecursiveContents = `8:0 1024` + serviceBytesRecursiveContents = `8:0 Read 100 +8:0 Write 200 +8:0 Sync 300 +8:0 Async 500 +8:0 Total 500 +Total 500` + servicedRecursiveContents = `8:0 Read 10 +8:0 Write 40 +8:0 Sync 20 +8:0 Async 30 +8:0 Total 50 +Total 50` + queuedRecursiveContents = `8:0 Read 1 +8:0 Write 4 +8:0 Sync 2 +8:0 Async 3 +8:0 Total 5 +Total 5` + throttleServiceBytes = `8:0 Read 11030528 +8:0 Write 23 +8:0 Sync 42 +8:0 Async 11030528 +8:0 Total 11030528 +252:0 Read 11030528 +252:0 Write 23 +252:0 Sync 42 +252:0 Async 11030528 +252:0 Total 11030528 +Total 22061056` + throttleServiced = `8:0 Read 164 +8:0 Write 23 +8:0 Sync 42 +8:0 Async 164 +8:0 Total 164 +252:0 Read 164 +252:0 Write 23 +252:0 Sync 42 +252:0 Async 164 +252:0 Total 164 +Total 328` +) + +func appendBlkioStatEntry(blkioStatEntries *[]cgroups.BlkioStatEntry, major, minor, value uint64, op string) { + *blkioStatEntries = append(*blkioStatEntries, cgroups.BlkioStatEntry{Major: major, Minor: minor, Value: value, Op: op}) +} + +func TestBlkioStats(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + 
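+ // Populate the mock blkio cgroup with CFQ-style recursive stat files;
+ // their presence makes GetStats take the CFQ path instead of the
+ // throttle fallback.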
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } + + // Verify expected stats. + expectedStats := cgroups.BlkioStats{} + appendBlkioStatEntry(&expectedStats.SectorsRecursive, 8, 0, 1024, "") + + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 100, "Read") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 200, "Write") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 300, "Sync") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Async") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Total") + + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 10, "Read") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 40, "Write") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 20, "Sync") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 30, "Async") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 50, "Total") + + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 1, "Read") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 4, "Write") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 2, "Sync") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 3, "Async") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 5, "Total") + + expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats) +} + +func TestBlkioStatsNoSectorsFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoServiceBytesFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoServicedFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoQueuedFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + 
helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsUnexpectedNumberOfFields(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": "8:0 Read 100 100", + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected to fail, but did not") + } +} + +func TestBlkioStatsUnexpectedFieldType(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": "8:0 Read Write", + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected to fail, but did not") + } +} + +func TestNonCFQBlkioStats(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": "", + "blkio.io_serviced_recursive": "", + "blkio.io_queued_recursive": "", + "blkio.sectors_recursive": "", + "blkio.throttle.io_service_bytes": throttleServiceBytes, + "blkio.throttle.io_serviced": throttleServiced, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } + + // Verify expected stats. 
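+ // Each "major:minor Op Value" line becomes one BlkioStatEntry; the bare
+ // "Total" summary line at the end of a file is skipped by the parser.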
+ expectedStats := cgroups.BlkioStats{} + + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Read") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 23, "Write") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 42, "Sync") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Async") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Total") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Read") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 23, "Write") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 42, "Sync") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Async") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Total") + + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Read") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 23, "Write") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 42, "Sync") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Async") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Total") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Read") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 23, "Write") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 42, "Sync") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Async") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Total") + + expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats) +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/cpuacct.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/cpuacct.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/cpuacct.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/cpuacct.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,110 @@ +package fs + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/system" +) + +const ( + cgroupCpuacctStat = "cpuacct.stat" + nanosecondsInSecond = 1000000000 +) + +var clockTicks = uint64(system.GetClockTicks()) + +type CpuacctGroup struct { +} + +func (s *CpuacctGroup) Set(d *data) error { + // we just want to join this group even though we don't set anything + if _, err := d.join("cpuacct"); err != nil && !cgroups.IsNotFound(err) { + return err + } + + return nil +} + +func (s *CpuacctGroup) Remove(d *data) error { + return removePath(d.path("cpuacct")) +} + +func (s *CpuacctGroup) GetStats(path string, stats *cgroups.Stats) error { + userModeUsage, kernelModeUsage, err := getCpuUsageBreakdown(path) + if err != nil { + return err + } + + totalUsage, err := getCgroupParamUint(path, "cpuacct.usage") + if err != nil { + return err + } + + percpuUsage, err := getPercpuUsage(path) + if err != nil { + return err + } + + stats.CpuStats.CpuUsage.TotalUsage = totalUsage + stats.CpuStats.CpuUsage.PercpuUsage = percpuUsage + stats.CpuStats.CpuUsage.UsageInUsermode = userModeUsage + stats.CpuStats.CpuUsage.UsageInKernelmode = kernelModeUsage + return nil +} + +// Returns user and kernel usage breakdown in nanoseconds. 
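+// cpuacct.stat reports both values in clock ticks (USER_HZ), so they are
+// converted to nanoseconds using nanosecondsInSecond and the tick rate
+// reported by system.GetClockTicks.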
+func getCpuUsageBreakdown(path string) (uint64, uint64, error) { + userModeUsage := uint64(0) + kernelModeUsage := uint64(0) + const ( + userField = "user" + systemField = "system" + ) + + // Expected format: + // user <usage in ticks> + // system <usage in ticks> + data, err := ioutil.ReadFile(filepath.Join(path, cgroupCpuacctStat)) + if err != nil { + return 0, 0, err + } + fields := strings.Fields(string(data)) + if len(fields) != 4 { + return 0, 0, fmt.Errorf("failure - %s is expected to have 4 fields", filepath.Join(path, cgroupCpuacctStat)) + } + if fields[0] != userField { + return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[0], cgroupCpuacctStat, userField) + } + if fields[2] != systemField { + return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[2], cgroupCpuacctStat, systemField) + } + if userModeUsage, err = strconv.ParseUint(fields[1], 10, 64); err != nil { + return 0, 0, err + } + if kernelModeUsage, err = strconv.ParseUint(fields[3], 10, 64); err != nil { + return 0, 0, err + } + + return (userModeUsage * nanosecondsInSecond) / clockTicks, (kernelModeUsage * nanosecondsInSecond) / clockTicks, nil +} + +func getPercpuUsage(path string) ([]uint64, error) { + percpuUsage := []uint64{} + data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.usage_percpu")) + if err != nil { + return percpuUsage, err + } + for _, value := range strings.Fields(string(data)) { + value, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return percpuUsage, fmt.Errorf("Unable to convert param value to uint64: %s", err) + } + percpuUsage = append(percpuUsage, value) + } + return percpuUsage, nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/cpu.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/cpu.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/cpu.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/cpu.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,72 @@ +package fs + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + + "github.com/docker/libcontainer/cgroups" +) + +type CpuGroup struct { +} + +func (s *CpuGroup) Set(d *data) error { + // We always want to join the cpu group, to allow fair cpu scheduling + // on a container basis + dir, err := d.join("cpu") + if err != nil { + return err + } + if d.c.CpuShares != 0 { + if err := writeFile(dir, "cpu.shares", strconv.FormatInt(d.c.CpuShares, 10)); err != nil { + return err + } + } + if d.c.CpuPeriod != 0 { + if err := writeFile(dir, "cpu.cfs_period_us", strconv.FormatInt(d.c.CpuPeriod, 10)); err != nil { + return err + } + } + if d.c.CpuQuota != 0 { + if err := writeFile(dir, "cpu.cfs_quota_us", strconv.FormatInt(d.c.CpuQuota, 10)); err != nil { + return err + } + } + return nil +} + +func (s *CpuGroup) Remove(d *data) error { + return removePath(d.path("cpu")) +} + +func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error { + f, err := os.Open(filepath.Join(path, "cpu.stat")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + t, v, err := getCgroupParamKeyValue(sc.Text()) + if err != nil { + return err + } + switch t { + case "nr_periods": + stats.CpuStats.ThrottlingData.Periods = v + + case "nr_throttled": + stats.CpuStats.ThrottlingData.ThrottledPeriods = v + + case "throttled_time": + stats.CpuStats.ThrottlingData.ThrottledTime = v + } + } + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/cpuset.go 
docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/cpuset.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/cpuset.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/cpuset.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,119 @@ +package fs + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "strconv" + + "github.com/docker/libcontainer/cgroups" +) + +type CpusetGroup struct { +} + +func (s *CpusetGroup) Set(d *data) error { + // we don't want to join this cgroup unless it is specified + if d.c.CpusetCpus != "" { + dir, err := d.path("cpuset") + if err != nil { + return err + } + + return s.SetDir(dir, d.c.CpusetCpus, d.pid) + } + + return nil +} + +func (s *CpusetGroup) Remove(d *data) error { + return removePath(d.path("cpuset")) +} + +func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error { + return nil +} + +func (s *CpusetGroup) SetDir(dir, value string, pid int) error { + if err := s.ensureParent(dir); err != nil { + return err + } + + // because we are not using d.join we need to place the pid into the procs file + // unlike the other subsystems + if err := writeFile(dir, "cgroup.procs", strconv.Itoa(pid)); err != nil { + return err + } + + if err := writeFile(dir, "cpuset.cpus", value); err != nil { + return err + } + + return nil +} + +func (s *CpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) { + if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus")); err != nil { + return + } + if mems, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.mems")); err != nil { + return + } + return cpus, mems, nil +} + +// ensureParent ensures that the parent directory of current is created, +// with cpuset.cpus and cpuset.mems copied down from its own parent whenever +// those files are empty apart from a newline +func (s *CpusetGroup) ensureParent(current string) error { + parent := filepath.Dir(current) + + if _, err := os.Stat(parent); err != nil { + if !os.IsNotExist(err) { + return err + } + + if err := s.ensureParent(parent); err != nil { + return err + } + } + + if err := os.MkdirAll(current, 0755); err != nil && !os.IsExist(err) { + return err + } + return s.copyIfNeeded(current, parent) +} + +// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent +// directory to the current directory if the files are empty +func (s *CpusetGroup) copyIfNeeded(current, parent string) error { + var ( + err error + currentCpus, currentMems []byte + parentCpus, parentMems []byte + ) + + if currentCpus, currentMems, err = s.getSubsystemSettings(current); err != nil { + return err + } + if parentCpus, parentMems, err = s.getSubsystemSettings(parent); err != nil { + return err + } + + if s.isEmpty(currentCpus) { + if err := writeFile(current, "cpuset.cpus", string(parentCpus)); err != nil { + return err + } + } + if s.isEmpty(currentMems) { + if err := writeFile(current, "cpuset.mems", string(parentMems)); err != nil { + return err + } + } + return nil +} + +func (s *CpusetGroup) isEmpty(b []byte) bool { + return len(bytes.Trim(b, "\n")) == 0 +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/cpu_test.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/cpu_test.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/cpu_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/cpu_test.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,69 @@ +package fs + +import ( + "fmt" + "testing" + + "github.com/docker/libcontainer/cgroups" +) + +func 
TestCpuStats(t *testing.T) { + helper := NewCgroupTestUtil("cpu", t) + defer helper.cleanup() + + const ( + kNrPeriods = 2000 + kNrThrottled = 200 + kThrottledTime = uint64(18446744073709551615) + ) + + cpuStatContent := fmt.Sprintf("nr_periods %d\n nr_throttled %d\n throttled_time %d\n", + kNrPeriods, kNrThrottled, kThrottledTime) + helper.writeFileContents(map[string]string{ + "cpu.stat": cpuStatContent, + }) + + cpu := &CpuGroup{} + actualStats := *cgroups.NewStats() + err := cpu.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } + + expectedStats := cgroups.ThrottlingData{ + Periods: kNrPeriods, + ThrottledPeriods: kNrThrottled, + ThrottledTime: kThrottledTime} + + expectThrottlingDataEquals(t, expectedStats, actualStats.CpuStats.ThrottlingData) +} + +func TestNoCpuStatFile(t *testing.T) { + helper := NewCgroupTestUtil("cpu", t) + defer helper.cleanup() + + cpu := &CpuGroup{} + actualStats := *cgroups.NewStats() + err := cpu.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal("Expected not to fail, but did") + } +} + +func TestInvalidCpuStat(t *testing.T) { + helper := NewCgroupTestUtil("cpu", t) + defer helper.cleanup() + cpuStatContent := `nr_periods 2000 + nr_throttled 200 + throttled_time fortytwo` + helper.writeFileContents(map[string]string{ + "cpu.stat": cpuStatContent, + }) + + cpu := &CpuGroup{} + actualStats := *cgroups.NewStats() + err := cpu.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failed stat parsing.") + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/devices.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/devices.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/devices.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/devices.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,34 @@ +package fs + +import "github.com/docker/libcontainer/cgroups" + +type DevicesGroup struct { +} + +func (s *DevicesGroup) Set(d *data) error { + dir, err := d.join("devices") + if err != nil { + return err + } + + if !d.c.AllowAllDevices { + if err := writeFile(dir, "devices.deny", "a"); err != nil { + return err + } + + for _, dev := range d.c.AllowedDevices { + if err := writeFile(dir, "devices.allow", dev.GetCgroupAllowString()); err != nil { + return err + } + } + } + return nil +} + +func (s *DevicesGroup) Remove(d *data) error { + return removePath(d.path("devices")) +} + +func (s *DevicesGroup) GetStats(path string, stats *cgroups.Stats) error { + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/freezer.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/freezer.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/freezer.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/freezer.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,50 @@ +package fs + +import ( + "strings" + "time" + + "github.com/docker/libcontainer/cgroups" +) + +type FreezerGroup struct { +} + +func (s *FreezerGroup) Set(d *data) error { + switch d.c.Freezer { + case cgroups.Frozen, cgroups.Thawed: + dir, err := d.path("freezer") + if err != nil { + return err + } + + if err := writeFile(dir, "freezer.state", string(d.c.Freezer)); err != nil { + return err + } + + for { + state, err := readFile(dir, "freezer.state") + if err != nil { + return err + } + if strings.TrimSpace(state) == string(d.c.Freezer) { + break + } + time.Sleep(1 * time.Millisecond) + } + default: + if _, err := d.join("freezer"); err != nil && 
!cgroups.IsNotFound(err) { + return err + } + } + + return nil +} + +func (s *FreezerGroup) Remove(d *data) error { + return removePath(d.path("freezer")) +} + +func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error { + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/memory.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/memory.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/memory.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/memory.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,93 @@ +package fs + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "strconv" + + "github.com/docker/libcontainer/cgroups" +) + +type MemoryGroup struct { +} + +func (s *MemoryGroup) Set(d *data) error { + dir, err := d.join("memory") + // only return an error for memory if it was specified + if err != nil && (d.c.Memory != 0 || d.c.MemoryReservation != 0 || d.c.MemorySwap != 0) { + return err + } + defer func() { + if err != nil { + os.RemoveAll(dir) + } + }() + + // Only set values if some config was specified. + if d.c.Memory != 0 || d.c.MemoryReservation != 0 || d.c.MemorySwap != 0 { + if d.c.Memory != 0 { + if err = writeFile(dir, "memory.limit_in_bytes", strconv.FormatInt(d.c.Memory, 10)); err != nil { + return err + } + } + if d.c.MemoryReservation != 0 { + if err = writeFile(dir, "memory.soft_limit_in_bytes", strconv.FormatInt(d.c.MemoryReservation, 10)); err != nil { + return err + } + } + // By default, MemorySwap is set to twice the size of RAM. + // If you want to omit MemorySwap, set it to `-1`. + if d.c.MemorySwap != -1 && d.c.Memory != 0 { + // Honor an explicitly configured MemorySwap; otherwise fall back to the default. + memorySwap := d.c.MemorySwap + if memorySwap == 0 { + memorySwap = d.c.Memory * 2 + } + if err = writeFile(dir, "memory.memsw.limit_in_bytes", strconv.FormatInt(memorySwap, 10)); err != nil { + return err + } + } + } + return nil +} + +func (s *MemoryGroup) Remove(d *data) error { + return removePath(d.path("memory")) +} + +func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error { + // Set stats from memory.stat. + statsFile, err := os.Open(filepath.Join(path, "memory.stat")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer statsFile.Close() + + sc := bufio.NewScanner(statsFile) + for sc.Scan() { + t, v, err := getCgroupParamKeyValue(sc.Text()) + if err != nil { + return fmt.Errorf("failed to parse memory.stat (%q) - %v", sc.Text(), err) + } + stats.MemoryStats.Stats[t] = v + } + + // Set memory usage and max historical usage. 
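+ // memory.usage_in_bytes, memory.max_usage_in_bytes and memory.failcnt each
+ // hold a single integer, so getCgroupParamUint can read them directly.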
+ value, err := getCgroupParamUint(path, "memory.usage_in_bytes") + if err != nil { + return fmt.Errorf("failed to parse memory.usage_in_bytes - %v", err) + } + stats.MemoryStats.Usage = value + value, err = getCgroupParamUint(path, "memory.max_usage_in_bytes") + if err != nil { + return fmt.Errorf("failed to parse memory.max_usage_in_bytes - %v", err) + } + stats.MemoryStats.MaxUsage = value + value, err = getCgroupParamUint(path, "memory.failcnt") + if err != nil { + return fmt.Errorf("failed to parse memory.failcnt - %v", err) + } + stats.MemoryStats.Failcnt = value + + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/memory_test.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/memory_test.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/memory_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/memory_test.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,134 @@ +package fs + +import ( + "testing" + + "github.com/docker/libcontainer/cgroups" +) + +const ( + memoryStatContents = `cache 512 +rss 1024` + memoryUsageContents = "2048\n" + memoryMaxUsageContents = "4096\n" + memoryFailcnt = "100\n" +) + +func TestMemoryStats(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + "memory.failcnt": memoryFailcnt, + }) + + memory := &MemoryGroup{} + actualStats := *cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } + expectedStats := cgroups.MemoryStats{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Stats: map[string]uint64{"cache": 512, "rss": 1024}} + expectMemoryStatEquals(t, expectedStats, actualStats.MemoryStats) +} + +func TestMemoryStatsNoStatFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &MemoryGroup{} + actualStats := *cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } +} + +func TestMemoryStatsNoUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &MemoryGroup{} + actualStats := *cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsNoMaxUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": memoryUsageContents, + }) + + memory := &MemoryGroup{} + actualStats := *cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsBadStatFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": "rss rss", + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &MemoryGroup{} + actualStats := 
*cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsBadUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": "bad", + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &MemoryGroup{} + actualStats := *cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsBadMaxUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": "bad", + }) + + memory := &MemoryGroup{} + actualStats := *cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/notify_linux.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/notify_linux.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/notify_linux.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/notify_linux.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,82 @@ +// +build linux + +package fs + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + + "github.com/docker/libcontainer/cgroups" +) + +// NotifyOnOOM sends signals on the returned channel when the cgroup reaches +// its memory limit. The channel is closed when the cgroup is removed. +func NotifyOnOOM(c *cgroups.Cgroup) (<-chan struct{}, error) { + d, err := getCgroupData(c, 0) + if err != nil { + return nil, err + } + + return notifyOnOOM(d) +} + +func notifyOnOOM(d *data) (<-chan struct{}, error) { + dir, err := d.path("memory") + if err != nil { + return nil, err + } + + fd, _, syserr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0) + if syserr != 0 { + return nil, syserr + } + + eventfd := os.NewFile(fd, "eventfd") + + oomControl, err := os.Open(filepath.Join(dir, "memory.oom_control")) + if err != nil { + eventfd.Close() + return nil, err + } + + var ( + eventControlPath = filepath.Join(dir, "cgroup.event_control") + data = fmt.Sprintf("%d %d", eventfd.Fd(), oomControl.Fd()) + ) + + if err := writeFile(dir, "cgroup.event_control", data); err != nil { + eventfd.Close() + oomControl.Close() + return nil, err + } + + ch := make(chan struct{}) + + go func() { + defer func() { + close(ch) + eventfd.Close() + oomControl.Close() + }() + + buf := make([]byte, 8) + + for { + if _, err := eventfd.Read(buf); err != nil { + return + } + + // When a cgroup is destroyed, an event is sent to eventfd. + // So if the control path is gone, return instead of notifying. 
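+ // Each successful Read consumes one 8-byte eventfd counter value.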
+ if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) { + return + } + + ch <- struct{}{} + } + }() + + return ch, nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/notify_linux_test.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/notify_linux_test.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/notify_linux_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/notify_linux_test.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,86 @@ +// +build linux + +package fs + +import ( + "encoding/binary" + "fmt" + "syscall" + "testing" + "time" +) + +func TestNotifyOnOOM(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + + helper.writeFileContents(map[string]string{ + "memory.oom_control": "", + "cgroup.event_control": "", + }) + + var eventFd, oomControlFd int + + ooms, err := notifyOnOOM(helper.CgroupData) + if err != nil { + t.Fatal("expected no error, got:", err) + } + + memoryPath, _ := helper.CgroupData.path("memory") + data, err := readFile(memoryPath, "cgroup.event_control") + if err != nil { + t.Fatal("couldn't read event control file:", err) + } + + if _, err := fmt.Sscanf(data, "%d %d", &eventFd, &oomControlFd); err != nil { + t.Fatalf("invalid control data %q: %s", data, err) + } + + // re-open the eventfd + efd, err := syscall.Dup(eventFd) + if err != nil { + t.Fatal("unable to reopen eventfd:", err) + } + defer syscall.Close(efd) + + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, 1) + + if _, err := syscall.Write(efd, buf); err != nil { + t.Fatal("unable to write to eventfd:", err) + } + + select { + case <-ooms: + case <-time.After(100 * time.Millisecond): + t.Fatal("no notification on oom channel after 100ms") + } + + // simulate what happens when a cgroup is destroyed by cleaning up and then + // writing to the eventfd. 
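+ // cleanup removes the mock cgroup directory, so the goroutine's Lstat of
+ // cgroup.event_control fails and the ooms channel is closed rather than
+ // receiving another notification.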
+ helper.cleanup() + if _, err := syscall.Write(efd, buf); err != nil { + t.Fatal("unable to write to eventfd:", err) + } + + // give things a moment to shut down + select { + case _, ok := <-ooms: + if ok { + t.Fatal("expected no oom to be triggered") + } + case <-time.After(100 * time.Millisecond): + } + + if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(oomControlFd), syscall.F_GETFD, 0); err != syscall.EBADF { + t.Error("expected oom control to be closed") + } + + if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(eventFd), syscall.F_GETFD, 0); err != syscall.EBADF { + t.Error("expected event fd to be closed") + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/perf_event.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/perf_event.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/perf_event.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/perf_event.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,24 @@ +package fs + +import ( + "github.com/docker/libcontainer/cgroups" +) + +type PerfEventGroup struct { +} + +func (s *PerfEventGroup) Set(d *data) error { + // we just want to join this group even though we don't set anything + if _, err := d.join("perf_event"); err != nil && !cgroups.IsNotFound(err) { + return err + } + return nil +} + +func (s *PerfEventGroup) Remove(d *data) error { + return removePath(d.path("perf_event")) +} + +func (s *PerfEventGroup) GetStats(path string, stats *cgroups.Stats) error { + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/stats_util_test.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/stats_util_test.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/stats_util_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/stats_util_test.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,73 @@ +package fs + +import ( + "fmt" + "log" + "testing" + + "github.com/docker/libcontainer/cgroups" +) + +func blkioStatEntryEquals(expected, actual []cgroups.BlkioStatEntry) error { + if len(expected) != len(actual) { + return fmt.Errorf("blkioStatEntries lengths do not match") + } + for i, expValue := range expected { + actValue := actual[i] + if expValue != actValue { + return fmt.Errorf("Expected blkio stat entry %v but found %v", expValue, actValue) + } + } + return nil +} + +func expectBlkioStatsEquals(t *testing.T, expected, actual cgroups.BlkioStats) { + if err := blkioStatEntryEquals(expected.IoServiceBytesRecursive, actual.IoServiceBytesRecursive); err != nil { + log.Printf("blkio IoServiceBytesRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoServicedRecursive, actual.IoServicedRecursive); err != nil { + log.Printf("blkio IoServicedRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoQueuedRecursive, actual.IoQueuedRecursive); err != nil { + log.Printf("blkio IoQueuedRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.SectorsRecursive, actual.SectorsRecursive); err != nil { + log.Printf("blkio SectorsRecursive do not match - %s\n", err) + t.Fail() + } +} + +func expectThrottlingDataEquals(t *testing.T, expected, actual cgroups.ThrottlingData) { + if expected != actual { + log.Printf("Expected throttling data %v but found %v\n", expected, actual) + t.Fail() + } +} + +func expectMemoryStatEquals(t *testing.T, expected, actual cgroups.MemoryStats) { + if expected.Usage != actual.Usage { + 
log.Printf("Expected memory usage %d but found %d\n", expected.Usage, actual.Usage) + t.Fail() + } + if expected.MaxUsage != actual.MaxUsage { + log.Printf("Expected memory max usage %d but found %d\n", expected.MaxUsage, actual.MaxUsage) + t.Fail() + } + for key, expValue := range expected.Stats { + actValue, ok := actual.Stats[key] + if !ok { + log.Printf("Expected memory stat key %s not found\n", key) + t.Fail() + } + if expValue != actValue { + log.Printf("Expected memory stat value %d but found %d\n", expValue, actValue) + t.Fail() + } + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/utils.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/utils.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/utils.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/utils.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,62 @@ +package fs + +import ( + "errors" + "fmt" + "io/ioutil" + "path/filepath" + "strconv" + "strings" +) + +var ( + ErrNotSupportStat = errors.New("stats are not supported for subsystem") + ErrNotValidFormat = errors.New("line is not a valid key value format") +) + +// Saturates negative values at zero and returns a uint64. +// Due to kernel bugs, some of the memory cgroup stats can be negative. +func parseUint(s string, base, bitSize int) (uint64, error) { + value, err := strconv.ParseUint(s, base, bitSize) + if err != nil { + intValue, intErr := strconv.ParseInt(s, base, bitSize) + // 1. Handle negative values greater than MinInt64 (and) + // 2. Handle negative values lesser than MinInt64 + if intErr == nil && intValue < 0 { + return 0, nil + } else if intErr != nil && intErr.(*strconv.NumError).Err == strconv.ErrRange && intValue < 0 { + return 0, nil + } + + return value, err + } + + return value, nil +} + +// Parses a cgroup param and returns as name, value +// i.e. "io_service_bytes 1234" will return as io_service_bytes, 1234 +func getCgroupParamKeyValue(t string) (string, uint64, error) { + parts := strings.Fields(t) + switch len(parts) { + case 2: + value, err := parseUint(parts[1], 10, 64) + if err != nil { + return "", 0, fmt.Errorf("Unable to convert param value (%q) to uint64: %v", parts[1], err) + } + + return parts[0], value, nil + default: + return "", 0, ErrNotValidFormat + } +} + +// Gets a single uint64 value from the specified cgroup file. +func getCgroupParamUint(cgroupPath, cgroupFile string) (uint64, error) { + contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile)) + if err != nil { + return 0, err + } + + return parseUint(strings.TrimSpace(string(contents)), 10, 64) +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/utils_test.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/utils_test.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/utils_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/utils_test.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,95 @@ +package fs + +import ( + "io/ioutil" + "math" + "os" + "path/filepath" + "strconv" + "testing" +) + +const ( + cgroupFile = "cgroup.file" + floatValue = 2048.0 + floatString = "2048" +) + +func TestGetCgroupParamsInt(t *testing.T) { + // Setup tempdir. + tempDir, err := ioutil.TempDir("", "cgroup_utils_test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + tempFile := filepath.Join(tempDir, cgroupFile) + + // Success. 
+ err = ioutil.WriteFile(tempFile, []byte(floatString), 0755) + if err != nil { + t.Fatal(err) + } + value, err := getCgroupParamUint(tempDir, cgroupFile) + if err != nil { + t.Fatal(err) + } else if value != floatValue { + t.Fatalf("Expected %d to equal %f", value, floatValue) + } + + // Success with new line. + err = ioutil.WriteFile(tempFile, []byte(floatString+"\n"), 0755) + if err != nil { + t.Fatal(err) + } + value, err = getCgroupParamUint(tempDir, cgroupFile) + if err != nil { + t.Fatal(err) + } else if value != floatValue { + t.Fatalf("Expected %d to equal %f", value, floatValue) + } + + // Success with negative values + err = ioutil.WriteFile(tempFile, []byte("-12345"), 0755) + if err != nil { + t.Fatal(err) + } + value, err = getCgroupParamUint(tempDir, cgroupFile) + if err != nil { + t.Fatal(err) + } else if value != 0 { + t.Fatalf("Expected %d to equal %d", value, 0) + } + + // Success with negative values less than min int64 + s := strconv.FormatFloat(math.MinInt64, 'f', -1, 64) + err = ioutil.WriteFile(tempFile, []byte(s), 0755) + if err != nil { + t.Fatal(err) + } + value, err = getCgroupParamUint(tempDir, cgroupFile) + if err != nil { + t.Fatal(err) + } else if value != 0 { + t.Fatalf("Expected %d to equal %d", value, 0) + } + + // Not a float. + err = ioutil.WriteFile(tempFile, []byte("not-a-float"), 0755) + if err != nil { + t.Fatal(err) + } + _, err = getCgroupParamUint(tempDir, cgroupFile) + if err == nil { + t.Fatal("Expecting error, got none") + } + + // Unknown file. + err = os.Remove(tempFile) + if err != nil { + t.Fatal(err) + } + _, err = getCgroupParamUint(tempDir, cgroupFile) + if err == nil { + t.Fatal("Expecting error, got none") + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/util_test.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/util_test.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/fs/util_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/fs/util_test.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,60 @@ +/* +Utility for testing cgroup operations. + +Creates a mock of the cgroup filesystem for the duration of the test. +*/ +package fs + +import ( + "fmt" + "io/ioutil" + "os" + "testing" +) + +type cgroupTestUtil struct { + // data to use in tests. + CgroupData *data + + // Path to the mock cgroup directory. + CgroupPath string + + // Temporary directory to store mock cgroup filesystem. + tempDir string + t *testing.T +} + +// Creates a new test util for the specified subsystem +func NewCgroupTestUtil(subsystem string, t *testing.T) *cgroupTestUtil { + d := &data{} + tempDir, err := ioutil.TempDir("", fmt.Sprintf("%s_cgroup_test", subsystem)) + if err != nil { + t.Fatal(err) + } + d.root = tempDir + testCgroupPath, err := d.path(subsystem) + if err != nil { + t.Fatal(err) + } + + // Ensure the full mock cgroup path exists. + err = os.MkdirAll(testCgroupPath, 0755) + if err != nil { + t.Fatal(err) + } + return &cgroupTestUtil{CgroupData: d, CgroupPath: testCgroupPath, tempDir: tempDir, t: t} +} + +func (c *cgroupTestUtil) cleanup() { + os.RemoveAll(c.tempDir) +} + +// Write the specified contents on the mock of the specified cgroup files. 
+func (c *cgroupTestUtil) writeFileContents(fileContents map[string]string) { + for file, contents := range fileContents { + err := writeFile(c.CgroupPath, file, contents) + if err != nil { + c.t.Fatal(err) + } + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/stats.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/stats.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/stats.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/stats.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,69 @@ +package cgroups + +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods,omitempty"` + // Number of periods when the container hit its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods,omitempty"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time,omitempty"` +} + +// All CPU stats are aggregate since container inception. +type CpuUsage struct { + // Total CPU time consumed. + // Units: nanoseconds. + TotalUsage uint64 `json:"total_usage,omitempty"` + // Total CPU time consumed per core. + // Units: nanoseconds. + PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + // Time spent by tasks of the cgroup in kernel mode. + // Units: nanoseconds. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + // Time spent by tasks of the cgroup in user mode. + // Units: nanoseconds. + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +type CpuStats struct { + CpuUsage CpuUsage `json:"cpu_usage,omitempty"` + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +type MemoryStats struct { + // current res_counter usage for memory + Usage uint64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage,omitempty"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats,omitempty"` + // number of times memory usage hit the limit. 
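+ // (mirrors the memory.failcnt file in the cgroup filesystem)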
+ Failcnt uint64 `json:"failcnt"` +} + +type BlkioStatEntry struct { + Major uint64 `json:"major,omitempty"` + Minor uint64 `json:"minor,omitempty"` + Op string `json:"op,omitempty"` + Value uint64 `json:"value,omitempty"` +} + +type BlkioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queued_recursive,omitempty"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive,omitempty"` +} + +type Stats struct { + CpuStats CpuStats `json:"cpu_stats,omitempty"` + MemoryStats MemoryStats `json:"memory_stats,omitempty"` + BlkioStats BlkioStats `json:"blkio_stats,omitempty"` +} + +func NewStats() *Stats { + memoryStats := MemoryStats{Stats: make(map[string]uint64)} + return &Stats{MemoryStats: memoryStats} +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/systemd/apply_nosystemd.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/systemd/apply_nosystemd.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/systemd/apply_nosystemd.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/systemd/apply_nosystemd.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,29 @@ +// +build !linux + +package systemd + +import ( + "fmt" + + "github.com/docker/libcontainer/cgroups" +) + +func UseSystemd() bool { + return false +} + +func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { + return nil, fmt.Errorf("Systemd not supported") +} + +func GetPids(c *cgroups.Cgroup) ([]int, error) { + return nil, fmt.Errorf("Systemd not supported") +} + +func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error { + return fmt.Errorf("Systemd not supported") +} + +func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) { + return nil, fmt.Errorf("Systemd not supported") +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/systemd/apply_systemd.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/systemd/apply_systemd.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/systemd/apply_systemd.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/systemd/apply_systemd.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,358 @@ +// +build linux + +package systemd + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + systemd "github.com/coreos/go-systemd/dbus" + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/cgroups/fs" + "github.com/godbus/dbus" +) + +type systemdCgroup struct { + cgroup *cgroups.Cgroup +} + +type subsystem interface { + GetStats(string, *cgroups.Stats) error +} + +var ( + connLock sync.Mutex + theConn *systemd.Conn + hasStartTransientUnit bool + subsystems = map[string]subsystem{ + "devices": &fs.DevicesGroup{}, + "memory": &fs.MemoryGroup{}, + "cpu": &fs.CpuGroup{}, + "cpuset": &fs.CpusetGroup{}, + "cpuacct": &fs.CpuacctGroup{}, + "blkio": &fs.BlkioGroup{}, + "perf_event": &fs.PerfEventGroup{}, + "freezer": &fs.FreezerGroup{}, + } +) + +func UseSystemd() bool { + s, err := os.Stat("/run/systemd/system") + if err != nil || !s.IsDir() { + return false + } + + connLock.Lock() + defer connLock.Unlock() + + if theConn == nil { + var err error + theConn, err = systemd.New() + if err != nil { + return false + } + + // Assume we have StartTransientUnit + hasStartTransientUnit = true + + // But if probing it fails with an UnknownMethod 
error, we know it is not available. + if _, err := theConn.StartTransientUnit("test.scope", "invalid"); err != nil { + if dbusError, ok := err.(dbus.Error); ok { + if dbusError.Name == "org.freedesktop.DBus.Error.UnknownMethod" { + hasStartTransientUnit = false + } + } + } + } + return hasStartTransientUnit +} + +func getIfaceForUnit(unitName string) string { + if strings.HasSuffix(unitName, ".scope") { + return "Scope" + } + if strings.HasSuffix(unitName, ".service") { + return "Service" + } + return "Unit" +} + +func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { + var ( + unitName = getUnitName(c) + slice = "system.slice" + properties []systemd.Property + res = &systemdCgroup{} + ) + + res.cgroup = c + + if c.Slice != "" { + slice = c.Slice + } + + properties = append(properties, + systemd.Property{"Slice", dbus.MakeVariant(slice)}, + systemd.Property{"Description", dbus.MakeVariant("docker container " + c.Name)}, + systemd.Property{"PIDs", dbus.MakeVariant([]uint32{uint32(pid)})}, + ) + + // Always enable accounting; this gets us the same behaviour as the fs implementation, + // and the kernel has some problems with joining the memory cgroup at a later time. + properties = append(properties, + systemd.Property{"MemoryAccounting", dbus.MakeVariant(true)}, + systemd.Property{"CPUAccounting", dbus.MakeVariant(true)}, + systemd.Property{"BlockIOAccounting", dbus.MakeVariant(true)}) + + if c.Memory != 0 { + properties = append(properties, + systemd.Property{"MemoryLimit", dbus.MakeVariant(uint64(c.Memory))}) + } + // TODO: MemoryReservation and MemorySwap not available in systemd + + if c.CpuShares != 0 { + properties = append(properties, + systemd.Property{"CPUShares", dbus.MakeVariant(uint64(c.CpuShares))}) + } + + if _, err := theConn.StartTransientUnit(unitName, "replace", properties...); err != nil { + return nil, err + } + + if !c.AllowAllDevices { + if err := joinDevices(c, pid); err != nil { + return nil, err + } + } + + // -1 disables memorySwap + if c.MemorySwap >= 0 && (c.Memory != 0 || c.MemorySwap > 0) { + if err := joinMemory(c, pid); err != nil { + return nil, err + } + + } + + // We must join the freezer cgroup manually because systemd does not + // currently support it via the dbus api. + if err := joinFreezer(c, pid); err != nil { + return nil, err + } + + if c.CpusetCpus != "" { + if err := joinCpuset(c, pid); err != nil { + return nil, err + } + } + + return res, nil +} + +func writeFile(dir, file, data string) error { + return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700) +} + +func (c *systemdCgroup) Paths() (map[string]string, error) { + paths := make(map[string]string) + + for sysname := range subsystems { + subsystemPath, err := getSubsystemPath(c.cgroup, sysname) + if err != nil { + // Don't fail if a cgroup hierarchy was not found, just skip this subsystem + if cgroups.IsNotFound(err) { + continue + } + + return nil, err + } + + paths[sysname] = subsystemPath + } + + return paths, nil +} + +func (c *systemdCgroup) Cleanup() error { + // systemd cleans up; we don't need to do much + paths, err := c.Paths() + if err != nil { + return err + } + + for _, path := range paths { + os.RemoveAll(path) + } + + return nil +} + +func joinFreezer(c *cgroups.Cgroup, pid int) error { + path, err := getSubsystemPath(c, "freezer") + if err != nil { + return err + } + + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return err + } + + return ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700) +} + 
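+// joinCgroupAt is a hypothetical helper, sketched here purely for
+// illustration (it is not part of the upstream API), that factors out the
+// pattern shared by joinFreezer above and joinDevices below: resolve the
+// subsystem path, ensure the directory exists, and write the pid into
+// cgroup.procs.
+func joinCgroupAt(c *cgroups.Cgroup, subsystem string, pid int) (string, error) {
+ path, err := getSubsystemPath(c, subsystem)
+ if err != nil {
+ return "", err
+ }
+ if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
+ return "", err
+ }
+ // Writing to cgroup.procs moves the whole thread group into the cgroup.
+ if err := writeFile(path, "cgroup.procs", strconv.Itoa(pid)); err != nil {
+ return "", err
+ }
+ return path, nil
+}
+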
+func getSubsystemPath(c *cgroups.Cgroup, subsystem string) (string, error) { + mountpoint, err := cgroups.FindCgroupMountpoint(subsystem) + if err != nil { + return "", err + } + + initPath, err := cgroups.GetInitCgroupDir(subsystem) + if err != nil { + return "", err + } + + slice := "system.slice" + if c.Slice != "" { + slice = c.Slice + } + + return filepath.Join(mountpoint, initPath, slice, getUnitName(c)), nil +} + +func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error { + path, err := getSubsystemPath(c, "freezer") + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(path, "freezer.state"), []byte(state), 0); err != nil { + return err + } + for { + state_, err := ioutil.ReadFile(filepath.Join(path, "freezer.state")) + if err != nil { + return err + } + if string(state) == string(bytes.TrimSpace(state_)) { + break + } + time.Sleep(1 * time.Millisecond) + } + return nil +} + +func GetPids(c *cgroups.Cgroup) ([]int, error) { + path, err := getSubsystemPath(c, "cpu") + if err != nil { + return nil, err + } + + return cgroups.ReadProcsFile(path) +} + +func getUnitName(c *cgroups.Cgroup) string { + return fmt.Sprintf("%s-%s.scope", c.Parent, c.Name) +} + +/* + * This would be nicer to get from the systemd API when accounting + * is enabled, but sadly there is no way to do that yet. + * The lack of this functionality in the API & the approach taken + * is guided by + * http://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#readingaccountinginformation. + */ +func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) { + stats := cgroups.NewStats() + + for sysname, sys := range subsystems { + subsystemPath, err := getSubsystemPath(c, sysname) + if err != nil { + // Don't fail if a cgroup hierarchy was not found, just skip this subsystem + if cgroups.IsNotFound(err) { + continue + } + + return nil, err + } + + if err := sys.GetStats(subsystemPath, stats); err != nil { + return nil, err + } + } + + return stats, nil +} + +// Atm we can't use the systemd device support because of two missing things: +// * Support for wildcards to allow mknod on any device +// * Support for wildcards to allow /dev/pts support +// +// The second is available in more recent systemd as "char-pts", but not in e.g. v208 which is +// in wide use. When both these are availalable we will be able to switch, but need to keep the old +// implementation for backwards compat. +// +// Note: we can't use systemd to set up the initial limits, and then change the cgroup +// because systemd will re-write the device settings if it needs to re-apply the cgroup context. +// This happens at least for v208 when any sibling unit is started. +func joinDevices(c *cgroups.Cgroup, pid int) error { + path, err := getSubsystemPath(c, "devices") + if err != nil { + return err + } + + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return err + } + + if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700); err != nil { + return err + } + + if err := writeFile(path, "devices.deny", "a"); err != nil { + return err + } + + for _, dev := range c.AllowedDevices { + if err := writeFile(path, "devices.allow", dev.GetCgroupAllowString()); err != nil { + return err + } + } + + return nil +} + +func joinMemory(c *cgroups.Cgroup, pid int) error { + memorySwap := c.MemorySwap + + if memorySwap == 0 { + // By default, MemorySwap is set to twice the size of RAM. 
+ memorySwap = c.Memory * 2 + } + + path, err := getSubsystemPath(c, "memory") + if err != nil { + return err + } + + return ioutil.WriteFile(filepath.Join(path, "memory.memsw.limit_in_bytes"), []byte(strconv.FormatInt(memorySwap, 10)), 0700) +} + +// systemd does not atm set up the cpuset controller, so we must manually +// join it. Additionally that is a very finicky controller where each +// level must have a full setup as the default for a new directory is "no cpus" +func joinCpuset(c *cgroups.Cgroup, pid int) error { + path, err := getSubsystemPath(c, "cpuset") + if err != nil { + return err + } + + s := &fs.CpusetGroup{} + + return s.SetDir(path, c.CpusetCpus, pid) +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/cgroups/utils.go docker.io-1.3.2~dfsg1/libcontainer/cgroups/utils.go --- docker.io-0.9.1~dfsg1/libcontainer/cgroups/utils.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/cgroups/utils.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,194 @@ +package cgroups + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/pkg/mount" +) + +// https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt +func FindCgroupMountpoint(subsystem string) (string, error) { + mounts, err := mount.GetMounts() + if err != nil { + return "", err + } + + for _, mount := range mounts { + if mount.Fstype == "cgroup" { + for _, opt := range strings.Split(mount.VfsOpts, ",") { + if opt == subsystem { + return mount.Mountpoint, nil + } + } + } + } + + return "", NewNotFoundError(subsystem) +} + +type Mount struct { + Mountpoint string + Subsystems []string +} + +func (m Mount) GetThisCgroupDir() (string, error) { + if len(m.Subsystems) == 0 { + return "", fmt.Errorf("no subsystem for mount") + } + + return GetThisCgroupDir(m.Subsystems[0]) +} + +func GetCgroupMounts() ([]Mount, error) { + mounts, err := mount.GetMounts() + if err != nil { + return nil, err + } + + all, err := GetAllSubsystems() + if err != nil { + return nil, err + } + + allMap := make(map[string]bool) + for _, s := range all { + allMap[s] = true + } + + res := []Mount{} + for _, mount := range mounts { + if mount.Fstype == "cgroup" { + m := Mount{Mountpoint: mount.Mountpoint} + + for _, opt := range strings.Split(mount.VfsOpts, ",") { + if strings.HasPrefix(opt, "name=") { + m.Subsystems = append(m.Subsystems, opt) + } + if allMap[opt] { + m.Subsystems = append(m.Subsystems, opt) + } + } + res = append(res, m) + } + } + return res, nil +} + +// Returns all the cgroup subsystems supported by the kernel +func GetAllSubsystems() ([]string, error) { + f, err := os.Open("/proc/cgroups") + if err != nil { + return nil, err + } + defer f.Close() + + subsystems := []string{} + + s := bufio.NewScanner(f) + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + text := s.Text() + if text[0] != '#' { + parts := strings.Fields(text) + if len(parts) >= 4 && parts[3] != "0" { + subsystems = append(subsystems, parts[0]) + } + } + } + return subsystems, nil +} + +// Returns the relative path to the cgroup docker is running in. 
+func GetThisCgroupDir(subsystem string) (string, error) { + f, err := os.Open("/proc/self/cgroup") + if err != nil { + return "", err + } + defer f.Close() + + return ParseCgroupFile(subsystem, f) +} + +func GetInitCgroupDir(subsystem string) (string, error) { + f, err := os.Open("/proc/1/cgroup") + if err != nil { + return "", err + } + defer f.Close() + + return ParseCgroupFile(subsystem, f) +} + +func ReadProcsFile(dir string) ([]int, error) { + f, err := os.Open(filepath.Join(dir, "cgroup.procs")) + if err != nil { + return nil, err + } + defer f.Close() + + var ( + s = bufio.NewScanner(f) + out = []int{} + ) + + for s.Scan() { + if t := s.Text(); t != "" { + pid, err := strconv.Atoi(t) + if err != nil { + return nil, err + } + out = append(out, pid) + } + } + return out, nil +} + +func ParseCgroupFile(subsystem string, r io.Reader) (string, error) { + s := bufio.NewScanner(r) + + for s.Scan() { + if err := s.Err(); err != nil { + return "", err + } + + text := s.Text() + parts := strings.Split(text, ":") + + for _, subs := range strings.Split(parts[1], ",") { + if subs == subsystem { + return parts[2], nil + } + } + } + + return "", NewNotFoundError(subsystem) +} + +func pathExists(path string) bool { + if _, err := os.Stat(path); err != nil { + return false + } + return true +} + +func EnterPid(cgroupPaths map[string]string, pid int) error { + for _, path := range cgroupPaths { + if pathExists(path) { + if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), + []byte(strconv.Itoa(pid)), 0700); err != nil { + return err + } + } + } + + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/config.go docker.io-1.3.2~dfsg1/libcontainer/config.go --- docker.io-0.9.1~dfsg1/libcontainer/config.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/config.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,89 @@ +package libcontainer + +import ( + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/mount" + "github.com/docker/libcontainer/network" +) + +type MountConfig mount.MountConfig + +type Network network.Network + +// Config defines configuration options for executing a process inside a contained environment. +type Config struct { + // Mount specific options. 
+	MountConfig *MountConfig `json:"mount_config,omitempty"`
+
+	// Pathname to container's root filesystem
+	RootFs string `json:"root_fs,omitempty"`
+
+	// Hostname optionally sets the container's hostname if provided
+	Hostname string `json:"hostname,omitempty"`
+
+	// User will set the uid and gid of the executing process running inside the container
+	User string `json:"user,omitempty"`
+
+	// WorkingDir will change the process's current working directory inside the container's rootfs
+	WorkingDir string `json:"working_dir,omitempty"`
+
+	// Env will populate the process's environment with the provided values
+	// Any values from the parent process will be cleared before the values
+	// provided in Env are passed to the process
+	Env []string `json:"environment,omitempty"`
+
+	// Tty when true will allocate a pty slave on the host for access by the container's process
+	// and ensure that it is mounted inside the container's rootfs
+	Tty bool `json:"tty,omitempty"`
+
+	// Namespaces specifies the container's namespaces that it should set up when cloning the init process
+	// If a namespace is not provided, that namespace is shared from the container's parent process
+	Namespaces map[string]bool `json:"namespaces,omitempty"`
+
+	// Capabilities specifies the capabilities to keep when executing the process inside the container
+	// All capabilities not specified will be dropped from the process's capability mask
+	Capabilities []string `json:"capabilities,omitempty"`
+
+	// Networks specifies the container's network setup to be created
+	Networks []*Network `json:"networks,omitempty"`
+
+	// Routes can be specified to create entries in the route table as the container is started
+	Routes []*Route `json:"routes,omitempty"`
+
+	// Cgroups specifies specific cgroup settings for the various subsystems that the container is
+	// placed into to limit the resources the container has available
+	Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"`
+
+	// AppArmorProfile specifies the profile to apply to the process running in the container and is
+	// changed at the time the process is execed
+	AppArmorProfile string `json:"apparmor_profile,omitempty"`
+
+	// ProcessLabel specifies the label to apply to the process running in the container. It is
+	// commonly used by SELinux
+	ProcessLabel string `json:"process_label,omitempty"`
+
+	// RestrictSys will remount /proc/sys, /sys, and mask over sysrq-trigger as well as /proc/irq and
+	// /proc/bus
+	RestrictSys bool `json:"restrict_sys,omitempty"`
+}
+
+// Routes can be specified to create entries in the route table as the container is started
+//
+// All of destination, source, and gateway should be either IPv4 or IPv6.
+// One of the three options must be present, and omitted entries will use their
+// IP family default for the route table. For IPv4 for example, setting the
+// gateway to 1.2.3.4 and the interface to eth0 will set up a standard
+// destination of 0.0.0.0 (or *) when viewed in the route table.
+type Route struct {
+	// Sets the destination and mask, should be a CIDR. Accepts IPv4 and IPv6
+	Destination string `json:"destination,omitempty"`
+
+	// Sets the source and mask, should be a CIDR. Accepts IPv4 and IPv6
+	Source string `json:"source,omitempty"`
+
+	// Sets the gateway.
Accepts IPv4 and IPv6 + Gateway string `json:"gateway,omitempty"` + + // The device to set this route up for, for example: eth0 + InterfaceName string `json:"interface_name,omitempty"` +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/config_test.go docker.io-1.3.2~dfsg1/libcontainer/config_test.go --- docker.io-0.9.1~dfsg1/libcontainer/config_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/config_test.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,160 @@ +package libcontainer + +import ( + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/docker/libcontainer/devices" +) + +// Checks whether the expected capability is specified in the capabilities. +func contains(expected string, values []string) bool { + for _, v := range values { + if v == expected { + return true + } + } + return false +} + +func containsDevice(expected *devices.Device, values []*devices.Device) bool { + for _, d := range values { + if d.Path == expected.Path && + d.CgroupPermissions == expected.CgroupPermissions && + d.FileMode == expected.FileMode && + d.MajorNumber == expected.MajorNumber && + d.MinorNumber == expected.MinorNumber && + d.Type == expected.Type { + return true + } + } + return false +} + +func loadConfig(name string) (*Config, error) { + f, err := os.Open(filepath.Join("sample_configs", name)) + if err != nil { + return nil, err + } + defer f.Close() + + var container *Config + if err := json.NewDecoder(f).Decode(&container); err != nil { + return nil, err + } + + return container, nil +} + +func TestConfigJsonFormat(t *testing.T) { + container, err := loadConfig("attach_to_bridge.json") + if err != nil { + t.Fatal(err) + } + + if container.Hostname != "koye" { + t.Log("hostname is not set") + t.Fail() + } + + if !container.Tty { + t.Log("tty should be set to true") + t.Fail() + } + + if !container.Namespaces["NEWNET"] { + t.Log("namespaces should contain NEWNET") + t.Fail() + } + + if container.Namespaces["NEWUSER"] { + t.Log("namespaces should not contain NEWUSER") + t.Fail() + } + + if contains("SYS_ADMIN", container.Capabilities) { + t.Log("SYS_ADMIN should not be enabled in capabilities mask") + t.Fail() + } + + if !contains("MKNOD", container.Capabilities) { + t.Log("MKNOD should be enabled in capabilities mask") + t.Fail() + } + + if !contains("SYS_CHROOT", container.Capabilities) { + t.Log("capabilities mask should contain SYS_CHROOT") + t.Fail() + } + + for _, n := range container.Networks { + if n.Type == "veth" { + if n.Bridge != "docker0" { + t.Logf("veth bridge should be docker0 but received %q", n.Bridge) + t.Fail() + } + + if n.Address != "172.17.0.101/16" { + t.Logf("veth address should be 172.17.0.101/61 but received %q", n.Address) + t.Fail() + } + + if n.VethPrefix != "veth" { + t.Logf("veth prefix should be veth but received %q", n.VethPrefix) + t.Fail() + } + + if n.Gateway != "172.17.42.1" { + t.Logf("veth gateway should be 172.17.42.1 but received %q", n.Gateway) + t.Fail() + } + + if n.Mtu != 1500 { + t.Logf("veth mtu should be 1500 but received %d", n.Mtu) + t.Fail() + } + + break + } + } + + for _, d := range devices.DefaultSimpleDevices { + if !containsDevice(d, container.MountConfig.DeviceNodes) { + t.Logf("expected device configuration for %s", d.Path) + t.Fail() + } + } + + if !container.RestrictSys { + t.Log("expected restrict sys to be true") + t.Fail() + } +} + +func TestApparmorProfile(t *testing.T) { + container, err := loadConfig("apparmor.json") + if err != nil { + t.Fatal(err) + } + + if 
container.AppArmorProfile != "docker-default" { + t.Fatalf("expected apparmor profile to be docker-default but received %q", container.AppArmorProfile) + } +} + +func TestSelinuxLabels(t *testing.T) { + container, err := loadConfig("selinux.json") + if err != nil { + t.Fatal(err) + } + label := "system_u:system_r:svirt_lxc_net_t:s0:c164,c475" + + if container.ProcessLabel != label { + t.Fatalf("expected process label %q but received %q", label, container.ProcessLabel) + } + if container.MountConfig.MountLabel != label { + t.Fatalf("expected mount label %q but received %q", label, container.MountConfig.MountLabel) + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/console/console.go docker.io-1.3.2~dfsg1/libcontainer/console/console.go --- docker.io-0.9.1~dfsg1/libcontainer/console/console.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/console/console.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,128 @@ +// +build linux + +package console + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "unsafe" + + "github.com/docker/libcontainer/label" +) + +// Setup initializes the proper /dev/console inside the rootfs path +func Setup(rootfs, consolePath, mountLabel string) error { + oldMask := syscall.Umask(0000) + defer syscall.Umask(oldMask) + + if err := os.Chmod(consolePath, 0600); err != nil { + return err + } + + if err := os.Chown(consolePath, 0, 0); err != nil { + return err + } + + if err := label.SetFileLabel(consolePath, mountLabel); err != nil { + return fmt.Errorf("set file label %s %s", consolePath, err) + } + + dest := filepath.Join(rootfs, "dev/console") + + f, err := os.Create(dest) + if err != nil && !os.IsExist(err) { + return fmt.Errorf("create %s %s", dest, err) + } + + if f != nil { + f.Close() + } + + if err := syscall.Mount(consolePath, dest, "bind", syscall.MS_BIND, ""); err != nil { + return fmt.Errorf("bind %s to %s %s", consolePath, dest, err) + } + + return nil +} + +func OpenAndDup(consolePath string) error { + slave, err := OpenTerminal(consolePath, syscall.O_RDWR) + if err != nil { + return fmt.Errorf("open terminal %s", err) + } + + if err := syscall.Dup2(int(slave.Fd()), 0); err != nil { + return err + } + + if err := syscall.Dup2(int(slave.Fd()), 1); err != nil { + return err + } + + return syscall.Dup2(int(slave.Fd()), 2) +} + +// Unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// Unlockpt should be called before opening the slave side of a pseudoterminal. +func Unlockpt(f *os.File) error { + var u int + + return Ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) +} + +// Ptsname retrieves the name of the first available pts for the given master. +func Ptsname(f *os.File) (string, error) { + var n int + + if err := Ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil { + return "", err + } + + return fmt.Sprintf("/dev/pts/%d", n), nil +} + +// CreateMasterAndConsole will open /dev/ptmx on the host and retreive the +// pts name for use as the pty slave inside the container +func CreateMasterAndConsole() (*os.File, string, error) { + master, err := os.OpenFile("/dev/ptmx", syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0) + if err != nil { + return nil, "", err + } + + console, err := Ptsname(master) + if err != nil { + return nil, "", err + } + + if err := Unlockpt(master); err != nil { + return nil, "", err + } + + return master, console, nil +} + +// OpenPtmx opens /dev/ptmx, i.e. the PTY master. 
+func OpenPtmx() (*os.File, error) { + // O_NOCTTY and O_CLOEXEC are not present in os package so we use the syscall's one for all. + return os.OpenFile("/dev/ptmx", syscall.O_RDONLY|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0) +} + +// OpenTerminal is a clone of os.OpenFile without the O_CLOEXEC +// used to open the pty slave inside the container namespace +func OpenTerminal(name string, flag int) (*os.File, error) { + r, e := syscall.Open(name, flag, 0) + if e != nil { + return nil, &os.PathError{Op: "open", Path: name, Err: e} + } + return os.NewFile(uintptr(r), name), nil +} + +func Ioctl(fd uintptr, flag, data uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/container.go docker.io-1.3.2~dfsg1/libcontainer/container.go --- docker.io-0.9.1~dfsg1/libcontainer/container.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/container.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,78 @@ +/* +NOTE: The API is in flux and mainly not implemented. Proceed with caution until further notice. +*/ +package libcontainer + +// A libcontainer container object. +// +// Each container is thread-safe within the same process. Since a container can +// be destroyed by a separate process, any function may return that the container +// was not found. +type Container interface { + // Returns the ID of the container + ID() string + + // Returns the current run state of the container. + // + // Errors: + // ContainerDestroyed - Container no longer exists, + // SystemError - System error. + RunState() (*RunState, Error) + + // Returns the current config of the container. + Config() *Config + + // Start a process inside the container. Returns the PID of the new process (in the caller process's namespace) and a channel that will return the exit status of the process whenever it dies. + // + // Errors: + // ContainerDestroyed - Container no longer exists, + // ConfigInvalid - config is invalid, + // ContainerPaused - Container is paused, + // SystemError - System error. + Start(config *ProcessConfig) (pid int, exitChan chan int, err Error) + + // Destroys the container after killing all running processes. + // + // Any event registrations are removed before the container is destroyed. + // No error is returned if the container is already destroyed. + // + // Errors: + // SystemError - System error. + Destroy() Error + + // Returns the PIDs inside this container. The PIDs are in the namespace of the calling process. + // + // Errors: + // ContainerDestroyed - Container no longer exists, + // SystemError - System error. + // + // Some of the returned PIDs may no longer refer to processes in the Container, unless + // the Container state is PAUSED in which case every PID in the slice is valid. + Processes() ([]int, Error) + + // Returns statistics for the container. + // + // Errors: + // ContainerDestroyed - Container no longer exists, + // SystemError - System error. + Stats() (*ContainerStats, Error) + + // If the Container state is RUNNING or PAUSING, sets the Container state to PAUSING and pauses + // the execution of any user processes. Asynchronously, when the container finished being paused the + // state is changed to PAUSED. + // If the Container state is PAUSED, do nothing. + // + // Errors: + // ContainerDestroyed - Container no longer exists, + // SystemError - System error. 
+ Pause() Error + + // If the Container state is PAUSED, resumes the execution of any user processes in the + // Container before setting the Container state to RUNNING. + // If the Container state is RUNNING, do nothing. + // + // Errors: + // ContainerDestroyed - Container no longer exists, + // SystemError - System error. + Resume() Error +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/CONTRIBUTORS_GUIDE.md docker.io-1.3.2~dfsg1/libcontainer/CONTRIBUTORS_GUIDE.md --- docker.io-0.9.1~dfsg1/libcontainer/CONTRIBUTORS_GUIDE.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/CONTRIBUTORS_GUIDE.md 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,204 @@ +# The libcontainer Contributors' Guide + +Want to hack on libcontainer? Awesome! Here are instructions to get you +started. They are probably not perfect, please let us know if anything +feels wrong or incomplete. + +## Reporting Issues + +When reporting [issues](https://github.com/docker/libcontainer/issues) +on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc), +the output of `uname -a`. Please include the steps required to reproduce +the problem if possible and applicable. +This information will help us review and fix your issue faster. + +## Development Environment + +*Add instructions on setting up the development environment.* + +## Contribution Guidelines + +### Pull requests are always welcome + +We are always thrilled to receive pull requests, and do our best to +process them as fast as possible. Not sure if that typo is worth a pull +request? Do it! We will appreciate it. + +If your pull request is not accepted on the first try, don't be +discouraged! If there's a problem with the implementation, hopefully you +received feedback on what to improve. + +We're trying very hard to keep libcontainer lean and focused. We don't want it +to do everything for everybody. This means that we might decide against +incorporating a new feature. However, there might be a way to implement +that feature *on top of* libcontainer. + +### Discuss your design on the mailing list + +We recommend discussing your plans [on the mailing +list](https://groups.google.com/forum/?fromgroups#!forum/libcontainer) +before starting to code - especially for more ambitious contributions. +This gives other contributors a chance to point you in the right +direction, give feedback on your design, and maybe point out if someone +else is working on the same thing. + +### Create issues... + +Any significant improvement should be documented as [a GitHub +issue](https://github.com/docker/libcontainer/issues) before anybody +starts working on it. + +### ...but check for existing issues first! + +Please take a moment to check that an issue doesn't already exist +documenting your bug report or improvement proposal. If it does, it +never hurts to add a quick "+1" or "I have this problem too". This will +help prioritize the most common problems and requests. + +### Conventions + +Fork the repo and make changes on your fork in a feature branch: + +- If it's a bugfix branch, name it XXX-something where XXX is the number of the + issue +- If it's a feature branch, create an enhancement issue to announce your + intentions, and name it XXX-something where XXX is the number of the issue. + +Submit unit tests for your changes. Go has a great test framework built in; use +it! Take a look at existing tests for inspiration. Run the full test suite on +your branch before submitting a pull request. 
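The paragraph above asks for unit tests with every change. For contributors new to Go's built-in `testing` package, here is a minimal sketch of the shape such a test takes; it reuses the `contains` helper from `config_test.go` earlier in this diff, while the test name and input values are invented for illustration:

```go
package libcontainer

import "testing"

// TestContains exercises the contains helper in the plain
// assertion style used throughout this repository's *_test.go files.
func TestContains(t *testing.T) {
	caps := []string{"MKNOD", "SYS_CHROOT"}

	if !contains("MKNOD", caps) {
		t.Fatal("expected MKNOD to be found in the capability list")
	}
	if contains("SYS_ADMIN", caps) {
		t.Fatal("did not expect SYS_ADMIN to be found in the capability list")
	}
}
```

Running `go test ./...` from the repository root before pushing a branch is the usual way to satisfy the "full test suite" requirement.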
+
+Update the documentation when creating or modifying features. Test
+your documentation changes for clarity, concision, and correctness, as
+well as a clean documentation build. See ``docs/README.md`` for more
+information on building the docs and how docs get released.
+
+Write clean code. Universally formatted code promotes ease of writing, reading,
+and maintenance. Always run `gofmt -s -w file.go` on each changed file before
+committing your changes. Most editors have plugins that do this automatically.
+
+Pull request descriptions should be as clear as possible and include a
+reference to all the issues that they address.
+
+Pull requests must not contain commits from other users or branches.
+
+Commit messages must start with a capitalized and short summary (max. 50
+chars) written in the imperative, followed by an optional, more detailed
+explanatory text which is separated from the summary by an empty line.
+
+Code review comments may be added to your pull request. Discuss, then make the
+suggested modifications and push additional commits to your feature branch. Be
+sure to post a comment after pushing. The new commits will show up in the pull
+request automatically, but the reviewers will not be notified unless you
+comment.
+
+Before the pull request is merged, make sure that you squash your commits into
+logical units of work using `git rebase -i` and `git push -f`. After every
+commit the test suite should be passing. Include documentation changes in the
+same commit so that a revert would remove all traces of the feature or fix.
+
+Commits that fix or close an issue should include a reference like `Closes #XXX`
+or `Fixes #XXX`, which will automatically close the issue when merged.
+
+### Testing
+
+Make sure you include suitable tests, preferably unit tests, in your pull request
+and that all the tests pass.
+
+*Instructions for running tests to be added.*
+
+### Merge approval
+
+libcontainer maintainers use LGTM (looks good to me) in comments on the code review
+to indicate acceptance.
+
+A change requires LGTMs from at least two maintainers. One of those must come from
+a maintainer of the component affected. For example, if a change affects `netlink/`
+and `security`, it needs at least one LGTM from a maintainer of each. Maintainers
+only need one LGTM as presumably they LGTM their own change.
+
+For more details see [MAINTAINERS.md](MAINTAINERS.md)
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the
+patch, which certifies that you wrote it or otherwise have the right to
+pass it on as an open-source patch. The rules are pretty simple: if you
+can certify the below (from
+[developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+then you just add a line to every git commit message:
+
+    Docker-DCO-1.1-Signed-off-by: Joe Smith <joe.smith@email.com> (github: github_handle)
+
+using your real name (sorry, no pseudonyms or anonymous contributions).
+
+One way to automate this is to customise your git ``commit.template`` by adding
+a ``prepare-commit-msg`` hook to your libcontainer checkout:
+
+```
+curl -o .git/hooks/prepare-commit-msg https://raw.githubusercontent.com/docker/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg
+```
+
+* Note: the above script expects to find your GitHub user name in ``git config --get github.user``
+
+#### Small patch exception
+
+There are several exceptions to the signing requirement. Currently these are:
+
+* Your patch fixes spelling or grammar errors.
+* Your patch is a single line change to documentation contained in the
+  `docs` directory.
+* Your patch fixes Markdown formatting or syntax errors in the
+  documentation contained in the `docs` directory.
+
+If you have any questions, please refer to the FAQ in the [docs](to be written)
+
+### How can I become a maintainer?
+
+* Step 1: learn the component inside out
+* Step 2: make yourself useful by contributing code, bugfixes, support etc.
+* Step 3: volunteer on the IRC channel (#libcontainer@freenode)
+
+Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available.
+You don't have to be a maintainer to make a difference on the project!
+
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/devices/defaults.go docker.io-1.3.2~dfsg1/libcontainer/devices/defaults.go
--- docker.io-0.9.1~dfsg1/libcontainer/devices/defaults.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/devices/defaults.go	2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,159 @@
+package devices
+
+var (
+	// These are devices that are to be both allowed and created.
+ + DefaultSimpleDevices = []*Device{ + // /dev/null and zero + { + Path: "/dev/null", + Type: 'c', + MajorNumber: 1, + MinorNumber: 3, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + { + Path: "/dev/zero", + Type: 'c', + MajorNumber: 1, + MinorNumber: 5, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + + { + Path: "/dev/full", + Type: 'c', + MajorNumber: 1, + MinorNumber: 7, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + + // consoles and ttys + { + Path: "/dev/tty", + Type: 'c', + MajorNumber: 5, + MinorNumber: 0, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + + // /dev/urandom,/dev/random + { + Path: "/dev/urandom", + Type: 'c', + MajorNumber: 1, + MinorNumber: 9, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + { + Path: "/dev/random", + Type: 'c', + MajorNumber: 1, + MinorNumber: 8, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + } + + DefaultAllowedDevices = append([]*Device{ + // allow mknod for any device + { + Type: 'c', + MajorNumber: Wildcard, + MinorNumber: Wildcard, + CgroupPermissions: "m", + }, + { + Type: 'b', + MajorNumber: Wildcard, + MinorNumber: Wildcard, + CgroupPermissions: "m", + }, + + { + Path: "/dev/console", + Type: 'c', + MajorNumber: 5, + MinorNumber: 1, + CgroupPermissions: "rwm", + }, + { + Path: "/dev/tty0", + Type: 'c', + MajorNumber: 4, + MinorNumber: 0, + CgroupPermissions: "rwm", + }, + { + Path: "/dev/tty1", + Type: 'c', + MajorNumber: 4, + MinorNumber: 1, + CgroupPermissions: "rwm", + }, + // /dev/pts/ - pts namespaces are "coming soon" + { + Path: "", + Type: 'c', + MajorNumber: 136, + MinorNumber: Wildcard, + CgroupPermissions: "rwm", + }, + { + Path: "", + Type: 'c', + MajorNumber: 5, + MinorNumber: 2, + CgroupPermissions: "rwm", + }, + + // tuntap + { + Path: "", + Type: 'c', + MajorNumber: 10, + MinorNumber: 200, + CgroupPermissions: "rwm", + }, + + /*// fuse + { + Path: "", + Type: 'c', + MajorNumber: 10, + MinorNumber: 229, + CgroupPermissions: "rwm", + }, + + // rtc + { + Path: "", + Type: 'c', + MajorNumber: 254, + MinorNumber: 0, + CgroupPermissions: "rwm", + }, + */ + }, DefaultSimpleDevices...) + + DefaultAutoCreatedDevices = append([]*Device{ + { + // /dev/fuse is created but not allowed. + // This is to allow java to work. Because java + // Insists on there being a /dev/fuse + // https://github.com/docker/docker/issues/514 + // https://github.com/docker/docker/issues/2393 + // + Path: "/dev/fuse", + Type: 'c', + MajorNumber: 10, + MinorNumber: 229, + CgroupPermissions: "rwm", + }, + }, DefaultSimpleDevices...) +) diff -Nru docker.io-0.9.1~dfsg1/libcontainer/devices/devices.go docker.io-1.3.2~dfsg1/libcontainer/devices/devices.go --- docker.io-0.9.1~dfsg1/libcontainer/devices/devices.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/devices/devices.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,129 @@ +package devices + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" +) + +const ( + Wildcard = -1 +) + +var ( + ErrNotADeviceNode = errors.New("not a device node") +) + +// Testing dependencies +var ( + osLstat = os.Lstat + ioutilReadDir = ioutil.ReadDir +) + +type Device struct { + Type rune `json:"type,omitempty"` + Path string `json:"path,omitempty"` // It is fine if this is an empty string in the case that you are using Wildcards + MajorNumber int64 `json:"major_number,omitempty"` // Use the wildcard constant for wildcards. + MinorNumber int64 `json:"minor_number,omitempty"` // Use the wildcard constant for wildcards. 
+	CgroupPermissions string      `json:"cgroup_permissions,omitempty"` // Typically just "rwm"
+	FileMode          os.FileMode `json:"file_mode,omitempty"`          // The permission bits of the file's mode
+	Uid               uint32      `json:"uid,omitempty"`
+	Gid               uint32      `json:"gid,omitempty"`
+}
+
+func GetDeviceNumberString(deviceNumber int64) string {
+	if deviceNumber == Wildcard {
+		return "*"
+	} else {
+		return fmt.Sprintf("%d", deviceNumber)
+	}
+}
+
+func (device *Device) GetCgroupAllowString() string {
+	return fmt.Sprintf("%c %s:%s %s", device.Type, GetDeviceNumberString(device.MajorNumber), GetDeviceNumberString(device.MinorNumber), device.CgroupPermissions)
+}
+
+// Given the path to a device and its cgroup_permissions (which cannot be easily queried), look up the information about a Linux device and return that information as a Device struct.
+func GetDevice(path, cgroupPermissions string) (*Device, error) {
+	fileInfo, err := osLstat(path)
+	if err != nil {
+		return nil, err
+	}
+
+	var (
+		devType                rune
+		mode                   = fileInfo.Mode()
+		fileModePermissionBits = os.FileMode.Perm(mode)
+	)
+
+	switch {
+	case mode&os.ModeDevice == 0:
+		return nil, ErrNotADeviceNode
+	case mode&os.ModeCharDevice != 0:
+		fileModePermissionBits |= syscall.S_IFCHR
+		devType = 'c'
+	default:
+		fileModePermissionBits |= syscall.S_IFBLK
+		devType = 'b'
+	}
+
+	stat_t, ok := fileInfo.Sys().(*syscall.Stat_t)
+	if !ok {
+		return nil, fmt.Errorf("cannot determine the device number for device %s", path)
+	}
+	devNumber := int(stat_t.Rdev)
+
+	return &Device{
+		Type:              devType,
+		Path:              path,
+		MajorNumber:       Major(devNumber),
+		MinorNumber:       Minor(devNumber),
+		CgroupPermissions: cgroupPermissions,
+		FileMode:          fileModePermissionBits,
+		Uid:               stat_t.Uid,
+		Gid:               stat_t.Gid,
+	}, nil
+}
+
+func GetHostDeviceNodes() ([]*Device, error) {
+	return getDeviceNodes("/dev")
+}
+
+func getDeviceNodes(path string) ([]*Device, error) {
+	files, err := ioutilReadDir(path)
+	if err != nil {
+		return nil, err
+	}
+
+	out := []*Device{}
+	for _, f := range files {
+		if f.IsDir() {
+			switch f.Name() {
+			case "pts", "shm", "fd":
+				continue
+			default:
+				sub, err := getDeviceNodes(filepath.Join(path, f.Name()))
+				if err != nil {
+					return nil, err
+				}
+
+				out = append(out, sub...)
+				continue
+			}
+		}
+
+		device, err := GetDevice(filepath.Join(path, f.Name()), "rwm")
+		if err != nil {
+			if err == ErrNotADeviceNode {
+				continue
+			}
+			return nil, err
+		}
+		out = append(out, device)
+	}
+
+	return out, nil
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/devices/devices_test.go docker.io-1.3.2~dfsg1/libcontainer/devices/devices_test.go
--- docker.io-0.9.1~dfsg1/libcontainer/devices/devices_test.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/devices/devices_test.go	2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,61 @@
+package devices
+
+import (
+	"errors"
+	"os"
+	"testing"
+)
+
+func TestGetDeviceLstatFailure(t *testing.T) {
+	testError := errors.New("test error")
+
+	// Override os.Lstat to inject error.
+	osLstat = func(path string) (os.FileInfo, error) {
+		return nil, testError
+	}
+
+	_, err := GetDevice("", "")
+	if err != testError {
+		t.Fatalf("Unexpected error %v, expected %v", err, testError)
+	}
+}
+
+func TestGetHostDeviceNodesIoutilReadDirFailure(t *testing.T) {
+	testError := errors.New("test error")
+
+	// Override ioutil.ReadDir to inject error.
+	ioutilReadDir = func(dirname string) ([]os.FileInfo, error) {
+		return nil, testError
+	}
+
+	_, err := GetHostDeviceNodes()
+	if err != testError {
+		t.Fatalf("Unexpected error %v, expected %v", err, testError)
+	}
+}
+
+func TestGetHostDeviceNodesIoutilReadDirDeepFailure(t *testing.T) {
+	testError := errors.New("test error")
+	called := false
+
+	// Override ioutil.ReadDir to inject error after the first call.
+	ioutilReadDir = func(dirname string) ([]os.FileInfo, error) {
+		if called {
+			return nil, testError
+		}
+		called = true
+
+		// Provoke a second call.
+		fi, err := os.Lstat("/tmp")
+		if err != nil {
+			t.Fatalf("Unexpected error %v", err)
+		}
+
+		return []os.FileInfo{fi}, nil
+	}
+
+	_, err := GetHostDeviceNodes()
+	if err != testError {
+		t.Fatalf("Unexpected error %v, expected %v", err, testError)
+	}
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/devices/number.go docker.io-1.3.2~dfsg1/libcontainer/devices/number.go
--- docker.io-0.9.1~dfsg1/libcontainer/devices/number.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/devices/number.go	2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,26 @@
+package devices
+
+/*
+
+This code provides support for manipulating Linux device numbers. It should be replaced by normal syscall functions once http://code.google.com/p/go/issues/detail?id=8106 is solved.
+
+You can read what they are here:
+
+ - http://www.makelinux.net/ldd3/chp-3-sect-2
+ - http://www.linux-tutorial.info/modules.php?name=MContent&pageid=94
+
+Note! These are NOT the same as the MAJOR(dev_t device);, MINOR(dev_t device); and MKDEV(int major, int minor); functions as defined in <linux/kdev_t.h>, as the representation of device numbers used by Go is different from the one used internally by the kernel! - https://github.com/torvalds/linux/blob/master/include/linux/kdev_t.h#L9
+
+*/
+
+func Major(devNumber int) int64 {
+	return int64((devNumber >> 8) & 0xfff)
+}
+
+func Minor(devNumber int) int64 {
+	return int64((devNumber & 0xff) | ((devNumber >> 12) & 0xfff00))
+}
+
+func Mkdev(majorNumber int64, minorNumber int64) int {
+	return int((majorNumber << 8) | (minorNumber & 0xff) | ((minorNumber & 0xfff00) << 12))
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/Dockerfile docker.io-1.3.2~dfsg1/libcontainer/Dockerfile
--- docker.io-0.9.1~dfsg1/libcontainer/Dockerfile	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/Dockerfile	2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,23 @@
+FROM crosbymichael/golang
+
+RUN apt-get update && apt-get install -y gcc make
+RUN go get code.google.com/p/go.tools/cmd/cover
+
+# set up a playground for us to spawn containers in
+RUN mkdir /busybox && \
+	curl -sSL 'https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.02/rootfs.tar' | tar -xC /busybox
+
+RUN curl -sSL https://raw.githubusercontent.com/docker/docker/master/hack/dind -o /dind && \
+	chmod +x /dind
+
+COPY . /go/src/github.com/docker/libcontainer
+WORKDIR /go/src/github.com/docker/libcontainer
+RUN cp sample_configs/minimal.json /busybox/container.json
+
+ENV GOPATH $GOPATH:/go/src/github.com/docker/libcontainer/vendor
+
+RUN go get -d -v ./...
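Stepping back to `number.go` just above: the bit layout in `Major`/`Minor`/`Mkdev` is easier to trust after a worked numeric check. The following standalone sketch inlines the same arithmetic as those three functions (it assumes nothing beyond the formulas shown in the diff) and uses /dev/null's well-known character device numbers, 1:3, as the test value.

```go
package main

import "fmt"

func main() {
	// /dev/null is the character device major 1, minor 3 on Linux.
	// Encode it with the same layout Mkdev uses:
	// major in bits 8-19, minor in bits 0-7 and 20-31.
	dev := (1 << 8) | (3 & 0xff) | ((3 & 0xfff00) << 12) // == 259

	// Decode it again with the Major/Minor formulas from number.go.
	major := int64((dev >> 8) & 0xfff)                     // 1
	minor := int64((dev & 0xff) | ((dev >> 12) & 0xfff00)) // 3

	fmt.Printf("%d:%d\n", major, minor) // prints "1:3"
}
```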
+RUN make direct-install + +ENTRYPOINT ["/dind"] +CMD ["make", "direct-test"] diff -Nru docker.io-0.9.1~dfsg1/libcontainer/error.go docker.io-1.3.2~dfsg1/libcontainer/error.go --- docker.io-0.9.1~dfsg1/libcontainer/error.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/error.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,37 @@ +package libcontainer + +// API error code type. +type ErrorCode int + +// API error codes. +const ( + // Factory errors + IdInUse ErrorCode = iota + InvalidIdFormat + // TODO: add Load errors + + // Container errors + ContainerDestroyed + ContainerPaused + + // Common errors + ConfigInvalid + SystemError +) + +// API Error type. +type Error interface { + error + + // Returns the stack trace, if any, which identifies the + // point at which the error occurred. + Stack() []byte + + // Returns a verbose string including the error message + // and a representation of the stack trace suitable for + // printing. + Detail() string + + // Returns the error code for this error. + Code() ErrorCode +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/factory.go docker.io-1.3.2~dfsg1/libcontainer/factory.go --- docker.io-0.9.1~dfsg1/libcontainer/factory.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/factory.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,32 @@ +package libcontainer + +type Factory interface { + + // Creates a new container with the given id and starts the initial process inside it. + // id must be a string containing only letters, digits and underscores and must contain + // between 1 and 1024 characters, inclusive. + // + // The id must not already be in use by an existing container. Containers created using + // a factory with the same path (and file system) must have distinct ids. + // + // Returns the new container with a running process. + // + // Errors: + // IdInUse - id is already in use by a container + // InvalidIdFormat - id has incorrect format + // ConfigInvalid - config is invalid + // SystemError - System error + // + // On error, any partially created container parts are cleaned up (the operation is atomic). + Create(id string, config *Config) (Container, Error) + + // Load takes an ID for an existing container and reconstructs the container + // from the state. + // + // Errors: + // Path does not exist + // Container is stopped + // System error + // TODO: fix description + Load(id string) (Container, Error) +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/label/label.go docker.io-1.3.2~dfsg1/libcontainer/label/label.go --- docker.io-0.9.1~dfsg1/libcontainer/label/label.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/label/label.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,45 @@ +// +build !selinux !linux + +package label + +// InitLabels returns the process label and file labels to be used within +// the container. A list of options can be passed into this function to alter +// the labels. 
+func InitLabels(options []string) (string, string, error) { + return "", "", nil +} + +func GenLabels(options string) (string, string, error) { + return "", "", nil +} + +func FormatMountLabel(src string, mountLabel string) string { + return src +} + +func SetProcessLabel(processLabel string) error { + return nil +} + +func SetFileLabel(path string, fileLabel string) error { + return nil +} + +func Relabel(path string, fileLabel string, relabel string) error { + return nil +} + +func GetPidLabel(pid int) (string, error) { + return "", nil +} + +func Init() { +} + +func ReserveLabel(label string) error { + return nil +} + +func UnreserveLabel(label string) error { + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/label/label_selinux.go docker.io-1.3.2~dfsg1/libcontainer/label/label_selinux.go --- docker.io-0.9.1~dfsg1/libcontainer/label/label_selinux.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/label/label_selinux.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,132 @@ +// +build selinux,linux + +package label + +import ( + "fmt" + "strings" + + "github.com/docker/libcontainer/selinux" +) + +// InitLabels returns the process label and file labels to be used within +// the container. A list of options can be passed into this function to alter +// the labels. The labels returned will include a random MCS String, that is +// guaranteed to be unique. +func InitLabels(options []string) (string, string, error) { + if !selinux.SelinuxEnabled() { + return "", "", nil + } + var err error + processLabel, mountLabel := selinux.GetLxcContexts() + if processLabel != "" { + pcon := selinux.NewContext(processLabel) + mcon := selinux.NewContext(mountLabel) + for _, opt := range options { + if opt == "disable" { + return "", "", nil + } + if i := strings.Index(opt, ":"); i == -1 { + return "", "", fmt.Errorf("Bad SELinux Option") + } + con := strings.SplitN(opt, ":", 2) + pcon[con[0]] = con[1] + if con[0] == "level" || con[0] == "user" { + mcon[con[0]] = con[1] + } + } + processLabel = pcon.Get() + mountLabel = mcon.Get() + } + return processLabel, mountLabel, err +} + +// DEPRECATED: The GenLabels function is only to be used during the transition to the official API. +func GenLabels(options string) (string, string, error) { + return InitLabels(strings.Fields(options)) +} + +// FormatMountLabel returns a string to be used by the mount command. +// The format of this string will be used to alter the labeling of the mountpoint. +// The string returned is suitable to be used as the options field of the mount command. +// If you need to have additional mount point options, you can pass them in as +// the first parameter. Second parameter is the label that you wish to apply +// to all content in the mount point. +func FormatMountLabel(src, mountLabel string) string { + if mountLabel != "" { + switch src { + case "": + src = fmt.Sprintf("context=%q", mountLabel) + default: + src = fmt.Sprintf("%s,context=%q", src, mountLabel) + } + } + return src +} + +// SetProcessLabel takes a process label and tells the kernel to assign the +// label to the next program executed by the current process. +func SetProcessLabel(processLabel string) error { + if processLabel == "" { + return nil + } + return selinux.Setexeccon(processLabel) +} + +// GetProcessLabel returns the process label that the kernel will assign +// to the next program executed by the current process. If "" is returned +// this indicates that the default labeling will happen for the process. 
+func GetProcessLabel() (string, error) {
+	return selinux.Getexeccon()
+}
+
+// SetFileLabel modifies the "path" label to the specified file label
+func SetFileLabel(path string, fileLabel string) error {
+	if selinux.SelinuxEnabled() && fileLabel != "" {
+		return selinux.Setfilecon(path, fileLabel)
+	}
+	return nil
+}
+
+// Change the label of path to the filelabel string. If the relabel string
+// is "z", relabel will change the MCS label to s0. This will allow all
+// containers to share the content. If the relabel string is a "Z" then
+// the MCS label should continue to be used. SELinux will use this field
+// to make sure the content cannot be shared by other containers.
+func Relabel(path string, fileLabel string, relabel string) error {
+	if fileLabel == "" {
+		return nil
+	}
+	if relabel == "z" {
+		c := selinux.NewContext(fileLabel)
+		c["level"] = "s0"
+		fileLabel = c.Get()
+	}
+	return selinux.Chcon(path, fileLabel, true)
+}
+
+// GetPidLabel will return the label of the process running with the specified pid
+func GetPidLabel(pid int) (string, error) {
+	return selinux.Getpidcon(pid)
+}
+
+// Init initialises the labeling system
+func Init() {
+	selinux.SelinuxEnabled()
+}
+
+// ReserveLabel will record the fact that the MCS label has already been used.
+// This will prevent InitLabels from using the MCS label in a newly created
+// container
+func ReserveLabel(label string) error {
+	selinux.ReserveLabel(label)
+	return nil
+}
+
+// UnreserveLabel will remove the reservation of the MCS label.
+// This will allow InitLabels to use the MCS label in a newly created
+// container
+func UnreserveLabel(label string) error {
+	selinux.FreeLxcContexts(label)
+	return nil
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/label/label_selinux_test.go docker.io-1.3.2~dfsg1/libcontainer/label/label_selinux_test.go
--- docker.io-0.9.1~dfsg1/libcontainer/label/label_selinux_test.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/label/label_selinux_test.go	2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,48 @@
+// +build selinux,linux
+
+package label
+
+import (
+	"testing"
+
+	"github.com/docker/libcontainer/selinux"
+)
+
+func TestInit(t *testing.T) {
+	if selinux.SelinuxEnabled() {
+		var testNull []string
+		plabel, mlabel, err := InitLabels(testNull)
+		if err != nil {
+			t.Log("InitLabels Failed")
+			t.Fatal(err)
+		}
+		testDisabled := []string{"disable"}
+		plabel, mlabel, err = InitLabels(testDisabled)
+		if err != nil {
+			t.Log("InitLabels Disabled Failed")
+			t.Fatal(err)
+		}
+		if plabel != "" {
+			t.Log("InitLabels Disabled Failed")
+			t.Fatal()
+		}
+		testUser := []string{"user:user_u", "role:user_r", "type:user_t", "level:s0:c1,c15"}
+		plabel, mlabel, err = InitLabels(testUser)
+		if err != nil {
+			t.Log("InitLabels User Failed")
+			t.Fatal(err)
+		}
+		if plabel != "user_u:user_r:user_t:s0:c1,c15" || mlabel != "user_u:object_r:svirt_sandbox_file_t:s0:c1,c15" {
+			t.Log("InitLabels User Failed")
+			t.Log(plabel, mlabel)
+			t.Fatal(err)
+		}
+
+		testBadData := []string{"user", "role:user_r", "type:user_t", "level:s0:c1,c15"}
+		plabel, mlabel, err = InitLabels(testBadData)
+		if err == nil {
+			t.Log("InitLabels Bad Failed")
+			t.Fatal(err)
+		}
+	}
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/LICENSE docker.io-1.3.2~dfsg1/libcontainer/LICENSE
--- docker.io-0.9.1~dfsg1/libcontainer/LICENSE	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/LICENSE	2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+
http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/MAINTAINERS docker.io-1.3.2~dfsg1/libcontainer/MAINTAINERS
--- docker.io-0.9.1~dfsg1/libcontainer/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/MAINTAINERS 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,6 @@
+Michael Crosby (@crosbymichael)
+Rohit Jnagal (@rjnagal)
+Victor Marmol (@vmarmol)
+Mrunal Patel (@mrunalp)
+.travis.yml: Tianon Gravi (@tianon)
+update-vendor.sh: Tianon Gravi (@tianon)
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/MAINTAINERS_GUIDE.md docker.io-1.3.2~dfsg1/libcontainer/MAINTAINERS_GUIDE.md
--- docker.io-0.9.1~dfsg1/libcontainer/MAINTAINERS_GUIDE.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/MAINTAINERS_GUIDE.md 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,99 @@
+# The libcontainer Maintainers' Guide
+
+## Introduction
+
+Dear maintainer. Thank you for investing the time and energy to help
+make libcontainer as useful as possible. Maintaining a project is difficult,
+sometimes unrewarding work. Sure, you will get to contribute cool
+features to the project. But most of your time will be spent reviewing,
+cleaning up, documenting, answering questions, and justifying design
+decisions - while everyone else has all the fun! But remember - the quality
+of the maintainers' work is what distinguishes the good projects from the
+great. So please be proud of your work, even the unglamorous parts,
+and encourage a culture of appreciation and respect for *every* aspect
+of improving the project - not just the hot new features.
+
+This document is a manual for maintainers old and new. It explains what
+is expected of maintainers, how they should work, and what tools are
+available to them.
+
+This is a living document - if you see something out of date or missing,
+speak up!
+
+## What are a maintainer's responsibilities?
+
+It is every maintainer's responsibility to:
+
+* 1) Expose a clear roadmap for improving their component.
+* 2) Deliver prompt feedback and decisions on pull requests.
+* 3) Be available to anyone with questions, bug reports, criticism etc.
+  on their component. This includes IRC, GitHub requests and the mailing
+  list.
+* 4) Make sure their component respects the philosophy, design and
+  roadmap of the project.
+
+## How are decisions made?
+
+Short answer: with pull requests to the libcontainer repository.
+
+libcontainer is an open-source project with an open design philosophy. This
+means that the repository is the source of truth for EVERY aspect of the
+project, including its philosophy, design, roadmap and APIs. *If it's
+part of the project, it's in the repo. If it's in the repo, it's part of
+the project.*
+
+As a result, all decisions can be expressed as changes to the
+repository. An implementation change is a change to the source code. An
+API change is a change to the API specification. A philosophy change is
+a change to the philosophy manifesto. And so on.
+
+All decisions affecting libcontainer, big and small, follow the same 3 steps:
+
+* Step 1: Open a pull request. Anyone can do this.
+
+* Step 2: Discuss the pull request. Anyone can do this.
+
+* Step 3: Accept (`LGTM`) or refuse a pull request. The relevant maintainers do
+this (see below "Who decides what?")
+
+
+## Who decides what?
+
+All decisions are pull requests, and the relevant maintainers make
+decisions by accepting or refusing the pull request. Review and acceptance
+by anyone is denoted by adding a comment in the pull request: `LGTM`.
+However, only currently listed `MAINTAINERS` are counted towards the required +two LGTMs. + +libcontainer follows the timeless, highly efficient and totally unfair system +known as [Benevolent dictator for life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with Michael Crosby in the role of BDFL. +This means that all decisions are made by default by Michael. Since making +every decision himself would be highly un-scalable, in practice decisions +are spread across multiple maintainers. + +The relevant maintainers for a pull request can be worked out in two steps: + +* Step 1: Determine the subdirectories affected by the pull request. This + might be `netlink/` and `security/`, or any other part of the repo. + +* Step 2: Find the `MAINTAINERS` file which affects this directory. If the + directory itself does not have a `MAINTAINERS` file, work your way up + the repo hierarchy until you find one. + +### I'm a maintainer, and I'm going on holiday + +Please let your co-maintainers and other contributors know by raising a pull +request that comments out your `MAINTAINERS` file entry using a `#`. + +### I'm a maintainer, should I make pull requests too? + +Yes. Nobody should ever push to master directly. All changes should be +made through a pull request. + +### Who assigns maintainers? + +Michael has final `LGTM` approval for all pull requests to `MAINTAINERS` files. + +### How is this process changed? + +Just like everything else: by making a pull request :) diff -Nru docker.io-0.9.1~dfsg1/libcontainer/Makefile docker.io-1.3.2~dfsg1/libcontainer/Makefile --- docker.io-0.9.1~dfsg1/libcontainer/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/Makefile 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,24 @@ + +all: + docker build -t docker/libcontainer . + +test: + # we need NET_ADMIN for the netlink tests and SYS_ADMIN for mounting + docker run --rm -it --privileged docker/libcontainer + +sh: + docker run --rm -it --privileged -w /busybox docker/libcontainer nsinit exec sh + +GO_PACKAGES = $(shell find . -not \( -wholename ./vendor -prune -o -wholename ./.git -prune \) -name '*.go' -print0 | xargs -0n1 dirname | sort -u) + +direct-test: + go test -cover -v $(GO_PACKAGES) + +direct-test-short: + go test -cover -test.short -v $(GO_PACKAGES) + +direct-build: + go build -v $(GO_PACKAGES) + +direct-install: + go install -v $(GO_PACKAGES) diff -Nru docker.io-0.9.1~dfsg1/libcontainer/mount/init.go docker.io-1.3.2~dfsg1/libcontainer/mount/init.go --- docker.io-0.9.1~dfsg1/libcontainer/mount/init.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/mount/init.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,208 @@ +// +build linux + +package mount + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + + "github.com/docker/libcontainer/label" + "github.com/docker/libcontainer/mount/nodes" +) + +// default mount point flags +const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV + +type mount struct { + source string + path string + device string + flags int + data string +} + +// InitializeMountNamespace sets up the devices, mount points, and filesystems for use inside a +// new mount namespace. 
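+// Roughly, the sequence is: make / private (or slave when NoPivotRoot is set),
+// bind-mount the rootfs onto itself, mount the system filesystems, apply any
+// user mounts and device nodes, set up ptmx and the console, and finally
+// pivot_root (or MS_MOVE plus chroot) into the rootfs.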
+func InitializeMountNamespace(rootfs, console string, sysReadonly bool, mountConfig *MountConfig) error {
+	var (
+		err  error
+		flag = syscall.MS_PRIVATE
+	)
+
+	if mountConfig.NoPivotRoot {
+		flag = syscall.MS_SLAVE
+	}
+
+	if err := syscall.Mount("", "/", "", uintptr(flag|syscall.MS_REC), ""); err != nil {
+		return fmt.Errorf("mounting / with flags %X %s", (flag | syscall.MS_REC), err)
+	}
+
+	if err := syscall.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
+		return fmt.Errorf("mounting %s as bind %s", rootfs, err)
+	}
+
+	if err := mountSystem(rootfs, sysReadonly, mountConfig); err != nil {
+		return fmt.Errorf("mount system %s", err)
+	}
+
+	// apply any user specified mounts within the new mount namespace
+	for _, m := range mountConfig.Mounts {
+		if err := m.Mount(rootfs, mountConfig.MountLabel); err != nil {
+			return err
+		}
+	}
+
+	if err := nodes.CreateDeviceNodes(rootfs, mountConfig.DeviceNodes); err != nil {
+		return fmt.Errorf("create device nodes %s", err)
+	}
+
+	if err := SetupPtmx(rootfs, console, mountConfig.MountLabel); err != nil {
+		return err
+	}
+
+	// stdin, stdout and stderr could be pointing to /dev/null from parent namespace.
+	// Re-open them inside this namespace.
+	if err := reOpenDevNull(rootfs); err != nil {
+		return fmt.Errorf("Failed to reopen /dev/null %s", err)
+	}
+
+	if err := setupDevSymlinks(rootfs); err != nil {
+		return fmt.Errorf("dev symlinks %s", err)
+	}
+
+	if err := syscall.Chdir(rootfs); err != nil {
+		return fmt.Errorf("chdir into %s %s", rootfs, err)
+	}
+
+	if mountConfig.NoPivotRoot {
+		err = MsMoveRoot(rootfs)
+	} else {
+		err = PivotRoot(rootfs)
+	}
+
+	if err != nil {
+		return err
+	}
+
+	if mountConfig.ReadonlyFs {
+		if err := SetReadonly(); err != nil {
+			return fmt.Errorf("set readonly %s", err)
+		}
+	}
+
+	syscall.Umask(0022)
+
+	return nil
+}
+
+// mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts
+// inside the mount namespace
+func mountSystem(rootfs string, sysReadonly bool, mountConfig *MountConfig) error {
+	for _, m := range newSystemMounts(rootfs, mountConfig.MountLabel, sysReadonly) {
+		if err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) {
+			return fmt.Errorf("mkdirall %s %s", m.path, err)
+		}
+		if err := syscall.Mount(m.source, m.path, m.device, uintptr(m.flags), m.data); err != nil {
+			return fmt.Errorf("mounting %s into %s %s", m.source, m.path, err)
+		}
+	}
+	return nil
+}
+
+func createIfNotExists(path string, isDir bool) error {
+	if _, err := os.Stat(path); err != nil {
+		if os.IsNotExist(err) {
+			if isDir {
+				if err := os.MkdirAll(path, 0755); err != nil {
+					return err
+				}
+			} else {
+				if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+					return err
+				}
+				f, err := os.OpenFile(path, os.O_CREATE, 0755)
+				if err != nil {
+					return err
+				}
+				f.Close()
+			}
+		}
+	}
+	return nil
+}
+
+func setupDevSymlinks(rootfs string) error {
+	var links = [][2]string{
+		{"/proc/self/fd", "/dev/fd"},
+		{"/proc/self/fd/0", "/dev/stdin"},
+		{"/proc/self/fd/1", "/dev/stdout"},
+		{"/proc/self/fd/2", "/dev/stderr"},
+	}
+
+	// kcore support can be toggled with CONFIG_PROC_KCORE; only create a symlink
+	// in /dev if it exists in /proc.
+	if _, err := os.Stat("/proc/kcore"); err == nil {
+		links = append(links, [2]string{"/proc/kcore", "/dev/kcore"})
+	}
+
+	for _, link := range links {
+		var (
+			src = link[0]
+			dst = filepath.Join(rootfs, link[1])
+		)
+
+		if err := os.Symlink(src, dst); err != nil && !os.IsExist(err) {
+			return fmt.Errorf("symlink %s %s %s", src, dst, err)
+		}
+	}
+
+	return nil
+}
+
+// TODO: this is crappy right now and should be cleaned up with a better way of handling system and
+// standard bind mounts allowing them to be more dynamic
+func newSystemMounts(rootfs, mountLabel string, sysReadonly bool) []mount {
+	systemMounts := []mount{
+		{source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags},
+		{source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: label.FormatMountLabel("mode=755", mountLabel)},
+		{source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)},
+		{source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)},
+	}
+
+	sysMountFlags := defaultMountFlags
+	if sysReadonly {
+		sysMountFlags |= syscall.MS_RDONLY
+	}
+
+	systemMounts = append(systemMounts, mount{source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: sysMountFlags})
+
+	return systemMounts
+}
+
+// If stdin, stdout or stderr are pointing to '/dev/null',
+// this method will make them point to '/dev/null' from within this namespace.
+func reOpenDevNull(rootfs string) error {
+	var stat, devNullStat syscall.Stat_t
+	file, err := os.Open(filepath.Join(rootfs, "/dev/null"))
+	if err != nil {
+		return fmt.Errorf("Failed to open /dev/null - %s", err)
+	}
+	defer file.Close()
+	if err = syscall.Fstat(int(file.Fd()), &devNullStat); err != nil {
+		return fmt.Errorf("Failed to stat /dev/null - %s", err)
+	}
+	for fd := 0; fd < 3; fd++ {
+		if err = syscall.Fstat(fd, &stat); err != nil {
+			return fmt.Errorf("Failed to stat fd %d - %s", fd, err)
+		}
+		if stat.Rdev == devNullStat.Rdev {
+			// Close and re-open the fd.
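+			// dup2(2) closes fd and atomically makes it refer to the same
+			// open /dev/null as file.Fd(), i.e. the one inside the rootfs,
+			// replacing the descriptor inherited from the parent namespace.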
+			if err = syscall.Dup2(int(file.Fd()), fd); err != nil {
+				return fmt.Errorf("Failed to dup fd %d to fd %d - %s", file.Fd(), fd, err)
+			}
+		}
+	}
+	return nil
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/mount/mount_config.go docker.io-1.3.2~dfsg1/libcontainer/mount/mount_config.go
--- docker.io-0.9.1~dfsg1/libcontainer/mount/mount_config.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/mount/mount_config.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,28 @@
+package mount
+
+import (
+	"errors"
+
+	"github.com/docker/libcontainer/devices"
+)
+
+var ErrUnsupported = errors.New("Unsupported method")
+
+type MountConfig struct {
+	// NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs
+	// This is a common option when the container is running in ramdisk
+	NoPivotRoot bool `json:"no_pivot_root,omitempty"`
+
+	// ReadonlyFs will remount the container's rootfs as readonly where only externally mounted
+	// bind mounts are writable
+	ReadonlyFs bool `json:"readonly_fs,omitempty"`
+
+	// Mounts specify additional source and destination paths that will be mounted inside the container's
+	// rootfs and mount namespace if specified
+	Mounts []*Mount `json:"mounts,omitempty"`
+
+	// The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well!
+	DeviceNodes []*devices.Device `json:"device_nodes,omitempty"`
+
+	MountLabel string `json:"mount_label,omitempty"`
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/mount/mount.go docker.io-1.3.2~dfsg1/libcontainer/mount/mount.go
--- docker.io-0.9.1~dfsg1/libcontainer/mount/mount.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/mount/mount.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,109 @@
+package mount
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"syscall"
+
+	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/libcontainer/label"
+)
+
+type Mount struct {
+	Type        string `json:"type,omitempty"`
+	Source      string `json:"source,omitempty"`      // Source path, in the host namespace
+	Destination string `json:"destination,omitempty"` // Destination path, in the container
+	Writable    bool   `json:"writable,omitempty"`
+	Relabel     string `json:"relabel,omitempty"` // Relabel source if set, "z" indicates shared, "Z" indicates unshared
+	Private     bool   `json:"private,omitempty"`
+	Slave       bool   `json:"slave,omitempty"`
+}
+
+func (m *Mount) Mount(rootfs, mountLabel string) error {
+	switch m.Type {
+	case "bind":
+		return m.bindMount(rootfs, mountLabel)
+	case "tmpfs":
+		return m.tmpfsMount(rootfs, mountLabel)
+	default:
+		return fmt.Errorf("unsupported mount type %s for %s", m.Type, m.Destination)
+	}
+}
+
+func (m *Mount) bindMount(rootfs, mountLabel string) error {
+	var (
+		flags = syscall.MS_BIND | syscall.MS_REC
+		dest  = filepath.Join(rootfs, m.Destination)
+	)
+
+	if !m.Writable {
+		flags = flags | syscall.MS_RDONLY
+	}
+
+	if m.Slave {
+		flags = flags | syscall.MS_SLAVE
+	}
+
+	stat, err := os.Stat(m.Source)
+	if err != nil {
+		return err
+	}
+
+	// FIXME: (crosbymichael) This does not belong here and should be done a layer above
+	dest, err = symlink.FollowSymlinkInScope(dest, rootfs)
+	if err != nil {
+		return err
+	}
+
+	if err := createIfNotExists(dest, stat.IsDir()); err != nil {
+		return fmt.Errorf("creating new bind mount target %s", err)
+	}
+
+	if err := syscall.Mount(m.Source, dest, "bind", uintptr(flags), ""); err != nil {
+		return fmt.Errorf("mounting %s into %s %s", m.Source, dest, err)
+	}
+
+	if !m.Writable {
+		if err := syscall.Mount(m.Source, dest, "bind", uintptr(flags|syscall.MS_REMOUNT), ""); err != nil {
+			return fmt.Errorf("remounting %s into %s %s", m.Source, dest, err)
+		}
+	}
+
+	if m.Relabel != "" {
+		if err := label.Relabel(m.Source, mountLabel, m.Relabel); err != nil {
+			return fmt.Errorf("relabeling %s to %s %s", m.Source, mountLabel, err)
+		}
+	}
+
+	if m.Private {
+		if err := syscall.Mount("", dest, "none", uintptr(syscall.MS_PRIVATE), ""); err != nil {
+			return fmt.Errorf("mounting %s private %s", dest, err)
+		}
+	}
+
+	return nil
+}
+
+func (m *Mount) tmpfsMount(rootfs, mountLabel string) error {
+	var (
+		err  error
+		l    = label.FormatMountLabel("", mountLabel)
+		dest = filepath.Join(rootfs, m.Destination)
+	)
+
+	// FIXME: (crosbymichael) This does not belong here and should be done a layer above
+	if dest, err = symlink.FollowSymlinkInScope(dest, rootfs); err != nil {
+		return err
+	}
+
+	if err := createIfNotExists(dest, true); err != nil {
+		return fmt.Errorf("creating new tmpfs mount target %s", err)
+	}
+
+	if err := syscall.Mount("tmpfs", dest, "tmpfs", uintptr(defaultMountFlags), l); err != nil {
+		return fmt.Errorf("%s mounting %s in tmpfs", err, dest)
+	}
+
+	return nil
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/mount/msmoveroot.go docker.io-1.3.2~dfsg1/libcontainer/mount/msmoveroot.go
--- docker.io-0.9.1~dfsg1/libcontainer/mount/msmoveroot.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/mount/msmoveroot.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,20 @@
+// +build linux
+
+package mount
+
+import (
+	"fmt"
+	"syscall"
+)
+
+func MsMoveRoot(rootfs string) error {
+	if err := syscall.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil {
+		return fmt.Errorf("mount move %s into / %s", rootfs, err)
+	}
+
+	if err := syscall.Chroot("."); err != nil {
+		return fmt.Errorf("chroot . %s", err)
+	}
+
+	return syscall.Chdir("/")
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/mount/nodes/nodes.go docker.io-1.3.2~dfsg1/libcontainer/mount/nodes/nodes.go
--- docker.io-0.9.1~dfsg1/libcontainer/mount/nodes/nodes.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/mount/nodes/nodes.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,57 @@
+// +build linux
+
+package nodes
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"syscall"
+
+	"github.com/docker/libcontainer/devices"
+)
+
+// Create the device nodes in the container.
+func CreateDeviceNodes(rootfs string, nodesToCreate []*devices.Device) error {
+	oldMask := syscall.Umask(0000)
+	defer syscall.Umask(oldMask)
+
+	for _, node := range nodesToCreate {
+		if err := CreateDeviceNode(rootfs, node); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Creates the device node in the rootfs of the container.
+func CreateDeviceNode(rootfs string, node *devices.Device) error { + var ( + dest = filepath.Join(rootfs, node.Path) + parent = filepath.Dir(dest) + ) + + if err := os.MkdirAll(parent, 0755); err != nil { + return err + } + + fileMode := node.FileMode + switch node.Type { + case 'c': + fileMode |= syscall.S_IFCHR + case 'b': + fileMode |= syscall.S_IFBLK + default: + return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path) + } + + if err := syscall.Mknod(dest, uint32(fileMode), devices.Mkdev(node.MajorNumber, node.MinorNumber)); err != nil && !os.IsExist(err) { + return fmt.Errorf("mknod %s %s", node.Path, err) + } + + if err := syscall.Chown(dest, int(node.Uid), int(node.Gid)); err != nil { + return fmt.Errorf("chown %s to %d:%d", node.Path, node.Uid, node.Gid) + } + + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/mount/nodes/nodes_unsupported.go docker.io-1.3.2~dfsg1/libcontainer/mount/nodes/nodes_unsupported.go --- docker.io-0.9.1~dfsg1/libcontainer/mount/nodes/nodes_unsupported.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/mount/nodes/nodes_unsupported.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,13 @@ +// +build !linux + +package nodes + +import ( + "errors" + + "github.com/docker/libcontainer/devices" +) + +func CreateDeviceNodes(rootfs string, nodesToCreate []*devices.Device) error { + return errors.New("Unsupported method") +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/mount/pivotroot.go docker.io-1.3.2~dfsg1/libcontainer/mount/pivotroot.go --- docker.io-0.9.1~dfsg1/libcontainer/mount/pivotroot.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/mount/pivotroot.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,34 @@ +// +build linux + +package mount + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" +) + +func PivotRoot(rootfs string) error { + pivotDir, err := ioutil.TempDir(rootfs, ".pivot_root") + if err != nil { + return fmt.Errorf("can't create pivot_root dir %s, error %v", pivotDir, err) + } + + if err := syscall.PivotRoot(rootfs, pivotDir); err != nil { + return fmt.Errorf("pivot_root %s", err) + } + + if err := syscall.Chdir("/"); err != nil { + return fmt.Errorf("chdir / %s", err) + } + + // path to pivot dir now changed, update + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { + return fmt.Errorf("unmount pivot_root dir %s", err) + } + + return os.Remove(pivotDir) +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/mount/ptmx.go docker.io-1.3.2~dfsg1/libcontainer/mount/ptmx.go --- docker.io-0.9.1~dfsg1/libcontainer/mount/ptmx.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/mount/ptmx.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,30 @@ +// +build linux + +package mount + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/libcontainer/console" +) + +func SetupPtmx(rootfs, consolePath, mountLabel string) error { + ptmx := filepath.Join(rootfs, "dev/ptmx") + if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) { + return err + } + + if err := os.Symlink("pts/ptmx", ptmx); err != nil { + return fmt.Errorf("symlink dev ptmx %s", err) + } + + if consolePath != "" { + if err := console.Setup(rootfs, consolePath, mountLabel); err != nil { + return err + } + } + + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/mount/readonly.go docker.io-1.3.2~dfsg1/libcontainer/mount/readonly.go --- 
docker.io-0.9.1~dfsg1/libcontainer/mount/readonly.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/mount/readonly.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,11 @@ +// +build linux + +package mount + +import ( + "syscall" +) + +func SetReadonly() error { + return syscall.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, "") +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/mount/remount.go docker.io-1.3.2~dfsg1/libcontainer/mount/remount.go --- docker.io-0.9.1~dfsg1/libcontainer/mount/remount.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/mount/remount.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,31 @@ +// +build linux + +package mount + +import "syscall" + +func RemountProc() error { + if err := syscall.Unmount("/proc", syscall.MNT_DETACH); err != nil { + return err + } + + if err := syscall.Mount("proc", "/proc", "proc", uintptr(defaultMountFlags), ""); err != nil { + return err + } + + return nil +} + +func RemountSys() error { + if err := syscall.Unmount("/sys", syscall.MNT_DETACH); err != nil { + if err != syscall.EINVAL { + return err + } + } else { + if err := syscall.Mount("sysfs", "/sys", "sysfs", uintptr(defaultMountFlags), ""); err != nil { + return err + } + } + + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/namespaces/create.go docker.io-1.3.2~dfsg1/libcontainer/namespaces/create.go --- docker.io-0.9.1~dfsg1/libcontainer/namespaces/create.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/namespaces/create.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,10 @@ +package namespaces + +import ( + "os" + "os/exec" + + "github.com/docker/libcontainer" +) + +type CreateCommand func(container *libcontainer.Config, console, dataPath, init string, childPipe *os.File, args []string) *exec.Cmd diff -Nru docker.io-0.9.1~dfsg1/libcontainer/namespaces/exec.go docker.io-1.3.2~dfsg1/libcontainer/namespaces/exec.go --- docker.io-0.9.1~dfsg1/libcontainer/namespaces/exec.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/namespaces/exec.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,200 @@ +// +build linux + +package namespaces + +import ( + "io" + "os" + "os/exec" + "syscall" + + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/cgroups/fs" + "github.com/docker/libcontainer/cgroups/systemd" + "github.com/docker/libcontainer/network" + "github.com/docker/libcontainer/syncpipe" + "github.com/docker/libcontainer/system" +) + +// TODO(vishh): This is part of the libcontainer API and it does much more than just namespaces related work. +// Move this to libcontainer package. +// Exec performs setup outside of a namespace so that a container can be +// executed. Exec is a high level function for working with container namespaces. 
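+// Roughly: it creates a sync pipe, starts the child via createCommand, places
+// the child into cgroups before syncing (so no children can escape), sets up
+// networking, persists the container state, signals the child to continue,
+// and finally waits for the child and returns its exit status.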
+func Exec(container *libcontainer.Config, stdin io.Reader, stdout, stderr io.Writer, console, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) {
+	var (
+		err error
+	)
+
+	// create a pipe so that we can synchronize with the namespaced process and
+	// pass the veth name to the child
+	syncPipe, err := syncpipe.NewSyncPipe()
+	if err != nil {
+		return -1, err
+	}
+	defer syncPipe.Close()
+
+	command := createCommand(container, console, dataPath, os.Args[0], syncPipe.Child(), args)
+	// Note: these are only used in non-tty mode
+	// if there is a tty for the container it will be opened within the namespace and the
+	// fds will be duped to stdin, stdout, and stderr
+	command.Stdin = stdin
+	command.Stdout = stdout
+	command.Stderr = stderr
+
+	if err := command.Start(); err != nil {
+		return -1, err
+	}
+
+	// Now that we've passed the pipe to the child, close our side
+	syncPipe.CloseChild()
+
+	started, err := system.GetProcessStartTime(command.Process.Pid)
+	if err != nil {
+		return -1, err
+	}
+
+	// Do this before syncing with child so that no children
+	// can escape the cgroup
+	cgroupRef, err := SetupCgroups(container, command.Process.Pid)
+	if err != nil {
+		command.Process.Kill()
+		command.Wait()
+		return -1, err
+	}
+	defer cgroupRef.Cleanup()
+
+	cgroupPaths, err := cgroupRef.Paths()
+	if err != nil {
+		command.Process.Kill()
+		command.Wait()
+		return -1, err
+	}
+
+	var networkState network.NetworkState
+	if err := InitializeNetworking(container, command.Process.Pid, syncPipe, &networkState); err != nil {
+		command.Process.Kill()
+		command.Wait()
+		return -1, err
+	}
+
+	state := &libcontainer.State{
+		InitPid:       command.Process.Pid,
+		InitStartTime: started,
+		NetworkState:  networkState,
+		CgroupPaths:   cgroupPaths,
+	}
+
+	if err := libcontainer.SaveState(dataPath, state); err != nil {
+		command.Process.Kill()
+		command.Wait()
+		return -1, err
+	}
+	defer libcontainer.DeleteState(dataPath)
+
+	// Sync with child
+	if err := syncPipe.ReadFromChild(); err != nil {
+		command.Process.Kill()
+		command.Wait()
+		return -1, err
+	}
+
+	if startCallback != nil {
+		startCallback()
+	}
+
+	if err := command.Wait(); err != nil {
+		if _, ok := err.(*exec.ExitError); !ok {
+			return -1, err
+		}
+	}
+
+	return command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil
+}
+
+// DefaultCreateCommand will return an exec.Cmd with the Cloneflags set to the proper namespaces
+// defined on the container's configuration and use the current binary as the init with the
+// args provided
+//
+// console: the /dev/console to setup inside the container
+// init: the program executed inside the namespaces
+// root: the path to the container json file and information
+// pipe: sync pipe to synchronize the parent and child processes
+// args: the arguments to pass to the container to run as the user's program
+func DefaultCreateCommand(container *libcontainer.Config, console, dataPath, init string, pipe *os.File, args []string) *exec.Cmd {
+	// get our binary name from arg0 so we can always reexec ourself
+	env := []string{
+		"console=" + console,
+		"pipe=3",
+		"data_path=" + dataPath,
+	}
+
+	/*
+	   TODO: move user and wd into env
+	   if user != "" {
+	       env = append(env, "user="+user)
+	   }
+	   if workingDir != "" {
+	       env = append(env, "wd="+workingDir)
+	   }
+	*/
+
+	command := exec.Command(init, append([]string{"init", "--"}, args...)...)
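+	// the child re-runs the current binary with the "init" subcommand; "--"
+	// separates init's own arguments from the user's command line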
+	// make sure the process is executed inside the context of the rootfs
+	command.Dir = container.RootFs
+	command.Env = append(os.Environ(), env...)
+
+	if command.SysProcAttr == nil {
+		command.SysProcAttr = &syscall.SysProcAttr{}
+	}
+	command.SysProcAttr.Cloneflags = uintptr(GetNamespaceFlags(container.Namespaces))
+
+	command.SysProcAttr.Pdeathsig = syscall.SIGKILL
+	command.ExtraFiles = []*os.File{pipe}
+
+	return command
+}
+
+// SetupCgroups applies the cgroup restrictions to the process running in the container based
+// on the container's configuration
+func SetupCgroups(container *libcontainer.Config, nspid int) (cgroups.ActiveCgroup, error) {
+	if container.Cgroups != nil {
+		c := container.Cgroups
+
+		if systemd.UseSystemd() {
+			return systemd.Apply(c, nspid)
+		}
+
+		return fs.Apply(c, nspid)
+	}
+
+	return nil, nil
+}
+
+// InitializeNetworking creates the container's network stack outside of the namespace and moves
+// interfaces into the container's net namespaces if necessary
+func InitializeNetworking(container *libcontainer.Config, nspid int, pipe *syncpipe.SyncPipe, networkState *network.NetworkState) error {
+	for _, config := range container.Networks {
+		strategy, err := network.GetStrategy(config.Type)
+		if err != nil {
+			return err
+		}
+		if err := strategy.Create((*network.Network)(config), nspid, networkState); err != nil {
+			return err
+		}
+	}
+	return pipe.SendToChild(networkState)
+}
+
+// GetNamespaceFlags parses the container's Namespaces options to set the correct
+// flags on clone, unshare, and setns
+func GetNamespaceFlags(namespaces map[string]bool) (flag int) {
+	for key, enabled := range namespaces {
+		if enabled {
+			if ns := GetNamespace(key); ns != nil {
+				flag |= ns.Value
+			}
+		}
+	}
+	return flag
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/namespaces/execin.go docker.io-1.3.2~dfsg1/libcontainer/namespaces/execin.go
--- docker.io-0.9.1~dfsg1/libcontainer/namespaces/execin.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/namespaces/execin.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,119 @@
+// +build linux
+
+package namespaces
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"syscall"
+
+	"github.com/docker/libcontainer"
+	"github.com/docker/libcontainer/apparmor"
+	"github.com/docker/libcontainer/cgroups"
+	"github.com/docker/libcontainer/label"
+	"github.com/docker/libcontainer/syncpipe"
+	"github.com/docker/libcontainer/system"
+)
+
+// ExecIn reexecs the initPath with argv 0 rewritten to "nsenter" so that it is able to run the
+// setns code in a single-threaded environment, joining the existing containers' namespaces.
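+// (The C constructor in the nsenter package matches argv 0 against the
+// "nsenter" prefix, so names like "nsenter-exec" still take the setns path;
+// see nsenter.c further below.)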
+func ExecIn(container *libcontainer.Config, state *libcontainer.State, userArgs []string, initPath, action string,
+	stdin io.Reader, stdout, stderr io.Writer, console string, startCallback func(*exec.Cmd)) (int, error) {
+
+	args := []string{fmt.Sprintf("nsenter-%s", action), "--nspid", strconv.Itoa(state.InitPid)}
+
+	if console != "" {
+		args = append(args, "--console", console)
+	}
+
+	cmd := &exec.Cmd{
+		Path: initPath,
+		Args: append(args, append([]string{"--"}, userArgs...)...),
+	}
+
+	if filepath.Base(initPath) == initPath {
+		if lp, err := exec.LookPath(initPath); err == nil {
+			cmd.Path = lp
+		}
+	}
+
+	pipe, err := syncpipe.NewSyncPipe()
+	if err != nil {
+		return -1, err
+	}
+	defer pipe.Close()
+
+	// Note: these are only used in non-tty mode
+	// if there is a tty for the container it will be opened within the namespace and the
+	// fds will be duped to stdin, stdout, and stderr
+	cmd.Stdin = stdin
+	cmd.Stdout = stdout
+	cmd.Stderr = stderr
+
+	cmd.ExtraFiles = []*os.File{pipe.Child()}
+
+	if err := cmd.Start(); err != nil {
+		return -1, err
+	}
+	pipe.CloseChild()
+
+	// Enter cgroups.
+	if err := EnterCgroups(state, cmd.Process.Pid); err != nil {
+		return -1, err
+	}
+
+	if err := pipe.SendToChild(container); err != nil {
+		cmd.Process.Kill()
+		cmd.Wait()
+		return -1, err
+	}
+
+	if startCallback != nil {
+		startCallback(cmd)
+	}
+
+	if err := cmd.Wait(); err != nil {
+		if _, ok := err.(*exec.ExitError); !ok {
+			return -1, err
+		}
+	}
+
+	return cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil
+}
+
+// FinalizeSetns expects that the setns calls have been setup and that it has joined an
+// existing namespace
+func FinalizeSetns(container *libcontainer.Config, args []string) error {
+	// clear the current process's env and replace it with the environment defined on the container
+	if err := LoadContainerEnvironment(container); err != nil {
+		return err
+	}
+
+	if err := FinalizeNamespace(container); err != nil {
+		return err
+	}
+
+	if err := apparmor.ApplyProfile(container.AppArmorProfile); err != nil {
+		return fmt.Errorf("set apparmor profile %s: %s", container.AppArmorProfile, err)
+	}
+
+	if container.ProcessLabel != "" {
+		if err := label.SetProcessLabel(container.ProcessLabel); err != nil {
+			return err
+		}
+	}
+
+	if err := system.Execv(args[0], args[0:], container.Env); err != nil {
+		return err
+	}
+
+	panic("unreachable")
+}
+
+func EnterCgroups(state *libcontainer.State, pid int) error {
+	return cgroups.EnterPid(state.CgroupPaths, pid)
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/namespaces/init.go docker.io-1.3.2~dfsg1/libcontainer/namespaces/init.go
--- docker.io-0.9.1~dfsg1/libcontainer/namespaces/init.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/namespaces/init.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,263 @@
+// +build linux
+
+package namespaces
+
+import (
+	"fmt"
+	"os"
+	"strings"
+	"syscall"
+
+	"github.com/docker/libcontainer"
+	"github.com/docker/libcontainer/apparmor"
+	"github.com/docker/libcontainer/console"
+	"github.com/docker/libcontainer/label"
+	"github.com/docker/libcontainer/mount"
+	"github.com/docker/libcontainer/netlink"
+	"github.com/docker/libcontainer/network"
+	"github.com/docker/libcontainer/security/capabilities"
+	"github.com/docker/libcontainer/security/restrict"
+	"github.com/docker/libcontainer/syncpipe"
+	"github.com/docker/libcontainer/system"
+	"github.com/docker/libcontainer/user"
+	"github.com/docker/libcontainer/utils"
+)
+
+// TODO(vishh): This is part of the libcontainer API and it does much more than just namespaces related work.
+// Move this to libcontainer package.
+// Init is the init process that first runs inside a new namespace to setup mounts, users, networking,
+// and other options required for the new container.
+// The caller of the Init function has to ensure that the go runtime is locked to an OS thread
+// (using runtime.LockOSThread) else system calls like setns called within Init may not work as intended.
+func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, syncPipe *syncpipe.SyncPipe, args []string) (err error) {
+	defer func() {
+		if err != nil {
+			syncPipe.ReportChildError(err)
+		}
+	}()
+
+	rootfs, err := utils.ResolveRootfs(uncleanRootfs)
+	if err != nil {
+		return err
+	}
+
+	// clear the current process's env and replace it with the environment
+	// defined on the container
+	if err := LoadContainerEnvironment(container); err != nil {
+		return err
+	}
+
+	// We always read this as it is a way to sync with the parent as well
+	var networkState *network.NetworkState
+	if err := syncPipe.ReadFromParent(&networkState); err != nil {
+		return err
+	}
+
+	if consolePath != "" {
+		if err := console.OpenAndDup(consolePath); err != nil {
+			return err
+		}
+	}
+	if _, err := syscall.Setsid(); err != nil {
+		return fmt.Errorf("setsid %s", err)
+	}
+	if consolePath != "" {
+		if err := system.Setctty(); err != nil {
+			return fmt.Errorf("setctty %s", err)
+		}
+	}
+	if err := setupNetwork(container, networkState); err != nil {
+		return fmt.Errorf("setup networking %s", err)
+	}
+	if err := setupRoute(container); err != nil {
+		return fmt.Errorf("setup route %s", err)
+	}
+
+	label.Init()
+
+	if err := mount.InitializeMountNamespace(rootfs,
+		consolePath,
+		container.RestrictSys,
+		(*mount.MountConfig)(container.MountConfig)); err != nil {
+		return fmt.Errorf("setup mount namespace %s", err)
+	}
+
+	if container.Hostname != "" {
+		if err := syscall.Sethostname([]byte(container.Hostname)); err != nil {
+			return fmt.Errorf("sethostname %s", err)
+		}
+	}
+
+	if err := apparmor.ApplyProfile(container.AppArmorProfile); err != nil {
+		return fmt.Errorf("set apparmor profile %s: %s", container.AppArmorProfile, err)
+	}
+
+	if err := label.SetProcessLabel(container.ProcessLabel); err != nil {
+		return fmt.Errorf("set process label %s", err)
+	}
+
+	// TODO: (crosbymichael) make this configurable at the Config level
+	if container.RestrictSys {
+		if err := restrict.Restrict("proc/sys", "proc/sysrq-trigger", "proc/irq", "proc/bus"); err != nil {
+			return err
+		}
+	}
+
+	pdeathSignal, err := system.GetParentDeathSignal()
+	if err != nil {
+		return fmt.Errorf("get parent death signal %s", err)
+	}
+
+	if err := FinalizeNamespace(container); err != nil {
+		return fmt.Errorf("finalize namespace %s", err)
+	}
+
+	// FinalizeNamespace can change user/group which clears the parent death
+	// signal, so we restore it here.
+	if err := RestoreParentDeathSignal(pdeathSignal); err != nil {
+		return fmt.Errorf("restore parent death signal %s", err)
+	}
+
+	return system.Execv(args[0], args[0:], os.Environ())
+}
+
+// RestoreParentDeathSignal sets the parent death signal to old.
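+// The kernel clears PR_SET_PDEATHSIG when a process changes credentials, which
+// is why the setuid/setgid calls in FinalizeNamespace make this restore necessary.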
+func RestoreParentDeathSignal(old int) error {
+	if old == 0 {
+		return nil
+	}
+
+	current, err := system.GetParentDeathSignal()
+	if err != nil {
+		return fmt.Errorf("get parent death signal %s", err)
+	}
+
+	if old == current {
+		return nil
+	}
+
+	if err := system.ParentDeathSignal(uintptr(old)); err != nil {
+		return fmt.Errorf("set parent death signal %s", err)
+	}
+
+	// Signal self if parent is already dead. Does nothing if running in a new
+	// PID namespace, as Getppid will always return 0.
+	if syscall.Getppid() == 1 {
+		return syscall.Kill(syscall.Getpid(), syscall.SIGKILL)
+	}
+
+	return nil
+}
+
+// SetupUser changes the groups, gid, and uid for the user inside the container
+func SetupUser(u string) error {
+	uid, gid, suppGids, home, err := user.GetUserGroupSupplementaryHome(u, syscall.Getuid(), syscall.Getgid(), "/")
+	if err != nil {
+		return fmt.Errorf("get supplementary groups %s", err)
+	}
+
+	if err := syscall.Setgroups(suppGids); err != nil {
+		return fmt.Errorf("setgroups %s", err)
+	}
+
+	if err := syscall.Setgid(gid); err != nil {
+		return fmt.Errorf("setgid %s", err)
+	}
+
+	if err := syscall.Setuid(uid); err != nil {
+		return fmt.Errorf("setuid %s", err)
+	}
+
+	// if we didn't get HOME already, set it based on the user's HOME
+	if envHome := os.Getenv("HOME"); envHome == "" {
+		if err := os.Setenv("HOME", home); err != nil {
+			return fmt.Errorf("set HOME %s", err)
+		}
+	}
+
+	return nil
+}
+
+// setupNetwork uses the Network config, if it is not nil, to initialize the new veth
+// interface inside the container: it renames the interface to eth0 and sets the MTU
+// and IP address, along with the default gateway
+func setupNetwork(container *libcontainer.Config, networkState *network.NetworkState) error {
+	for _, config := range container.Networks {
+		strategy, err := network.GetStrategy(config.Type)
+		if err != nil {
+			return err
+		}
+
+		err1 := strategy.Initialize((*network.Network)(config), networkState)
+		if err1 != nil {
+			return err1
+		}
+	}
+	return nil
+}
+
+func setupRoute(container *libcontainer.Config) error {
+	for _, config := range container.Routes {
+		if err := netlink.AddRoute(config.Destination, config.Source, config.Gateway, config.InterfaceName); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// FinalizeNamespace drops the caps, sets the correct user
+// and working dir, and closes any leaky file descriptors
+// before execing the command inside the namespace
+func FinalizeNamespace(container *libcontainer.Config) error {
+	// Ensure that all non-standard fds we may have accidentally
+	// inherited are marked close-on-exec so they stay out of the
+	// container
+	if err := utils.CloseExecFrom(3); err != nil {
+		return fmt.Errorf("close open file descriptors %s", err)
+	}
+
+	// drop capabilities in bounding set before changing user
+	if err := capabilities.DropBoundingSet(container.Capabilities); err != nil {
+		return fmt.Errorf("drop bounding set %s", err)
+	}
+
+	// preserve existing capabilities while we change users
+	if err := system.SetKeepCaps(); err != nil {
+		return fmt.Errorf("set keep caps %s", err)
+	}
+
+	if err := SetupUser(container.User); err != nil {
+		return fmt.Errorf("setup user %s", err)
+	}
+
+	if err := system.ClearKeepCaps(); err != nil {
+		return fmt.Errorf("clear keep caps %s", err)
+	}
+
+	// drop all other capabilities
+	if err := capabilities.DropCapabilities(container.Capabilities); err != nil {
+		return fmt.Errorf("drop capabilities %s", err)
+	}
+
+	if container.WorkingDir != "" {
+		if err := syscall.Chdir(container.WorkingDir); err != nil {
+			return fmt.Errorf("chdir to %s %s", container.WorkingDir, err)
+		}
+	}
+
+	return nil
+}
+
+func LoadContainerEnvironment(container *libcontainer.Config) error {
+	os.Clearenv()
+	for _, pair := range container.Env {
+		p := strings.SplitN(pair, "=", 2)
+		if len(p) < 2 {
+			return fmt.Errorf("invalid environment '%v'", pair)
+		}
+		if err := os.Setenv(p[0], p[1]); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/namespaces/nsenter/nsenter.c docker.io-1.3.2~dfsg1/libcontainer/namespaces/nsenter/nsenter.c
--- docker.io-0.9.1~dfsg1/libcontainer/namespaces/nsenter/nsenter.c 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/namespaces/nsenter/nsenter.c 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,218 @@
+// +build cgo
+//
+// formatted with indent -linux nsenter.c
+
+#include <errno.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+static const int kBufSize = 256;
+static const char *kNsEnter = "nsenter";
+
+void get_args(int *argc, char ***argv)
+{
+	// Read argv
+	int fd = open("/proc/self/cmdline", O_RDONLY);
+
+	// Read the whole commandline.
+	ssize_t contents_size = 0;
+	ssize_t contents_offset = 0;
+	char *contents = NULL;
+	ssize_t bytes_read = 0;
+	do {
+		contents_size += kBufSize;
+		contents = (char *)realloc(contents, contents_size);
+		bytes_read =
+		    read(fd, contents + contents_offset,
+			 contents_size - contents_offset);
+		contents_offset += bytes_read;
+	}
+	while (bytes_read > 0);
+	close(fd);
+
+	// Parse the commandline into an argv. /proc/self/cmdline has \0 delimited args.
+	ssize_t i;
+	*argc = 0;
+	for (i = 0; i < contents_offset; i++) {
+		if (contents[i] == '\0') {
+			(*argc)++;
+		}
+	}
+	*argv = (char **)malloc(sizeof(char *) * ((*argc) + 1));
+	int idx;
+	for (idx = 0; idx < (*argc); idx++) {
+		(*argv)[idx] = contents;
+		contents += strlen(contents) + 1;
+	}
+	(*argv)[*argc] = NULL;
+}
+
+// Use the raw setns syscall for versions of glibc that don't include it (namely glibc-2.12)
+#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 14
+#define _GNU_SOURCE
+#include <unistd.h>
+#include "syscall.h"
+#ifdef SYS_setns
+int setns(int fd, int nstype)
+{
+	return syscall(SYS_setns, fd, nstype);
+}
+#endif
+#endif
+
+void print_usage()
+{
+	fprintf(stderr,
+		"nsenter --nspid <pid> --console <console> -- cmd1 arg1 arg2...\n");
+}
+
+void nsenter()
+{
+	int argc, c;
+	char **argv;
+	get_args(&argc, &argv);
+
+	// check argv 0 to ensure that we are supposed to setns
+	// we use strncmp to test for a value of "nsenter", but this also allows alternate implementations
+	// after the setns code path to continue to use argv 0 to determine the actions to be run,
+	// resulting in the ability to specify "nsenter-mknod", "nsenter-exec", etc...
+ if (strncmp(argv[0], kNsEnter, strlen(kNsEnter)) != 0) { + return; + } + + static const struct option longopts[] = { + {"nspid", required_argument, NULL, 'n'}, + {"console", required_argument, NULL, 't'}, + {NULL, 0, NULL, 0} + }; + + pid_t init_pid = -1; + char *init_pid_str = NULL; + char *console = NULL; + while ((c = getopt_long_only(argc, argv, "n:c:", longopts, NULL)) != -1) { + switch (c) { + case 'n': + init_pid_str = optarg; + break; + case 't': + console = optarg; + break; + } + } + + if (init_pid_str == NULL) { + print_usage(); + exit(1); + } + + init_pid = strtol(init_pid_str, NULL, 10); + if ((init_pid == 0 && errno == EINVAL) || errno == ERANGE) { + fprintf(stderr, + "nsenter: Failed to parse PID from \"%s\" with output \"%d\" and error: \"%s\"\n", + init_pid_str, init_pid, strerror(errno)); + print_usage(); + exit(1); + } + + argc -= 3; + argv += 3; + + if (setsid() == -1) { + fprintf(stderr, "setsid failed. Error: %s\n", strerror(errno)); + exit(1); + } + // before we setns we need to dup the console + int consolefd = -1; + if (console != NULL) { + consolefd = open(console, O_RDWR); + if (consolefd < 0) { + fprintf(stderr, + "nsenter: failed to open console %s %s\n", + console, strerror(errno)); + exit(1); + } + } + // Setns on all supported namespaces. + char ns_dir[PATH_MAX]; + memset(ns_dir, 0, PATH_MAX); + snprintf(ns_dir, PATH_MAX - 1, "/proc/%d/ns/", init_pid); + + char *namespaces[] = { "ipc", "uts", "net", "pid", "mnt" }; + const int num = sizeof(namespaces) / sizeof(char *); + int i; + for (i = 0; i < num; i++) { + char buf[PATH_MAX]; + memset(buf, 0, PATH_MAX); + snprintf(buf, PATH_MAX - 1, "%s%s", ns_dir, namespaces[i]); + int fd = open(buf, O_RDONLY); + if (fd == -1) { + // Ignore nonexistent namespaces. + if (errno == ENOENT) + continue; + + fprintf(stderr, + "nsenter: Failed to open ns file \"%s\" for ns \"%s\" with error: \"%s\"\n", + buf, namespaces[i], strerror(errno)); + exit(1); + } + // Set the namespace. + if (setns(fd, 0) == -1) { + fprintf(stderr, + "nsenter: Failed to setns for \"%s\" with error: \"%s\"\n", + namespaces[i], strerror(errno)); + exit(1); + } + close(fd); + } + + // We must fork to actually enter the PID namespace. + int child = fork(); + if (child == 0) { + if (consolefd != -1) { + if (dup2(consolefd, STDIN_FILENO) != 0) { + fprintf(stderr, "nsenter: failed to dup 0 %s\n", + strerror(errno)); + exit(1); + } + if (dup2(consolefd, STDOUT_FILENO) != STDOUT_FILENO) { + fprintf(stderr, "nsenter: failed to dup 1 %s\n", + strerror(errno)); + exit(1); + } + if (dup2(consolefd, STDERR_FILENO) != STDERR_FILENO) { + fprintf(stderr, "nsenter: failed to dup 2 %s\n", + strerror(errno)); + exit(1); + } + } + // Finish executing, let the Go runtime take over. + return; + } else { + // Parent, wait for the child. + int status = 0; + if (waitpid(child, &status, 0) == -1) { + fprintf(stderr, + "nsenter: Failed to waitpid with error: \"%s\"\n", + strerror(errno)); + exit(1); + } + // Forward the child's exit code or re-send its death signal. 
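+		// (Re-raising WTERMSIG(status) on ourselves lets our own parent
+		// observe the same fatal signal; if the signal does not terminate
+		// us, the exit(1) below is the fallback.)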
+		if (WIFEXITED(status)) {
+			exit(WEXITSTATUS(status));
+		} else if (WIFSIGNALED(status)) {
+			kill(getpid(), WTERMSIG(status));
+		}
+
+		exit(1);
+	}
+
+	return;
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/namespaces/nsenter/nsenter.go docker.io-1.3.2~dfsg1/libcontainer/namespaces/nsenter/nsenter.go
--- docker.io-0.9.1~dfsg1/libcontainer/namespaces/nsenter/nsenter.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/namespaces/nsenter/nsenter.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,10 @@
+// +build linux
+
+package nsenter
+
+/*
+__attribute__((constructor)) init() {
+	nsenter();
+}
+*/
+import "C"
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/namespaces/nsenter/nsenter_unsupported.go docker.io-1.3.2~dfsg1/libcontainer/namespaces/nsenter/nsenter_unsupported.go
--- docker.io-0.9.1~dfsg1/libcontainer/namespaces/nsenter/nsenter_unsupported.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/namespaces/nsenter/nsenter_unsupported.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,3 @@
+// +build !linux !cgo
+
+package nsenter
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/namespaces/nsenter/README.md docker.io-1.3.2~dfsg1/libcontainer/namespaces/nsenter/README.md
--- docker.io-0.9.1~dfsg1/libcontainer/namespaces/nsenter/README.md 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/namespaces/nsenter/README.md 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,6 @@
+## nsenter
+
+The `nsenter` package registers a special init constructor that is called before the Go runtime has
+a chance to boot. This provides us the ability to `setns` on existing namespaces and avoid the issues
+that the Go runtime has with multiple threads. This constructor is only called if this package is
+imported (registered) in your Go application and argv 0 is `nsenter`.
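As an illustration of the registration the README describes, here is a minimal sketch (the enclosing main package is hypothetical; only the import path comes from this diff):

	package main

	import (
		// Importing the package for its side effects links in the C
		// constructor, which runs before the Go runtime boots and calls
		// setns() whenever argv[0] starts with "nsenter".
		_ "github.com/docker/libcontainer/namespaces/nsenter"
	)

	func main() {
		// Normal CLI dispatch goes here. When this binary is re-executed
		// with argv[0] set to e.g. "nsenter-exec", the constructor has
		// already joined the target namespaces by the time main runs.
	}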
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/namespaces/types.go docker.io-1.3.2~dfsg1/libcontainer/namespaces/types.go --- docker.io-0.9.1~dfsg1/libcontainer/namespaces/types.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/namespaces/types.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,50 @@ +package namespaces + +import "errors" + +type ( + Namespace struct { + Key string `json:"key,omitempty"` + Value int `json:"value,omitempty"` + File string `json:"file,omitempty"` + } + Namespaces []*Namespace +) + +// namespaceList is used to convert the libcontainer types +// into the names of the files located in /proc//ns/* for +// each namespace +var ( + namespaceList = Namespaces{} + ErrUnkownNamespace = errors.New("Unknown namespace") + ErrUnsupported = errors.New("Unsupported method") +) + +func (ns *Namespace) String() string { + return ns.Key +} + +func GetNamespace(key string) *Namespace { + for _, ns := range namespaceList { + if ns.Key == key { + cpy := *ns + return &cpy + } + } + return nil +} + +// Contains returns true if the specified Namespace is +// in the slice +func (n Namespaces) Contains(ns string) bool { + return n.Get(ns) != nil +} + +func (n Namespaces) Get(ns string) *Namespace { + for _, nsp := range n { + if nsp != nil && nsp.Key == ns { + return nsp + } + } + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/namespaces/types_linux.go docker.io-1.3.2~dfsg1/libcontainer/namespaces/types_linux.go --- docker.io-0.9.1~dfsg1/libcontainer/namespaces/types_linux.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/namespaces/types_linux.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,16 @@ +package namespaces + +import ( + "syscall" +) + +func init() { + namespaceList = Namespaces{ + {Key: "NEWNS", Value: syscall.CLONE_NEWNS, File: "mnt"}, + {Key: "NEWUTS", Value: syscall.CLONE_NEWUTS, File: "uts"}, + {Key: "NEWIPC", Value: syscall.CLONE_NEWIPC, File: "ipc"}, + {Key: "NEWUSER", Value: syscall.CLONE_NEWUSER, File: "user"}, + {Key: "NEWPID", Value: syscall.CLONE_NEWPID, File: "pid"}, + {Key: "NEWNET", Value: syscall.CLONE_NEWNET, File: "net"}, + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/namespaces/types_test.go docker.io-1.3.2~dfsg1/libcontainer/namespaces/types_test.go --- docker.io-0.9.1~dfsg1/libcontainer/namespaces/types_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/namespaces/types_test.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,30 @@ +package namespaces + +import ( + "testing" +) + +func TestNamespacesContains(t *testing.T) { + ns := Namespaces{ + GetNamespace("NEWPID"), + GetNamespace("NEWNS"), + GetNamespace("NEWUTS"), + } + + if ns.Contains("NEWNET") { + t.Fatal("namespaces should not contain NEWNET") + } + + if !ns.Contains("NEWPID") { + t.Fatal("namespaces should contain NEWPID but does not") + } + + withNil := Namespaces{ + GetNamespace("UNDEFINED"), // this element will be nil + GetNamespace("NEWPID"), + } + + if !withNil.Contains("NEWPID") { + t.Fatal("namespaces should contain NEWPID but does not") + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/netlink/MAINTAINERS docker.io-1.3.2~dfsg1/libcontainer/netlink/MAINTAINERS --- docker.io-0.9.1~dfsg1/libcontainer/netlink/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/netlink/MAINTAINERS 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Guillaume J. 
Charmes (@creack) diff -Nru docker.io-0.9.1~dfsg1/libcontainer/netlink/netlink.go docker.io-1.3.2~dfsg1/libcontainer/netlink/netlink.go --- docker.io-0.9.1~dfsg1/libcontainer/netlink/netlink.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/netlink/netlink.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,31 @@ +// Packet netlink provide access to low level Netlink sockets and messages. +// +// Actual implementations are in: +// netlink_linux.go +// netlink_darwin.go +package netlink + +import ( + "errors" + "net" +) + +var ( + ErrWrongSockType = errors.New("Wrong socket type") + ErrShortResponse = errors.New("Got short response from netlink") + ErrInterfaceExists = errors.New("Network interface already exists") +) + +// A Route is a subnet associated with the interface to reach it. +type Route struct { + *net.IPNet + Iface *net.Interface + Default bool +} + +// An IfAddr defines IP network settings for a given network interface +type IfAddr struct { + Iface *net.Interface + IP net.IP + IPNet *net.IPNet +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/netlink/netlink_linux_arm.go docker.io-1.3.2~dfsg1/libcontainer/netlink/netlink_linux_arm.go --- docker.io-0.9.1~dfsg1/libcontainer/netlink/netlink_linux_arm.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/netlink/netlink_linux_arm.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,5 @@ +package netlink + +func ifrDataByte(b byte) uint8 { + return uint8(b) +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/netlink/netlink_linux.go docker.io-1.3.2~dfsg1/libcontainer/netlink/netlink_linux.go --- docker.io-0.9.1~dfsg1/libcontainer/netlink/netlink_linux.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/netlink/netlink_linux.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,1219 @@ +package netlink + +import ( + "encoding/binary" + "fmt" + "io" + "math/rand" + "net" + "os" + "sync/atomic" + "syscall" + "unsafe" +) + +const ( + IFNAMSIZ = 16 + DEFAULT_CHANGE = 0xFFFFFFFF + IFLA_INFO_KIND = 1 + IFLA_INFO_DATA = 2 + VETH_INFO_PEER = 1 + IFLA_MACVLAN_MODE = 1 + IFLA_VLAN_ID = 1 + IFLA_NET_NS_FD = 28 + IFLA_ADDRESS = 1 + SIOC_BRADDBR = 0x89a0 + SIOC_BRDELBR = 0x89a1 + SIOC_BRADDIF = 0x89a2 +) + +const ( + MACVLAN_MODE_PRIVATE = 1 << iota + MACVLAN_MODE_VEPA + MACVLAN_MODE_BRIDGE + MACVLAN_MODE_PASSTHRU +) + +var nextSeqNr uint32 + +type ifreqHwaddr struct { + IfrnName [IFNAMSIZ]byte + IfruHwaddr syscall.RawSockaddr +} + +type ifreqIndex struct { + IfrnName [IFNAMSIZ]byte + IfruIndex int32 +} + +type ifreqFlags struct { + IfrnName [IFNAMSIZ]byte + Ifruflags uint16 +} + +var native binary.ByteOrder + +func init() { + var x uint32 = 0x01020304 + if *(*byte)(unsafe.Pointer(&x)) == 0x01 { + native = binary.BigEndian + } else { + native = binary.LittleEndian + } +} + +func getIpFamily(ip net.IP) int { + if len(ip) <= net.IPv4len { + return syscall.AF_INET + } + if ip.To4() != nil { + return syscall.AF_INET + } + return syscall.AF_INET6 +} + +type NetlinkRequestData interface { + Len() int + ToWireFormat() []byte +} + +type IfInfomsg struct { + syscall.IfInfomsg +} + +func newIfInfomsg(family int) *IfInfomsg { + return &IfInfomsg{ + IfInfomsg: syscall.IfInfomsg{ + Family: uint8(family), + }, + } +} + +func newIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { + msg := newIfInfomsg(family) + parent.children = append(parent.children, msg) + return msg +} + +func (msg *IfInfomsg) ToWireFormat() []byte { + length := syscall.SizeofIfInfomsg + b := make([]byte, length) + b[0] = 
msg.Family + b[1] = 0 + native.PutUint16(b[2:4], msg.Type) + native.PutUint32(b[4:8], uint32(msg.Index)) + native.PutUint32(b[8:12], msg.Flags) + native.PutUint32(b[12:16], msg.Change) + return b +} + +func (msg *IfInfomsg) Len() int { + return syscall.SizeofIfInfomsg +} + +type IfAddrmsg struct { + syscall.IfAddrmsg +} + +func newIfAddrmsg(family int) *IfAddrmsg { + return &IfAddrmsg{ + IfAddrmsg: syscall.IfAddrmsg{ + Family: uint8(family), + }, + } +} + +func (msg *IfAddrmsg) ToWireFormat() []byte { + length := syscall.SizeofIfAddrmsg + b := make([]byte, length) + b[0] = msg.Family + b[1] = msg.Prefixlen + b[2] = msg.Flags + b[3] = msg.Scope + native.PutUint32(b[4:8], msg.Index) + return b +} + +func (msg *IfAddrmsg) Len() int { + return syscall.SizeofIfAddrmsg +} + +type RtMsg struct { + syscall.RtMsg +} + +func newRtMsg() *RtMsg { + return &RtMsg{ + RtMsg: syscall.RtMsg{ + Table: syscall.RT_TABLE_MAIN, + Scope: syscall.RT_SCOPE_UNIVERSE, + Protocol: syscall.RTPROT_BOOT, + Type: syscall.RTN_UNICAST, + }, + } +} + +func (msg *RtMsg) ToWireFormat() []byte { + length := syscall.SizeofRtMsg + b := make([]byte, length) + b[0] = msg.Family + b[1] = msg.Dst_len + b[2] = msg.Src_len + b[3] = msg.Tos + b[4] = msg.Table + b[5] = msg.Protocol + b[6] = msg.Scope + b[7] = msg.Type + native.PutUint32(b[8:12], msg.Flags) + return b +} + +func (msg *RtMsg) Len() int { + return syscall.SizeofRtMsg +} + +func rtaAlignOf(attrlen int) int { + return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1) +} + +type RtAttr struct { + syscall.RtAttr + Data []byte + children []NetlinkRequestData +} + +func newRtAttr(attrType int, data []byte) *RtAttr { + return &RtAttr{ + RtAttr: syscall.RtAttr{ + Type: uint16(attrType), + }, + children: []NetlinkRequestData{}, + Data: data, + } +} + +func newRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr { + attr := newRtAttr(attrType, data) + parent.children = append(parent.children, attr) + return attr +} + +func (a *RtAttr) Len() int { + if len(a.children) == 0 { + return (syscall.SizeofRtAttr + len(a.Data)) + } + + l := 0 + for _, child := range a.children { + l += child.Len() + } + l += syscall.SizeofRtAttr + return rtaAlignOf(l + len(a.Data)) +} + +func (a *RtAttr) ToWireFormat() []byte { + length := a.Len() + buf := make([]byte, rtaAlignOf(length)) + + if a.Data != nil { + copy(buf[4:], a.Data) + } else { + next := 4 + for _, child := range a.children { + childBuf := child.ToWireFormat() + copy(buf[next:], childBuf) + next += rtaAlignOf(len(childBuf)) + } + } + + if l := uint16(length); l != 0 { + native.PutUint16(buf[0:2], l) + } + native.PutUint16(buf[2:4], a.Type) + return buf +} + +func uint32Attr(t int, n uint32) *RtAttr { + buf := make([]byte, 4) + native.PutUint32(buf, n) + return newRtAttr(t, buf) +} + +type NetlinkRequest struct { + syscall.NlMsghdr + Data []NetlinkRequestData +} + +func (rr *NetlinkRequest) ToWireFormat() []byte { + length := rr.Len + dataBytes := make([][]byte, len(rr.Data)) + for i, data := range rr.Data { + dataBytes[i] = data.ToWireFormat() + length += uint32(len(dataBytes[i])) + } + b := make([]byte, length) + native.PutUint32(b[0:4], length) + native.PutUint16(b[4:6], rr.Type) + native.PutUint16(b[6:8], rr.Flags) + native.PutUint32(b[8:12], rr.Seq) + native.PutUint32(b[12:16], rr.Pid) + + next := 16 + for _, data := range dataBytes { + copy(b[next:], data) + next += len(data) + } + return b +} + +func (rr *NetlinkRequest) AddData(data NetlinkRequestData) { + if data != nil { + rr.Data = append(rr.Data, data) + 
} +} + +func newNetlinkRequest(proto, flags int) *NetlinkRequest { + return &NetlinkRequest{ + NlMsghdr: syscall.NlMsghdr{ + Len: uint32(syscall.NLMSG_HDRLEN), + Type: uint16(proto), + Flags: syscall.NLM_F_REQUEST | uint16(flags), + Seq: atomic.AddUint32(&nextSeqNr, 1), + }, + } +} + +type NetlinkSocket struct { + fd int + lsa syscall.SockaddrNetlink +} + +func getNetlinkSocket() (*NetlinkSocket, error) { + fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, syscall.NETLINK_ROUTE) + if err != nil { + return nil, err + } + s := &NetlinkSocket{ + fd: fd, + } + s.lsa.Family = syscall.AF_NETLINK + if err := syscall.Bind(fd, &s.lsa); err != nil { + syscall.Close(fd) + return nil, err + } + + return s, nil +} + +func (s *NetlinkSocket) Close() { + syscall.Close(s.fd) +} + +func (s *NetlinkSocket) Send(request *NetlinkRequest) error { + if err := syscall.Sendto(s.fd, request.ToWireFormat(), 0, &s.lsa); err != nil { + return err + } + return nil +} + +func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) { + rb := make([]byte, syscall.Getpagesize()) + nr, _, err := syscall.Recvfrom(s.fd, rb, 0) + if err != nil { + return nil, err + } + if nr < syscall.NLMSG_HDRLEN { + return nil, ErrShortResponse + } + rb = rb[:nr] + return syscall.ParseNetlinkMessage(rb) +} + +func (s *NetlinkSocket) GetPid() (uint32, error) { + lsa, err := syscall.Getsockname(s.fd) + if err != nil { + return 0, err + } + switch v := lsa.(type) { + case *syscall.SockaddrNetlink: + return v.Pid, nil + } + return 0, ErrWrongSockType +} + +func (s *NetlinkSocket) CheckMessage(m syscall.NetlinkMessage, seq, pid uint32) error { + if m.Header.Seq != seq { + return fmt.Errorf("netlink: invalid seq %d, expected %d", m.Header.Seq, seq) + } + if m.Header.Pid != pid { + return fmt.Errorf("netlink: wrong pid %d, expected %d", m.Header.Pid, pid) + } + if m.Header.Type == syscall.NLMSG_DONE { + return io.EOF + } + if m.Header.Type == syscall.NLMSG_ERROR { + e := int32(native.Uint32(m.Data[0:4])) + if e == 0 { + return io.EOF + } + return syscall.Errno(-e) + } + return nil +} + +func (s *NetlinkSocket) HandleAck(seq uint32) error { + pid, err := s.GetPid() + if err != nil { + return err + } + +outer: + for { + msgs, err := s.Receive() + if err != nil { + return err + } + for _, m := range msgs { + if err := s.CheckMessage(m, seq, pid); err != nil { + if err == io.EOF { + break outer + } + return err + } + } + } + + return nil +} + +func zeroTerminated(s string) []byte { + return []byte(s + "\000") +} + +func nonZeroTerminated(s string) []byte { + return []byte(s) +} + +// Add a new network link of a specified type. +// This is identical to running: ip link add $name type $linkType +func NetworkLinkAdd(name string, linkType string) error { + if name == "" || linkType == "" { + return fmt.Errorf("Neither link name nor link type can be empty!") + } + + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + wb.AddData(msg) + + linkInfo := newRtAttr(syscall.IFLA_LINKINFO, nil) + newRtAttrChild(linkInfo, IFLA_INFO_KIND, nonZeroTerminated(linkType)) + wb.AddData(linkInfo) + + nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name)) + wb.AddData(nameData) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +// Delete a network link. 
+// This is identical to running: ip link del $name +func NetworkLinkDel(name string) error { + if name == "" { + return fmt.Errorf("Network link name can not be empty!") + } + + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + + wb := newNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Index = int32(iface.Index) + wb.AddData(msg) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +// Bring up a particular network interface. +// This is identical to running: ip link set dev $name up +func NetworkLinkUp(iface *net.Interface) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Index = int32(iface.Index) + msg.Flags = syscall.IFF_UP + msg.Change = syscall.IFF_UP + wb.AddData(msg) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +// Bring down a particular network interface. +// This is identical to running: ip link set $name down +func NetworkLinkDown(iface *net.Interface) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Index = int32(iface.Index) + msg.Flags = 0 & ^syscall.IFF_UP + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +// Set link layer address, i.e. MAC Address. +// This is identical to running: ip link set dev $name address $macaddress +func NetworkSetMacAddress(iface *net.Interface, macaddr string) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + hwaddr, err := net.ParseMAC(macaddr) + if err != nil { + return err + } + + var ( + MULTICAST byte = 0x1 + LOCALOUI byte = 0x2 + ) + + if hwaddr[0]&0x1 == MULTICAST || hwaddr[0]&0x2 != LOCALOUI { + return fmt.Errorf("Incorrect Local MAC Address specified: %s", macaddr) + } + + wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Index = int32(iface.Index) + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + + macdata := make([]byte, 6) + copy(macdata, hwaddr) + data := newRtAttr(IFLA_ADDRESS, macdata) + wb.AddData(data) + + if err := s.Send(wb); err != nil { + return err + } + return s.HandleAck(wb.Seq) +} + +// Set link Maximum Transmission Unit +// This is identical to running: ip link set dev $name mtu $MTU +// bridge is a bitch here https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=292088 +// https://bugzilla.redhat.com/show_bug.cgi?id=697021 +// There is a discussion about how to deal with ifcs joining bridge with MTU > 1500 +// Regular network interfaces do seem to work though!
+func NetworkSetMTU(iface *net.Interface, mtu int) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Type = syscall.RTM_SETLINK + msg.Flags = syscall.NLM_F_REQUEST + msg.Index = int32(iface.Index) + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + wb.AddData(uint32Attr(syscall.IFLA_MTU, uint32(mtu))) + + if err := s.Send(wb); err != nil { + return err + } + return s.HandleAck(wb.Seq) +} + +func networkMasterAction(iface *net.Interface, rtattr *RtAttr) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Type = syscall.RTM_SETLINK + msg.Flags = syscall.NLM_F_REQUEST + msg.Index = int32(iface.Index) + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + wb.AddData(rtattr) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +// Add an interface to bridge. +// This is identical to running: ip link set $name master $master +func NetworkSetMaster(iface, master *net.Interface) error { + data := uint32Attr(syscall.IFLA_MASTER, uint32(master.Index)) + return networkMasterAction(iface, data) +} + +// Remove an interface from the bridge +// This is identical to running: ip link set $name nomaster +func NetworkSetNoMaster(iface *net.Interface) error { + data := uint32Attr(syscall.IFLA_MASTER, 0) + return networkMasterAction(iface, data) +} + +func networkSetNsAction(iface *net.Interface, rtattr *RtAttr) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Index = int32(iface.Index) + wb.AddData(msg) + wb.AddData(rtattr) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +// Move a particular network interface to a particular network namespace +// specified by PID. This is identical to running: ip link set dev $name netns $pid +func NetworkSetNsPid(iface *net.Interface, nspid int) error { + data := uint32Attr(syscall.IFLA_NET_NS_PID, uint32(nspid)) + return networkSetNsAction(iface, data) +} + +// Move a particular network interface to a particular mounted +// network namespace specified by file descriptor. +// This is identical to running: ip link set dev $name netns $fd +func NetworkSetNsFd(iface *net.Interface, fd int) error { + data := uint32Attr(IFLA_NET_NS_FD, uint32(fd)) + return networkSetNsAction(iface, data) +} + +// Rename a particular interface to a different name +// !!! Note that you can't rename an active interface. You need to bring it down before renaming it.
+// This is identical to running: ip link set dev ${oldName} name ${newName} +func NetworkChangeName(iface *net.Interface, newName string) error { + if len(newName) >= IFNAMSIZ { + return fmt.Errorf("Interface name %s too long", newName) + } + + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Index = int32(iface.Index) + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + + nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(newName)) + wb.AddData(nameData) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +// Add a new VETH pair link on the host +// This is identical to running: ip link add name $name type veth peer name $peername +func NetworkCreateVethPair(name1, name2 string) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + wb.AddData(msg) + + nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name1)) + wb.AddData(nameData) + + nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) + newRtAttrChild(nest1, IFLA_INFO_KIND, zeroTerminated("veth")) + nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) + nest3 := newRtAttrChild(nest2, VETH_INFO_PEER, nil) + + newIfInfomsgChild(nest3, syscall.AF_UNSPEC) + newRtAttrChild(nest3, syscall.IFLA_IFNAME, zeroTerminated(name2)) + + wb.AddData(nest1) + + if err := s.Send(wb); err != nil { + return err + } + + if err := s.HandleAck(wb.Seq); err != nil { + if os.IsExist(err) { + return ErrInterfaceExists + } + + return err + } + + return nil +} + +// Add a new VLAN interface with masterDev as its upper device +// This is identical to running: +// ip link add name $name link $masterdev type vlan id $id +func NetworkLinkAddVlan(masterDev, vlanDev string, vlanId uint16) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + + masterDevIfc, err := net.InterfaceByName(masterDev) + if err != nil { + return err + } + + msg := newIfInfomsg(syscall.AF_UNSPEC) + wb.AddData(msg) + + nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) + newRtAttrChild(nest1, IFLA_INFO_KIND, nonZeroTerminated("vlan")) + + nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) + vlanData := make([]byte, 2) + native.PutUint16(vlanData, vlanId) + newRtAttrChild(nest2, IFLA_VLAN_ID, vlanData) + wb.AddData(nest1) + + wb.AddData(uint32Attr(syscall.IFLA_LINK, uint32(masterDevIfc.Index))) + wb.AddData(newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(vlanDev))) + + if err := s.Send(wb); err != nil { + return err + } + return s.HandleAck(wb.Seq) +} + +// Add MAC VLAN network interface with masterDev as its upper device +// This is identical to running: +// ip link add name $name link $masterdev type macvlan mode $mode +func NetworkLinkAddMacVlan(masterDev, macVlanDev string, mode string) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + macVlan := map[string]uint32{ + "private": MACVLAN_MODE_PRIVATE, + "vepa": MACVLAN_MODE_VEPA, + "bridge": MACVLAN_MODE_BRIDGE, + "passthru": MACVLAN_MODE_PASSTHRU, + } + + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + + masterDevIfc, err 
:= net.InterfaceByName(masterDev) + if err != nil { + return err + } + + msg := newIfInfomsg(syscall.AF_UNSPEC) + wb.AddData(msg) + + nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) + newRtAttrChild(nest1, IFLA_INFO_KIND, nonZeroTerminated("macvlan")) + + nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) + macVlanData := make([]byte, 4) + native.PutUint32(macVlanData, macVlan[mode]) + newRtAttrChild(nest2, IFLA_MACVLAN_MODE, macVlanData) + wb.AddData(nest1) + + wb.AddData(uint32Attr(syscall.IFLA_LINK, uint32(masterDevIfc.Index))) + wb.AddData(newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(macVlanDev))) + + if err := s.Send(wb); err != nil { + return err + } + return s.HandleAck(wb.Seq) +} + +func networkLinkIpAction(action, flags int, ifa IfAddr) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + family := getIpFamily(ifa.IP) + + wb := newNetlinkRequest(action, flags) + + msg := newIfAddrmsg(family) + msg.Index = uint32(ifa.Iface.Index) + prefixLen, _ := ifa.IPNet.Mask.Size() + msg.Prefixlen = uint8(prefixLen) + wb.AddData(msg) + + var ipData []byte + if family == syscall.AF_INET { + ipData = ifa.IP.To4() + } else { + ipData = ifa.IP.To16() + } + + localData := newRtAttr(syscall.IFA_LOCAL, ipData) + wb.AddData(localData) + + addrData := newRtAttr(syscall.IFA_ADDRESS, ipData) + wb.AddData(addrData) + + if err := s.Send(wb); err != nil { + return err + } + + return s.HandleAck(wb.Seq) +} + +// Delete an IP address from an interface. This is identical to: +// ip addr del $ip/$ipNet dev $iface +func NetworkLinkDelIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { + return networkLinkIpAction( + syscall.RTM_DELADDR, + syscall.NLM_F_ACK, + IfAddr{iface, ip, ipNet}, + ) +} + +// Add an IP address to an interface.
This is identical to: +// ip addr add $ip/$ipNet dev $iface +func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { + return networkLinkIpAction( + syscall.RTM_NEWADDR, + syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK, + IfAddr{iface, ip, ipNet}, + ) +} + +// Returns an array of IPNet for all the currently routed subnets on ipv4 +// This is similar to the first column of "ip route" output +func NetworkGetRoutes() ([]Route, error) { + s, err := getNetlinkSocket() + if err != nil { + return nil, err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + wb.AddData(msg) + + if err := s.Send(wb); err != nil { + return nil, err + } + + pid, err := s.GetPid() + if err != nil { + return nil, err + } + + res := make([]Route, 0) + +outer: + for { + msgs, err := s.Receive() + if err != nil { + return nil, err + } + for _, m := range msgs { + if err := s.CheckMessage(m, wb.Seq, pid); err != nil { + if err == io.EOF { + break outer + } + return nil, err + } + if m.Header.Type != syscall.RTM_NEWROUTE { + continue + } + + var r Route + + msg := (*RtMsg)(unsafe.Pointer(&m.Data[0:syscall.SizeofRtMsg][0])) + + if msg.Flags&syscall.RTM_F_CLONED != 0 { + // Ignore cloned routes + continue + } + + if msg.Table != syscall.RT_TABLE_MAIN { + // Ignore non-main tables + continue + } + + if msg.Family != syscall.AF_INET { + // Ignore non-ipv4 routes + continue + } + + if msg.Dst_len == 0 { + // Default routes + r.Default = true + } + + attrs, err := syscall.ParseNetlinkRouteAttr(&m) + if err != nil { + return nil, err + } + for _, attr := range attrs { + switch attr.Attr.Type { + case syscall.RTA_DST: + ip := attr.Value + r.IPNet = &net.IPNet{ + IP: ip, + Mask: net.CIDRMask(int(msg.Dst_len), 8*len(ip)), + } + case syscall.RTA_OIF: + index := int(native.Uint32(attr.Value[0:4])) + r.Iface, _ = net.InterfaceByIndex(index) + } + } + if r.Default || r.IPNet != nil { + res = append(res, r) + } + } + } + + return res, nil +} + +// Add a new route table entry. 
+func AddRoute(destination, source, gateway, device string) error { + if destination == "" && source == "" && gateway == "" { + return fmt.Errorf("one of destination, source or gateway must not be blank") + } + + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_NEWROUTE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + msg := newRtMsg() + currentFamily := -1 + var rtAttrs []*RtAttr + + if destination != "" { + destIP, destNet, err := net.ParseCIDR(destination) + if err != nil { + return fmt.Errorf("destination CIDR %s couldn't be parsed", destination) + } + destFamily := getIpFamily(destIP) + currentFamily = destFamily + destLen, bits := destNet.Mask.Size() + if destLen == 0 && bits == 0 { + return fmt.Errorf("destination CIDR %s generated a non-canonical Mask", destination) + } + msg.Family = uint8(destFamily) + msg.Dst_len = uint8(destLen) + var destData []byte + if destFamily == syscall.AF_INET { + destData = destIP.To4() + } else { + destData = destIP.To16() + } + rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_DST, destData)) + } + + if source != "" { + srcIP, srcNet, err := net.ParseCIDR(source) + if err != nil { + return fmt.Errorf("source CIDR %s couldn't be parsed", source) + } + srcFamily := getIpFamily(srcIP) + if currentFamily != -1 && currentFamily != srcFamily { + return fmt.Errorf("source and destination ip were not the same IP family") + } + currentFamily = srcFamily + srcLen, bits := srcNet.Mask.Size() + if srcLen == 0 && bits == 0 { + return fmt.Errorf("source CIDR %s generated a non-canonical Mask", source) + } + msg.Family = uint8(srcFamily) + msg.Src_len = uint8(srcLen) + var srcData []byte + if srcFamily == syscall.AF_INET { + srcData = srcIP.To4() + } else { + srcData = srcIP.To16() + } + rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_SRC, srcData)) + } + + if gateway != "" { + gwIP := net.ParseIP(gateway) + if gwIP == nil { + return fmt.Errorf("gateway IP %s couldn't be parsed", gateway) + } + gwFamily := getIpFamily(gwIP) + if currentFamily != -1 && currentFamily != gwFamily { + return fmt.Errorf("gateway, source, and destination ip were not the same IP family") + } + msg.Family = uint8(gwFamily) + var gwData []byte + if gwFamily == syscall.AF_INET { + gwData = gwIP.To4() + } else { + gwData = gwIP.To16() + } + rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_GATEWAY, gwData)) + } + + wb.AddData(msg) + for _, attr := range rtAttrs { + wb.AddData(attr) + } + + iface, err := net.InterfaceByName(device) + if err != nil { + return err + } + wb.AddData(uint32Attr(syscall.RTA_OIF, uint32(iface.Index))) + + if err := s.Send(wb); err != nil { + return err + } + return s.HandleAck(wb.Seq) +} + +// Add a new default gateway. Identical to: +// ip route add default via $ip +func AddDefaultGw(ip, device string) error { + return AddRoute("", "", ip, device) +} + +// THIS CODE DOES NOT COMMUNICATE WITH KERNEL VIA RTNETLINK INTERFACE +// IT IS HERE FOR BACKWARDS COMPATIBILITY WITH OLDER LINUX KERNELS +// WHICH SHIP WITH OLDER NOT ENTIRELY FUNCTIONAL VERSION OF NETLINK +func getIfSocket() (fd int, err error) { + for _, socket := range []int{ + syscall.AF_INET, + syscall.AF_PACKET, + syscall.AF_INET6, + } { + if fd, err = syscall.Socket(socket, syscall.SOCK_DGRAM, 0); err == nil { + break + } + } + if err == nil { + return fd, nil + } + return -1, err +} + +// Create the actual bridge device. This is more backward-compatible than +// netlink.NetworkLinkAdd and works on RHEL 6. 
+func CreateBridge(name string, setMacAddr bool) error { + if len(name) >= IFNAMSIZ { + return fmt.Errorf("Interface name %s too long", name) + } + + s, err := getIfSocket() + if err != nil { + return err + } + defer syscall.Close(s) + + nameBytePtr, err := syscall.BytePtrFromString(name) + if err != nil { + return err + } + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), SIOC_BRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { + return err + } + if setMacAddr { + return SetMacAddress(name, randMacAddr()) + } + return nil +} + +// Delete the actual bridge device. +func DeleteBridge(name string) error { + s, err := getIfSocket() + if err != nil { + return err + } + defer syscall.Close(s) + + nameBytePtr, err := syscall.BytePtrFromString(name) + if err != nil { + return err + } + + var ifr ifreqFlags + copy(ifr.IfrnName[:len(ifr.IfrnName)-1], []byte(name)) + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), + syscall.SIOCSIFFLAGS, uintptr(unsafe.Pointer(&ifr))); err != 0 { + return err + } + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), + SIOC_BRDELBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { + return err + } + return nil +} + +// Add a slave to a bridge device. This is more backward-compatible than +// netlink.NetworkSetMaster and works on RHEL 6. +func AddToBridge(iface, master *net.Interface) error { + if len(master.Name) >= IFNAMSIZ { + return fmt.Errorf("Interface name %s too long", master.Name) + } + + s, err := getIfSocket() + if err != nil { + return err + } + defer syscall.Close(s) + + ifr := ifreqIndex{} + copy(ifr.IfrnName[:len(ifr.IfrnName)-1], master.Name) + ifr.IfruIndex = int32(iface.Index) + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), SIOC_BRADDIF, uintptr(unsafe.Pointer(&ifr))); err != 0 { + return err + } + + return nil +} + +func randMacAddr() string { + hw := make(net.HardwareAddr, 6) + for i := 0; i < 6; i++ { + hw[i] = byte(rand.Intn(255)) + } + hw[0] &^= 0x1 // clear multicast bit + hw[0] |= 0x2 // set local assignment bit (IEEE802) + return hw.String() +} + +func SetMacAddress(name, addr string) error { + if len(name) >= IFNAMSIZ { + return fmt.Errorf("Interface name %s too long", name) + } + + hw, err := net.ParseMAC(addr) + if err != nil { + return err + } + + s, err := getIfSocket() + if err != nil { + return err + } + defer syscall.Close(s) + + ifr := ifreqHwaddr{} + ifr.IfruHwaddr.Family = syscall.ARPHRD_ETHER + copy(ifr.IfrnName[:len(ifr.IfrnName)-1], name) + + for i := 0; i < 6; i++ { + ifr.IfruHwaddr.Data[i] = ifrDataByte(hw[i]) + } + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), syscall.SIOCSIFHWADDR, uintptr(unsafe.Pointer(&ifr))); err != 0 { + return err + } + return nil +} + +func ChangeName(iface *net.Interface, newName string) error { + if len(newName) >= IFNAMSIZ { + return fmt.Errorf("Interface name %s too long", newName) + } + + fd, err := getIfSocket() + if err != nil { + return err + } + defer syscall.Close(fd) + + data := [IFNAMSIZ * 2]byte{} + // the "-1"s here are very important for ensuring we get proper null + // termination of our new C strings + copy(data[:IFNAMSIZ-1], iface.Name) + copy(data[IFNAMSIZ:IFNAMSIZ*2-1], newName) + + if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.SIOCSIFNAME, uintptr(unsafe.Pointer(&data[0]))); errno != 0 { + return errno + } + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/netlink/netlink_linux_notarm.go docker.io-1.3.2~dfsg1/libcontainer/netlink/netlink_linux_notarm.go ---
docker.io-0.9.1~dfsg1/libcontainer/netlink/netlink_linux_notarm.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/netlink/netlink_linux_notarm.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,7 @@ +// +build !arm + +package netlink + +func ifrDataByte(b byte) int8 { + return int8(b) +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/netlink/netlink_linux_test.go docker.io-1.3.2~dfsg1/libcontainer/netlink/netlink_linux_test.go --- docker.io-0.9.1~dfsg1/libcontainer/netlink/netlink_linux_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/netlink/netlink_linux_test.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,356 @@ +package netlink + +import ( + "net" + "strings" + "syscall" + "testing" +) + +type testLink struct { + name string + linkType string +} + +func addLink(t *testing.T, name string, linkType string) { + if err := NetworkLinkAdd(name, linkType); err != nil { + t.Fatalf("Unable to create %s link: %s", name, err) + } +} + +func readLink(t *testing.T, name string) *net.Interface { + iface, err := net.InterfaceByName(name) + if err != nil { + t.Fatalf("Could not find %s interface: %s", name, err) + } + + return iface +} + +func deleteLink(t *testing.T, name string) { + if err := NetworkLinkDel(name); err != nil { + t.Fatalf("Unable to delete %s link: %s", name, err) + } +} + +func upLink(t *testing.T, name string) { + iface := readLink(t, name) + if err := NetworkLinkUp(iface); err != nil { + t.Fatalf("Could not bring UP %#v interface: %s", iface, err) + } +} + +func downLink(t *testing.T, name string) { + iface := readLink(t, name) + if err := NetworkLinkDown(iface); err != nil { + t.Fatalf("Could not bring DOWN %#v interface: %s", iface, err) + } +} + +func ipAssigned(iface *net.Interface, ip net.IP) bool { + addrs, _ := iface.Addrs() + + for _, addr := range addrs { + args := strings.SplitN(addr.String(), "/", 2) + if args[0] == ip.String() { + return true + } + } + + return false +} + +func TestNetworkLinkAddDel(t *testing.T) { + if testing.Short() { + return + } + + testLinks := []testLink{ + {"tstEth", "dummy"}, + {"tstBr", "bridge"}, + } + + for _, tl := range testLinks { + addLink(t, tl.name, tl.linkType) + defer deleteLink(t, tl.name) + readLink(t, tl.name) + } +} + +func TestNetworkLinkUpDown(t *testing.T) { + if testing.Short() { + return + } + + tl := testLink{name: "tstEth", linkType: "dummy"} + + addLink(t, tl.name, tl.linkType) + defer deleteLink(t, tl.name) + + upLink(t, tl.name) + ifcAfterUp := readLink(t, tl.name) + + if (ifcAfterUp.Flags & syscall.IFF_UP) != syscall.IFF_UP { + t.Fatalf("Could not bring UP %#v interface", tl) + } + + downLink(t, tl.name) + ifcAfterDown := readLink(t, tl.name) + + if (ifcAfterDown.Flags & syscall.IFF_UP) == syscall.IFF_UP { + t.Fatalf("Could not bring DOWN %#v interface", tl) + } +} + +func TestNetworkSetMacAddress(t *testing.T) { + if testing.Short() { + return + } + + tl := testLink{name: "tstEth", linkType: "dummy"} + macaddr := "22:ce:e0:99:63:6f" + + addLink(t, tl.name, tl.linkType) + defer deleteLink(t, tl.name) + + ifcBeforeSet := readLink(t, tl.name) + + if err := NetworkSetMacAddress(ifcBeforeSet, macaddr); err != nil { + t.Fatalf("Could not set %s MAC address on %#v interface: %s", macaddr, tl, err) + } + + ifcAfterSet := readLink(t, tl.name) + + if ifcAfterSet.HardwareAddr.String() != macaddr { + t.Fatalf("Could not set %s MAC address on %#v interface", macaddr, tl) + } +} + +func TestNetworkSetMTU(t *testing.T) { + if testing.Short() { + return + } + + tl :=
testLink{name: "tstEth", linkType: "dummy"} + mtu := 1400 + + addLink(t, tl.name, tl.linkType) + defer deleteLink(t, tl.name) + + ifcBeforeSet := readLink(t, tl.name) + + if err := NetworkSetMTU(ifcBeforeSet, mtu); err != nil { + t.Fatalf("Could not set %d MTU on %#v interface: err", mtu, tl, err) + } + + ifcAfterSet := readLink(t, tl.name) + + if ifcAfterSet.MTU != mtu { + t.Fatalf("Could not set %d MTU on %#v interface", mtu, tl) + } +} + +func TestNetworkSetMasterNoMaster(t *testing.T) { + if testing.Short() { + return + } + + master := testLink{"tstBr", "bridge"} + slave := testLink{"tstEth", "dummy"} + testLinks := []testLink{master, slave} + + for _, tl := range testLinks { + addLink(t, tl.name, tl.linkType) + defer deleteLink(t, tl.name) + upLink(t, tl.name) + } + + masterIfc := readLink(t, master.name) + slaveIfc := readLink(t, slave.name) + if err := NetworkSetMaster(slaveIfc, masterIfc); err != nil { + t.Fatalf("Could not set %#v to be the master of %#v: %s", master, slave, err) + } + + // Trying to figure out a way to test which will not break on RHEL6. + // We could check for existence of /sys/class/net/tstEth/upper_tstBr + // which should point to the ../tstBr which is the UPPER device i.e. network bridge + + if err := NetworkSetNoMaster(slaveIfc); err != nil { + t.Fatalf("Could not UNset %#v master of %#v: %s", master, slave, err) + } +} + +func TestNetworkChangeName(t *testing.T) { + if testing.Short() { + return + } + + tl := testLink{"tstEth", "dummy"} + newName := "newTst" + + addLink(t, tl.name, tl.linkType) + + linkIfc := readLink(t, tl.name) + if err := NetworkChangeName(linkIfc, newName); err != nil { + deleteLink(t, tl.name) + t.Fatalf("Could not change %#v interface name to %s: %s", tl, newName, err) + } + + readLink(t, newName) + deleteLink(t, newName) +} + +func TestNetworkLinkAddVlan(t *testing.T) { + if testing.Short() { + return + } + + tl := struct { + name string + id uint16 + }{ + name: "tstVlan", + id: 32, + } + masterLink := testLink{"tstEth", "dummy"} + + addLink(t, masterLink.name, masterLink.linkType) + defer deleteLink(t, masterLink.name) + + if err := NetworkLinkAddVlan(masterLink.name, tl.name, tl.id); err != nil { + t.Fatalf("Unable to create %#v VLAN interface: %s", tl, err) + } + + readLink(t, tl.name) +} + +func TestNetworkLinkAddMacVlan(t *testing.T) { + if testing.Short() { + return + } + + tl := struct { + name string + mode string + }{ + name: "tstVlan", + mode: "private", + } + masterLink := testLink{"tstEth", "dummy"} + + addLink(t, masterLink.name, masterLink.linkType) + defer deleteLink(t, masterLink.name) + + if err := NetworkLinkAddMacVlan(masterLink.name, tl.name, tl.mode); err != nil { + t.Fatalf("Unable to create %#v MAC VLAN interface: %s", tl, err) + } + + readLink(t, tl.name) +} + +func TestAddDelNetworkIp(t *testing.T) { + if testing.Short() { + return + } + + ifaceName := "lo" + ip := net.ParseIP("127.0.1.1") + mask := net.IPv4Mask(255, 255, 255, 255) + ipNet := &net.IPNet{IP: ip, Mask: mask} + + iface, err := net.InterfaceByName(ifaceName) + if err != nil { + t.Skip("No 'lo' interface; skipping tests") + } + + if err := NetworkLinkAddIp(iface, ip, ipNet); err != nil { + t.Fatalf("Could not add IP address %s to interface %#v: %s", ip.String(), iface, err) + } + + if !ipAssigned(iface, ip) { + t.Fatalf("Could not locate address '%s' in lo address list.", ip.String()) + } + + if err := NetworkLinkDelIp(iface, ip, ipNet); err != nil { + t.Fatalf("Could not delete IP address %s from interface %#v: %s", ip.String(), iface, err) + } + 
+ if ipAssigned(iface, ip) { + t.Fatalf("Located address '%s' in lo address list after removal.", ip.String()) + } +} + +func TestCreateVethPair(t *testing.T) { + if testing.Short() { + return + } + + var ( + name1 = "veth1" + name2 = "veth2" + ) + + if err := NetworkCreateVethPair(name1, name2); err != nil { + t.Fatalf("Could not create veth pair %s %s: %s", name1, name2, err) + } + defer NetworkLinkDel(name1) + + readLink(t, name1) + readLink(t, name2) +} + +// +// netlink package tests which do not use RTNETLINK +// +func TestCreateBridgeWithMac(t *testing.T) { + if testing.Short() { + return + } + + name := "testbridge" + + if err := CreateBridge(name, true); err != nil { + t.Fatal(err) + } + + if _, err := net.InterfaceByName(name); err != nil { + t.Fatal(err) + } + + // cleanup and tests + + if err := DeleteBridge(name); err != nil { + t.Fatal(err) + } + + if _, err := net.InterfaceByName(name); err == nil { + t.Fatalf("expected error getting interface because %s bridge was deleted", name) + } +} + +func TestSetMacAddress(t *testing.T) { + if testing.Short() { + return + } + + name := "testmac" + mac := randMacAddr() + + if err := NetworkLinkAdd(name, "bridge"); err != nil { + t.Fatal(err) + } + defer NetworkLinkDel(name) + + if err := SetMacAddress(name, mac); err != nil { + t.Fatal(err) + } + + iface, err := net.InterfaceByName(name) + if err != nil { + t.Fatal(err) + } + + if iface.HardwareAddr.String() != mac { + t.Fatalf("mac address %q does not match %q", iface.HardwareAddr, mac) + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/netlink/netlink_unsupported.go docker.io-1.3.2~dfsg1/libcontainer/netlink/netlink_unsupported.go --- docker.io-0.9.1~dfsg1/libcontainer/netlink/netlink_unsupported.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/netlink/netlink_unsupported.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,84 @@ +// +build !linux + +package netlink + +import ( + "errors" + "net" +) + +var ( + ErrNotImplemented = errors.New("not implemented") +) + +func NetworkGetRoutes() ([]Route, error) { + return nil, ErrNotImplemented +} + +func NetworkLinkAdd(name string, linkType string) error { + return ErrNotImplemented +} + +func NetworkLinkDel(name string) error { + return ErrNotImplemented +} + +func NetworkLinkUp(iface *net.Interface) error { + return ErrNotImplemented +} + +func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { + return ErrNotImplemented +} + +func NetworkLinkDelIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { + return ErrNotImplemented +} + +func AddRoute(destination, source, gateway, device string) error { + return ErrNotImplemented +} + +func AddDefaultGw(ip, device string) error { + return ErrNotImplemented +} + +func NetworkSetMTU(iface *net.Interface, mtu int) error { + return ErrNotImplemented +} + +func NetworkCreateVethPair(name1, name2 string) error { + return ErrNotImplemented +} + +func NetworkChangeName(iface *net.Interface, newName string) error { + return ErrNotImplemented +} + +func NetworkSetNsFd(iface *net.Interface, fd int) error { + return ErrNotImplemented +} + +func NetworkSetNsPid(iface *net.Interface, nspid int) error { + return ErrNotImplemented +} + +func NetworkSetMaster(iface, master *net.Interface) error { + return ErrNotImplemented +} + +func NetworkLinkDown(iface *net.Interface) error { + return ErrNotImplemented +} + +func CreateBridge(name string, setMacAddr bool) error { + return ErrNotImplemented +} + +func DeleteBridge(name string) error { + return 
ErrNotImplemented +} + +func AddToBridge(iface, master *net.Interface) error { + return ErrNotImplemented +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/network/loopback.go docker.io-1.3.2~dfsg1/libcontainer/network/loopback.go --- docker.io-0.9.1~dfsg1/libcontainer/network/loopback.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/network/loopback.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,23 @@ +// +build linux + +package network + +import ( + "fmt" +) + +// Loopback is a network strategy that provides a basic loopback device +type Loopback struct { +} + +func (l *Loopback) Create(n *Network, nspid int, networkState *NetworkState) error { + return nil +} + +func (l *Loopback) Initialize(config *Network, networkState *NetworkState) error { + // Do not set the MTU on the loopback interface - use the default. + if err := InterfaceUp("lo"); err != nil { + return fmt.Errorf("lo up %s", err) + } + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/network/netns.go docker.io-1.3.2~dfsg1/libcontainer/network/netns.go --- docker.io-0.9.1~dfsg1/libcontainer/network/netns.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/network/netns.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,39 @@ +// +build linux + +package network + +import ( + "fmt" + "os" + "syscall" + + "github.com/docker/libcontainer/system" +) + +// crosbymichael: could make a network strategy that instead of returning veth pair names it returns a pid to an existing network namespace +type NetNS struct { +} + +func (v *NetNS) Create(n *Network, nspid int, networkState *NetworkState) error { + networkState.NsPath = n.NsPath + return nil +} + +func (v *NetNS) Initialize(config *Network, networkState *NetworkState) error { + if networkState.NsPath == "" { + return fmt.Errorf("nspath is not specified in NetworkState") + } + + f, err := os.OpenFile(networkState.NsPath, os.O_RDONLY, 0) + if err != nil { + return fmt.Errorf("failed to get network namespace fd: %v", err) + } + + if err := system.Setns(f.Fd(), syscall.CLONE_NEWNET); err != nil { + f.Close() + return fmt.Errorf("failed to setns current network namespace: %v", err) + } + + f.Close() + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/network/network.go docker.io-1.3.2~dfsg1/libcontainer/network/network.go --- docker.io-0.9.1~dfsg1/libcontainer/network/network.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/network/network.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,97 @@ +// +build linux + +package network + +import ( + "net" + + "github.com/docker/libcontainer/netlink" +) + +func InterfaceUp(name string) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.NetworkLinkUp(iface) +} + +func InterfaceDown(name string) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.NetworkLinkDown(iface) +} + +func ChangeInterfaceName(old, newName string) error { + iface, err := net.InterfaceByName(old) + if err != nil { + return err + } + return netlink.NetworkChangeName(iface, newName) +} + +func CreateVethPair(name1, name2 string) error { + return netlink.NetworkCreateVethPair(name1, name2) +} + +func SetInterfaceInNamespacePid(name string, nsPid int) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.NetworkSetNsPid(iface, nsPid) +} + +func SetInterfaceInNamespaceFd(name string, fd uintptr) error { + iface, err :=
net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.NetworkSetNsFd(iface, int(fd)) +} + +func SetInterfaceMaster(name, master string) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + masterIface, err := net.InterfaceByName(master) + if err != nil { + return err + } + return netlink.AddToBridge(iface, masterIface) +} + +func SetDefaultGateway(ip, ifaceName string) error { + return netlink.AddDefaultGw(ip, ifaceName) +} + +func SetInterfaceMac(name string, macaddr string) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.NetworkSetMacAddress(iface, macaddr) +} + +func SetInterfaceIp(name string, rawIp string) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + ip, ipNet, err := net.ParseCIDR(rawIp) + if err != nil { + return err + } + return netlink.NetworkLinkAddIp(iface, ip, ipNet) +} + +func SetMtu(name string, mtu int) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.NetworkSetMTU(iface, mtu) +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/network/stats.go docker.io-1.3.2~dfsg1/libcontainer/network/stats.go --- docker.io-0.9.1~dfsg1/libcontainer/network/stats.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/network/stats.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,69 @@ +package network + +import ( + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" +) + +type NetworkStats struct { + RxBytes uint64 `json:"rx_bytes"` + RxPackets uint64 `json:"rx_packets"` + RxErrors uint64 `json:"rx_errors"` + RxDropped uint64 `json:"rx_dropped"` + TxBytes uint64 `json:"tx_bytes"` + TxPackets uint64 `json:"tx_packets"` + TxErrors uint64 `json:"tx_errors"` + TxDropped uint64 `json:"tx_dropped"` +} + +// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo. +func GetStats(networkState *NetworkState) (*NetworkStats, error) { + // This can happen if the network runtime information is missing - possible if the container was created by an old version of libcontainer. + if networkState.VethHost == "" { + return &NetworkStats{}, nil + } + data, err := readSysfsNetworkStats(networkState.VethHost) + if err != nil { + return nil, err + } + + // Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container. + return &NetworkStats{ + RxBytes: data["tx_bytes"], + RxPackets: data["tx_packets"], + RxErrors: data["tx_errors"], + RxDropped: data["tx_dropped"], + TxBytes: data["rx_bytes"], + TxPackets: data["rx_packets"], + TxErrors: data["rx_errors"], + TxDropped: data["rx_dropped"], + }, nil +} + +// Reads all the statistics available under /sys/class/net/<interface>/statistics as a map with file name as key and data as integers. +func readSysfsNetworkStats(ethInterface string) (map[string]uint64, error) { + out := make(map[string]uint64) + + fullPath := filepath.Join("/sys/class/net", ethInterface, "statistics/") + err := filepath.Walk(fullPath, func(path string, _ os.FileInfo, _ error) error { + // skip fullPath.
+ if path == fullPath { + return nil + } + base := filepath.Base(path) + data, err := ioutil.ReadFile(path) + if err != nil { + return err + } + value, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) + if err != nil { + return err + } + out[base] = value + return nil + }) + return out, err +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/network/strategy.go docker.io-1.3.2~dfsg1/libcontainer/network/strategy.go --- docker.io-0.9.1~dfsg1/libcontainer/network/strategy.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/network/strategy.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,35 @@ +// +build linux + +package network + +import ( + "errors" +) + +var ( + ErrNotValidStrategyType = errors.New("not a valid network strategy type") +) + +var strategies = map[string]NetworkStrategy{ + "veth": &Veth{}, + "loopback": &Loopback{}, + "netns": &NetNS{}, +} + +// NetworkStrategy represents a specific network configuration for +// a container's networking stack +type NetworkStrategy interface { + Create(*Network, int, *NetworkState) error + Initialize(*Network, *NetworkState) error +} + +// GetStrategy returns the specific network strategy for the +// provided type. If no strategy is registered for the type an +// ErrNotValidStrategyType is returned. +func GetStrategy(tpe string) (NetworkStrategy, error) { + s, exists := strategies[tpe] + if !exists { + return nil, ErrNotValidStrategyType + } + return s, nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/network/types.go docker.io-1.3.2~dfsg1/libcontainer/network/types.go --- docker.io-0.9.1~dfsg1/libcontainer/network/types.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/network/types.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,50 @@ +package network + +// Network defines configuration for a container's networking stack +// +// The network configuration can be omitted from a container causing the +// container to be set up with the host's networking stack +type Network struct { + // Type sets the networks type, commonly veth and loopback + Type string `json:"type,omitempty"` + + // Path to network namespace + NsPath string `json:"ns_path,omitempty"` + + // The bridge to use. + Bridge string `json:"bridge,omitempty"` + + // Prefix for the veth interfaces. + VethPrefix string `json:"veth_prefix,omitempty"` + + // MacAddress contains the MAC address to set on the network interface + MacAddress string `json:"mac_address,omitempty"` + + // Address contains the IPv4 and mask to set on the network interface + Address string `json:"address,omitempty"` + + // IPv6Address contains the IPv6 and mask to set on the network interface + IPv6Address string `json:"ipv6_address,omitempty"` + + // Gateway sets the gateway address that is used as the default for the interface + Gateway string `json:"gateway,omitempty"` + + // IPv6Gateway sets the ipv6 gateway address that is used as the default for the interface + IPv6Gateway string `json:"ipv6_gateway,omitempty"` + + // Mtu sets the mtu value for the interface and will be mirrored on both the host and + // container's interfaces if a pair is created, specifically in the case of type veth + // Note: This does not apply to loopback interfaces. + Mtu int `json:"mtu,omitempty"` +} + +// Struct describing the network specific runtime state that will be maintained by libcontainer for all running containers +// Do not depend on it outside of libcontainer. +type NetworkState struct { + // The name of the veth interface on the Host.
+ VethHost string `json:"veth_host,omitempty"` + // The name of the veth interface created inside the container for the child. + VethChild string `json:"veth_child,omitempty"` + // Net namespace path. + NsPath string `json:"ns_path,omitempty"` +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/network/veth.go docker.io-1.3.2~dfsg1/libcontainer/network/veth.go --- docker.io-0.9.1~dfsg1/libcontainer/network/veth.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/network/veth.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,121 @@ +// +build linux + +package network + +import ( + "fmt" + + "github.com/docker/libcontainer/netlink" + "github.com/docker/libcontainer/utils" +) + +// Veth is a network strategy that uses a bridge and creates +// a veth pair, one that stays outside on the host and the other +// is placed inside the container's namespace +type Veth struct { +} + +const defaultDevice = "eth0" + +func (v *Veth) Create(n *Network, nspid int, networkState *NetworkState) error { + var ( + bridge = n.Bridge + prefix = n.VethPrefix + ) + if bridge == "" { + return fmt.Errorf("bridge is not specified") + } + if prefix == "" { + return fmt.Errorf("veth prefix is not specified") + } + name1, name2, err := createVethPair(prefix) + if err != nil { + return err + } + if err := SetInterfaceMaster(name1, bridge); err != nil { + return err + } + if err := SetMtu(name1, n.Mtu); err != nil { + return err + } + if err := InterfaceUp(name1); err != nil { + return err + } + if err := SetInterfaceInNamespacePid(name2, nspid); err != nil { + return err + } + networkState.VethHost = name1 + networkState.VethChild = name2 + + return nil +} + +func (v *Veth) Initialize(config *Network, networkState *NetworkState) error { + var vethChild = networkState.VethChild + if vethChild == "" { + return fmt.Errorf("vethChild is not specified") + } + if err := InterfaceDown(vethChild); err != nil { + return fmt.Errorf("interface down %s %s", vethChild, err) + } + if err := ChangeInterfaceName(vethChild, defaultDevice); err != nil { + return fmt.Errorf("change %s to %s %s", vethChild, defaultDevice, err) + } + if config.MacAddress != "" { + if err := SetInterfaceMac(defaultDevice, config.MacAddress); err != nil { + return fmt.Errorf("set %s mac %s", defaultDevice, err) + } + } + if err := SetInterfaceIp(defaultDevice, config.Address); err != nil { + return fmt.Errorf("set %s ip %s", defaultDevice, err) + } + if config.IPv6Address != "" { + if err := SetInterfaceIp(defaultDevice, config.IPv6Address); err != nil { + return fmt.Errorf("set %s ipv6 %s", defaultDevice, err) + } + } + + if err := SetMtu(defaultDevice, config.Mtu); err != nil { + return fmt.Errorf("set %s mtu to %d %s", defaultDevice, config.Mtu, err) + } + if err := InterfaceUp(defaultDevice); err != nil { + return fmt.Errorf("%s up %s", defaultDevice, err) + } + if config.Gateway != "" { + if err := SetDefaultGateway(config.Gateway, defaultDevice); err != nil { + return fmt.Errorf("set gateway to %s on device %s failed with %s", config.Gateway, defaultDevice, err) + } + } + if config.IPv6Gateway != "" { + if err := SetDefaultGateway(config.IPv6Gateway, defaultDevice); err != nil { + return fmt.Errorf("set gateway for ipv6 to %s on device %s failed with %s", config.IPv6Gateway, defaultDevice, err) + } + } + return nil +} + +// createVethPair will automatically generate two random names for +// the veth pair and ensure that they have been created +func createVethPair(prefix string) (name1 string, name2 string, err error) { + for i := 0;
i < 10; i++ { + if name1, err = utils.GenerateRandomName(prefix, 7); err != nil { + return + } + + if name2, err = utils.GenerateRandomName(prefix, 7); err != nil { + return + } + + if err = CreateVethPair(name1, name2); err != nil { + if err == netlink.ErrInterfaceExists { + continue + } + + return + } + + break + } + + return +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/network/veth_test.go docker.io-1.3.2~dfsg1/libcontainer/network/veth_test.go --- docker.io-0.9.1~dfsg1/libcontainer/network/veth_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/network/veth_test.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,53 @@ +// +build linux + +package network + +import ( + "testing" + + "github.com/docker/libcontainer/netlink" +) + +func TestGenerateVethNames(t *testing.T) { + if testing.Short() { + return + } + + prefix := "veth" + + name1, name2, err := createVethPair(prefix) + if err != nil { + t.Fatal(err) + } + + if name1 == "" { + t.Fatal("name1 should not be empty") + } + + if name2 == "" { + t.Fatal("name2 should not be empty") + } +} + +func TestCreateDuplicateVethPair(t *testing.T) { + if testing.Short() { + return + } + + prefix := "veth" + + name1, name2, err := createVethPair(prefix) + if err != nil { + t.Fatal(err) + } + + // retry to create the same interfaces and make sure that we get the correct error + err = CreateVethPair(name1, name2) + if err == nil { + t.Fatal("expected error to not be nil with duplicate interface") + } + + if err != netlink.ErrInterfaceExists { + t.Fatalf("expected error to be ErrInterfaceExists but received %q", err) + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/NOTICE docker.io-1.3.2~dfsg1/libcontainer/NOTICE --- docker.io-0.9.1~dfsg1/libcontainer/NOTICE 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/NOTICE 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,16 @@ +libcontainer +Copyright 2012-2014 Docker, Inc. + +This product includes software developed at Docker, Inc. (http://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see http://www.bis.doc.gov + +See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
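Editor's note: to make the veth strategy above concrete, the following is a minimal sketch (not part of this diff) of how a caller might drive it end to end via the strategy registry in strategy.go. The bridge name "docker0", the veth prefix, the addresses, and the nspid 12345 are illustrative assumptions, not values taken from this source.

package main

import (
	"log"

	"github.com/docker/libcontainer/network"
)

func main() {
	strategy, err := network.GetStrategy("veth")
	if err != nil {
		log.Fatal(err) // ErrNotValidStrategyType for unregistered types
	}

	cfg := &network.Network{
		Type:       "veth",
		Bridge:     "docker0",       // assumed host bridge
		VethPrefix: "veth",          // assumed prefix for generated names
		Address:    "172.17.0.2/16", // assumed container address
		Gateway:    "172.17.42.1",   // assumed default gateway
		Mtu:        1500,
	}
	state := &network.NetworkState{}

	// Host side: creates the veth pair, attaches one end to the bridge,
	// and moves the other end into the namespace of pid 12345 (assumed).
	if err := strategy.Create(cfg, 12345, state); err != nil {
		log.Fatal(err)
	}

	// Container side: renames the child veth to eth0 and assigns the
	// address and default gateway. In practice this runs in a separate
	// process inside the target network namespace.
	if err := strategy.Initialize(cfg, state); err != nil {
		log.Fatal(err)
	}
}

Note that Create and Initialize are deliberately split: Create records the generated pair names in NetworkState on the host, while Initialize consumes that state from within the container's namespace, which is why the two calls are normally made by different processes.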
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/nsinit/config.go docker.io-1.3.2~dfsg1/libcontainer/nsinit/config.go --- docker.io-0.9.1~dfsg1/libcontainer/nsinit/config.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/nsinit/config.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,29 @@ +package main + +import ( + "encoding/json" + "fmt" + "log" + + "github.com/codegangsta/cli" +) + +var configCommand = cli.Command{ + Name: "config", + Usage: "display the container configuration", + Action: configAction, +} + +func configAction(context *cli.Context) { + container, err := loadConfig() + if err != nil { + log.Fatal(err) + } + + data, err := json.MarshalIndent(container, "", "\t") + if err != nil { + log.Fatal(err) + } + + fmt.Printf("%s", data) +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/nsinit/exec.go docker.io-1.3.2~dfsg1/libcontainer/nsinit/exec.go --- docker.io-0.9.1~dfsg1/libcontainer/nsinit/exec.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/nsinit/exec.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,208 @@ +package main + +import ( + "fmt" + "io" + "log" + "os" + "os/exec" + "os/signal" + "syscall" + "text/tabwriter" + + "github.com/codegangsta/cli" + "github.com/docker/docker/pkg/term" + "github.com/docker/libcontainer" + consolepkg "github.com/docker/libcontainer/console" + "github.com/docker/libcontainer/namespaces" +) + +var execCommand = cli.Command{ + Name: "exec", + Usage: "execute a new command inside a container", + Action: execAction, + Flags: []cli.Flag{ + cli.BoolFlag{Name: "list", Usage: "list all registered exec functions"}, + cli.StringFlag{Name: "func", Value: "exec", Usage: "function name to exec inside a container"}, + }, +} + +func execAction(context *cli.Context) { + if context.Bool("list") { + w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0) + fmt.Fprint(w, "NAME\tUSAGE\n") + + for k, f := range argvs { + fmt.Fprintf(w, "%s\t%s\n", k, f.Usage) + } + + w.Flush() + + return + } + + var exitCode int + + container, err := loadConfig() + if err != nil { + log.Fatal(err) + } + + state, err := libcontainer.GetState(dataPath) + if err != nil && !os.IsNotExist(err) { + log.Fatalf("unable to read state.json: %s", err) + } + + if state != nil { + exitCode, err = startInExistingContainer(container, state, context.String("func"), context) + } else { + exitCode, err = startContainer(container, dataPath, []string(context.Args())) + } + + if err != nil { + log.Fatalf("failed to exec: %s", err) + } + + os.Exit(exitCode) +} + +// The process for exec'ing a new process inside an existing container is that we have to exec ourself +// with the nsenter argument so that the C code can setns into the namespaces that we require. Then that +// code path will drop us into the path where we can do the final setup of the namespace and exec the user's +// application.
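Editor's note: the comment above describes nsinit's argv[0] re-exec trick. The sketch below is a self-contained illustration of that dispatch pattern, not this diff's actual code path: the real implementation threads os.Args[0] through namespaces.ExecIn, while the "/proc/self/exe" path and the demo payload here are assumptions for the example. The "nsenter-" prefix mirrors the registry checked in main.go further down.

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	// Child path: if we were re-exec'd with a synthetic argv[0] such as
	// "nsenter-exec", run the registered function instead of the CLI.
	if name := strings.TrimPrefix(os.Args[0], "nsenter-"); name != os.Args[0] {
		fmt.Printf("running registered function %q with args %v\n", name, os.Args[1:])
		return
	}

	// Parent path: re-exec our own binary with argv[0] rewritten.
	// Path stays /proc/self/exe (Linux), but the child sees the
	// synthetic name in os.Args[0] and takes the branch above.
	cmd := exec.Command("/proc/self/exe", "demo-arg")
	cmd.Args[0] = "nsenter-exec"
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

The point of the pattern is that the C nsenter code (linked in via cgo) can inspect argv[0] and perform setns calls before the Go runtime starts any threads, after which the Go side finishes namespace setup and execs the user's process.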
+func startInExistingContainer(config *libcontainer.Config, state *libcontainer.State, action string, context *cli.Context) (int, error) { + var ( + master *os.File + console string + err error + + sigc = make(chan os.Signal, 10) + + stdin = os.Stdin + stdout = os.Stdout + stderr = os.Stderr + ) + signal.Notify(sigc) + + if config.Tty { + stdin = nil + stdout = nil + stderr = nil + + master, console, err = consolepkg.CreateMasterAndConsole() + if err != nil { + return -1, err + } + + go io.Copy(master, os.Stdin) + go io.Copy(os.Stdout, master) + + state, err := term.SetRawTerminal(os.Stdin.Fd()) + if err != nil { + return -1, err + } + + defer term.RestoreTerminal(os.Stdin.Fd(), state) + } + + startCallback := func(cmd *exec.Cmd) { + go func() { + resizeTty(master) + + for sig := range sigc { + switch sig { + case syscall.SIGWINCH: + resizeTty(master) + default: + cmd.Process.Signal(sig) + } + } + }() + } + + return namespaces.ExecIn(config, state, context.Args(), os.Args[0], action, stdin, stdout, stderr, console, startCallback) +} + +// startContainer starts the container. Returns the exit status or -1 and an +// error. +// +// Signals sent to the current process will be forwarded to container. +func startContainer(container *libcontainer.Config, dataPath string, args []string) (int, error) { + var ( + cmd *exec.Cmd + sigc = make(chan os.Signal, 10) + ) + + signal.Notify(sigc) + + createCommand := func(container *libcontainer.Config, console, dataPath, init string, pipe *os.File, args []string) *exec.Cmd { + cmd = namespaces.DefaultCreateCommand(container, console, dataPath, init, pipe, args) + if logPath != "" { + cmd.Env = append(cmd.Env, fmt.Sprintf("log=%s", logPath)) + } + return cmd + } + + var ( + master *os.File + console string + err error + + stdin = os.Stdin + stdout = os.Stdout + stderr = os.Stderr + ) + + if container.Tty { + stdin = nil + stdout = nil + stderr = nil + + master, console, err = consolepkg.CreateMasterAndConsole() + if err != nil { + return -1, err + } + + go io.Copy(master, os.Stdin) + go io.Copy(os.Stdout, master) + + state, err := term.SetRawTerminal(os.Stdin.Fd()) + if err != nil { + return -1, err + } + + defer term.RestoreTerminal(os.Stdin.Fd(), state) + } + + startCallback := func() { + go func() { + resizeTty(master) + + for sig := range sigc { + switch sig { + case syscall.SIGWINCH: + resizeTty(master) + default: + cmd.Process.Signal(sig) + } + } + }() + } + + return namespaces.Exec(container, stdin, stdout, stderr, console, dataPath, args, createCommand, startCallback) +} + +func resizeTty(master *os.File) { + if master == nil { + return + } + + ws, err := term.GetWinsize(os.Stdin.Fd()) + if err != nil { + return + } + + if err := term.SetWinsize(master.Fd(), ws); err != nil { + return + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/nsinit/init.go docker.io-1.3.2~dfsg1/libcontainer/nsinit/init.go --- docker.io-0.9.1~dfsg1/libcontainer/nsinit/init.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/nsinit/init.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,52 @@ +package main + +import ( + "log" + "os" + "runtime" + "strconv" + + "github.com/codegangsta/cli" + "github.com/docker/libcontainer/namespaces" + "github.com/docker/libcontainer/syncpipe" +) + +var ( + dataPath = os.Getenv("data_path") + console = os.Getenv("console") + rawPipeFd = os.Getenv("pipe") + + initCommand = cli.Command{ + Name: "init", + Usage: "runs the init process inside the namespace", + Action: initAction, + } +) + +func initAction(context 
*cli.Context) { + runtime.LockOSThread() + + container, err := loadConfig() + if err != nil { + log.Fatal(err) + } + + rootfs, err := os.Getwd() + if err != nil { + log.Fatal(err) + } + + pipeFd, err := strconv.Atoi(rawPipeFd) + if err != nil { + log.Fatal(err) + } + + syncPipe, err := syncpipe.NewSyncPipeFromFd(0, uintptr(pipeFd)) + if err != nil { + log.Fatalf("unable to create sync pipe: %s", err) + } + + if err := namespaces.Init(container, rootfs, console, syncPipe, []string(context.Args())); err != nil { + log.Fatalf("unable to initialize for container: %s", err) + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/nsinit/main.go docker.io-1.3.2~dfsg1/libcontainer/nsinit/main.go --- docker.io-0.9.1~dfsg1/libcontainer/nsinit/main.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/nsinit/main.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,67 @@ +package main + +import ( + "log" + "os" + "strings" + + "github.com/codegangsta/cli" +) + +var ( + logPath = os.Getenv("log") + argvs = make(map[string]*rFunc) +) + +func init() { + argvs["exec"] = &rFunc{ + Usage: "execute a process inside an existing container", + Action: nsenterExec, + } + + argvs["mknod"] = &rFunc{ + Usage: "mknod a device inside an existing container", + Action: nsenterMknod, + } + + argvs["ip"] = &rFunc{ + Usage: "display the container's network interfaces", + Action: nsenterIp, + } +} + +func main() { + // we need to check our argv[0] for any registered functions to run instead of the + // normal cli code path + f, exists := argvs[strings.TrimPrefix(os.Args[0], "nsenter-")] + if exists { + runFunc(f) + + return + } + + app := cli.NewApp() + + app.Name = "nsinit" + app.Version = "0.1" + app.Author = "libcontainer maintainers" + app.Flags = []cli.Flag{ + cli.StringFlag{Name: "nspid"}, + cli.StringFlag{Name: "console"}, + } + + app.Before = preload + + app.Commands = []cli.Command{ + execCommand, + initCommand, + statsCommand, + configCommand, + pauseCommand, + unpauseCommand, + } + + if err := app.Run(os.Args); err != nil { + log.Fatal(err) + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/nsinit/nsenter.go docker.io-1.3.2~dfsg1/libcontainer/nsinit/nsenter.go --- docker.io-0.9.1~dfsg1/libcontainer/nsinit/nsenter.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/nsinit/nsenter.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,84 @@ +package main + +import ( + "fmt" + "log" + "net" + "os" + "strconv" + "strings" + "text/tabwriter" + + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/devices" + "github.com/docker/libcontainer/mount/nodes" + "github.com/docker/libcontainer/namespaces" + _ "github.com/docker/libcontainer/namespaces/nsenter" +) + +// nsenterExec exec's a process inside an existing container +func nsenterExec(config *libcontainer.Config, args []string) { + if err := namespaces.FinalizeSetns(config, args); err != nil { + log.Fatalf("failed to nsenter: %s", err) + } +} + +// nsenterMknod runs mknod inside an existing container +// +// mknod path type major minor +func nsenterMknod(config *libcontainer.Config, args []string) { + if len(args) != 4 { + log.Fatalf("expected mknod to have 4 arguments not %d", len(args)) + } + + t := rune(args[1][0]) + + major, err := strconv.Atoi(args[2]) + if err != nil { + log.Fatal(err) + } + + minor, err := strconv.Atoi(args[3]) + if err != nil { + log.Fatal(err) + } + + n := &devices.Device{ + Path: args[0], + Type: t, + MajorNumber: int64(major), + MinorNumber: int64(minor), + } + + if err := nodes.CreateDeviceNode("/", n); err 
!= nil { + log.Fatal(err) + } +} + +// nsenterIp displays the network interfaces inside a container's net namespace +func nsenterIp(config *libcontainer.Config, args []string) { + interfaces, err := net.Interfaces() + if err != nil { + log.Fatal(err) + } + + w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0) + fmt.Fprint(w, "NAME\tMTU\tMAC\tFLAG\tADDRS\n") + + for _, iface := range interfaces { + addrs, err := iface.Addrs() + if err != nil { + log.Fatal(err) + } + + o := []string{} + + for _, a := range addrs { + o = append(o, a.String()) + } + + fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\n", iface.Name, iface.MTU, iface.HardwareAddr, iface.Flags, strings.Join(o, ",")) + } + + w.Flush() +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/nsinit/pause.go docker.io-1.3.2~dfsg1/libcontainer/nsinit/pause.go --- docker.io-0.9.1~dfsg1/libcontainer/nsinit/pause.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/nsinit/pause.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,49 @@ +package main + +import ( + "log" + + "github.com/codegangsta/cli" + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/cgroups/fs" + "github.com/docker/libcontainer/cgroups/systemd" +) + +var pauseCommand = cli.Command{ + Name: "pause", + Usage: "pause the container's processes", + Action: pauseAction, +} + +var unpauseCommand = cli.Command{ + Name: "unpause", + Usage: "unpause the container's processes", + Action: unpauseAction, +} + +func pauseAction(context *cli.Context) { + if err := toggle(cgroups.Frozen); err != nil { + log.Fatal(err) + } +} + +func unpauseAction(context *cli.Context) { + if err := toggle(cgroups.Thawed); err != nil { + log.Fatal(err) + } +} + +func toggle(state cgroups.FreezerState) error { + container, err := loadConfig() + if err != nil { + return err + } + + if systemd.UseSystemd() { + err = systemd.Freeze(container.Cgroups, state) + } else { + err = fs.Freeze(container.Cgroups, state) + } + + return err +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/nsinit/stats.go docker.io-1.3.2~dfsg1/libcontainer/nsinit/stats.go --- docker.io-0.9.1~dfsg1/libcontainer/nsinit/stats.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/nsinit/stats.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,39 @@ +package main + +import ( + "encoding/json" + "fmt" + "log" + + "github.com/codegangsta/cli" + "github.com/docker/libcontainer" +) + +var statsCommand = cli.Command{ + Name: "stats", + Usage: "display statistics for the container", + Action: statsAction, +} + +func statsAction(context *cli.Context) { + container, err := loadConfig() + if err != nil { + log.Fatal(err) + } + + state, err := libcontainer.GetState(dataPath) + if err != nil { + log.Fatal(err) + } + + stats, err := libcontainer.GetStats(container, state) + if err != nil { + log.Fatal(err) + } + data, err := json.MarshalIndent(stats, "", "\t") + if err != nil { + log.Fatal(err) + } + + fmt.Printf("%s", data) +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/nsinit/utils.go docker.io-1.3.2~dfsg1/libcontainer/nsinit/utils.go --- docker.io-0.9.1~dfsg1/libcontainer/nsinit/utils.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/nsinit/utils.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,94 @@ +package main + +import ( + "encoding/json" + "log" + "os" + "path/filepath" + + "github.com/codegangsta/cli" + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/syncpipe" +) + +// rFunc is a function registration for calling after an execin +type rFunc 
struct { + Usage string + Action func(*libcontainer.Config, []string) +} + +func loadConfig() (*libcontainer.Config, error) { + f, err := os.Open(filepath.Join(dataPath, "container.json")) + if err != nil { + return nil, err + } + defer f.Close() + + var container *libcontainer.Config + if err := json.NewDecoder(f).Decode(&container); err != nil { + return nil, err + } + + return container, nil +} + +func openLog(name string) error { + f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0755) + if err != nil { + return err + } + + log.SetOutput(f) + + return nil +} + +func findUserArgs() []string { + i := 0 + for _, a := range os.Args { + i++ + + if a == "--" { + break + } + } + + return os.Args[i:] +} + +// loadConfigFromFd loads a container's config from the sync pipe that is provided by +// fd 3 when running a process +func loadConfigFromFd() (*libcontainer.Config, error) { + syncPipe, err := syncpipe.NewSyncPipeFromFd(0, 3) + if err != nil { + return nil, err + } + + var config *libcontainer.Config + if err := syncPipe.ReadFromParent(&config); err != nil { + return nil, err + } + + return config, nil +} + +func preload(context *cli.Context) error { + if logPath != "" { + if err := openLog(logPath); err != nil { + return err + } + } + + return nil +} + +func runFunc(f *rFunc) { + userArgs := findUserArgs() + + config, err := loadConfigFromFd() + if err != nil { + log.Fatalf("unable to receive config from sync pipe: %s", err) + } + + f.Action(config, userArgs) +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/PRINCIPLES.md docker.io-1.3.2~dfsg1/libcontainer/PRINCIPLES.md --- docker.io-0.9.1~dfsg1/libcontainer/PRINCIPLES.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/PRINCIPLES.md 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,19 @@ +# libcontainer Principles + +In the design and development of libcontainer we try to follow these principles: + +(Work in progress) + +* Don't try to replace every tool. Instead, be an ingredient to improve them. +* Less code is better. +* Fewer components are better. Do you really need to add one more class? +* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand. +* Don't do later what you can do now. "//FIXME: refactor" is not acceptable in new code. +* When hesitating between two options, choose the one that is easier to reverse. +* "No" is temporary; "Yes" is forever. If you're not sure about a new feature, say no. You can change your mind later. +* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable. +* The fewer moving parts in a container, the better. +* Don't merge it unless you document it. +* Don't document it unless you can keep it up-to-date. +* Don't merge it unless you test it! +* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that. diff -Nru docker.io-0.9.1~dfsg1/libcontainer/process.go docker.io-1.3.2~dfsg1/libcontainer/process.go --- docker.io-0.9.1~dfsg1/libcontainer/process.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/process.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,27 @@ +package libcontainer + +import "io" + +// Configuration for a process to be run inside a container. +type ProcessConfig struct { + // The command to be run followed by any arguments. + Args []string + + // Map of environment variables to their values. 
+ Env []string + + // Stdin is a pointer to a reader which provides the standard input stream. + // Stdout is a pointer to a writer which receives the standard output stream. + // Stderr is a pointer to a writer which receives the standard error stream. + // + // If a reader or writer is nil, the input stream is assumed to be empty and the output is + // discarded. + // + // The readers and writers, if supplied, are closed when the process terminates. Their Close + // methods should be idempotent. + // + // Stdout and Stderr may refer to the same writer in which case the output is interspersed. + Stdin io.ReadCloser + Stdout io.WriteCloser + Stderr io.WriteCloser +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/README.md docker.io-1.3.2~dfsg1/libcontainer/README.md --- docker.io-0.9.1~dfsg1/libcontainer/README.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/README.md 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,62 @@ +## libcontainer - reference implementation for containers [![Build Status](https://travis-ci.org/docker/libcontainer.png?branch=master)](https://travis-ci.org/docker/libcontainer) + +### Note on API changes: + +Please bear with us while we work on making the libcontainer API stable and something that we can support long term. We are currently discussing the API with the community, therefore, if you currently depend on libcontainer please pin your dependency at a specific tag or commit id. Please join the discussion and help shape the API. + +#### Background + +libcontainer specifies configuration options for what a container is. It provides a native Go implementation for using Linux namespaces with no external dependencies. libcontainer provides many convenience functions for working with namespaces, networking, and management. + + +#### Container +A container is a self contained execution environment that shares the kernel of the host system and which is (optionally) isolated from other containers in the system. + +libcontainer may be used to execute a process in a container. If a user tries to run a new process inside an existing container, the new process is added to the processes executing in the container. + + +#### Root file system + +A container runs with a directory known as its *root file system*, or *rootfs*, mounted as the file system root. The rootfs is usually a full system tree. + + +#### Configuration + +A container is initially configured by supplying configuration data when the container is created. + + +#### nsinit + +`nsinit` is a cli application which demonstrates the use of libcontainer. It is able to spawn new containers or join existing containers, based on the current directory. + +To use `nsinit`, cd into a Linux rootfs and copy a `container.json` file into the directory with your specified configuration. Environment, networking, and different capabilities for the container are specified in this file. The configuration is used for each process executed inside the container. + +See the `sample_configs` folder for examples of what the container configuration should look like. + +To execute `/bin/bash` in the current directory as a container just run the following **as root**: +```bash +nsinit exec /bin/bash +``` + +If you wish to spawn another process inside the container while your current bash session is running, run the same command again to get another bash shell (or change the command). 
If the original process (PID 1) dies, all other processes spawned inside the container will be killed and the namespace will be removed. + +You can identify whether a process is running in a container by checking for `state.json` in the root of the directory. + +You may also specify an alternate root directory from which the `container.json` file is read and in which the `state.json` file will be saved. + +#### Future +See the [roadmap](ROADMAP.md). + +## Copyright and license + +Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license. +Docs released under Creative Commons. + +## Hacking on libcontainer + +First of all, please familiarise yourself with the [libcontainer Principles](PRINCIPLES.md). + +If you're a *contributor* or aspiring contributor, you should read the [Contributors' Guide](CONTRIBUTORS_GUIDE.md). + +If you're a *maintainer* or aspiring maintainer, you should read the [Maintainers' Guide](MAINTAINERS_GUIDE.md) and +"How can I become a maintainer?" in the Contributors' Guide. diff -Nru docker.io-0.9.1~dfsg1/libcontainer/ROADMAP.md docker.io-1.3.2~dfsg1/libcontainer/ROADMAP.md --- docker.io-0.9.1~dfsg1/libcontainer/ROADMAP.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/ROADMAP.md 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,16 @@ +# libcontainer: what's next? + +This document is a high-level overview of where we want to take libcontainer next. +It is a curated selection of planned improvements which are either important, difficult, or both. + +For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/docker/libcontainer/issues). + +To suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request. + +## Broader kernel support + +Our goal is to make libcontainer run everywhere, but currently libcontainer requires Linux version 3.8 or higher. If you’re deploying new machines for the purpose of running libcontainer, this is a fairly easy requirement to meet. However, if you’re adding libcontainer to an existing deployment, you may not have the flexibility to update and patch the kernel. + +## Cross-architecture support + +Our goal is to make libcontainer run everywhere. However, currently libcontainer only runs on x86_64 systems. We plan on expanding architecture support, so that libcontainer containers can be created and used on more architectures. 
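The README above says a running container can be recognized by the presence of `state.json` in its root. A minimal sketch of that check, using the `libcontainer.GetState` helper added in `state.go` later in this diff; it is meant to be run from the container's root directory and is not part of the patch itself:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/libcontainer"
)

func main() {
	// GetState reads state.json from the given base path; a missing
	// file simply means no container is running here.
	state, err := libcontainer.GetState(".")
	if err != nil {
		if os.IsNotExist(err) {
			fmt.Println("no running container in this directory")
			return
		}
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("container running, init pid %d\n", state.InitPid)
}
```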
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/sample_configs/apparmor.json docker.io-1.3.2~dfsg1/libcontainer/sample_configs/apparmor.json --- docker.io-0.9.1~dfsg1/libcontainer/sample_configs/apparmor.json 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/sample_configs/apparmor.json 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,196 @@ +{ + "capabilities": [ + "CHOWN", + "DAC_OVERRIDE", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL" + ], + "cgroups": { + "allowed_devices": [ + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 98 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 1, + "path": "/dev/console", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "path": "/dev/tty0", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "minor_number": 1, + "path": "/dev/tty1", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 136, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 2, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 10, + "minor_number": 200, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ], + "name": "docker-koye", + "parent": "docker" + }, + "restrict_sys": true, + "apparmor_profile": "docker-default", + "mount_config": { + "device_nodes": [ + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ] + }, + "environment": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=koye", + "TERM=xterm" + ], + "hostname": "koye", + "namespaces": { + "NEWIPC": true, + "NEWNET": true, + "NEWNS": true, + "NEWPID": true, + "NEWUTS": true + }, + "networks": [ + { + "address": "127.0.0.1/0", + "gateway": "localhost", + "mtu": 
1500, + "type": "loopback" + } + ], + "tty": true, + "user": "daemon" +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/sample_configs/attach_to_bridge.json docker.io-1.3.2~dfsg1/libcontainer/sample_configs/attach_to_bridge.json --- docker.io-0.9.1~dfsg1/libcontainer/sample_configs/attach_to_bridge.json 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/sample_configs/attach_to_bridge.json 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,202 @@ +{ + "capabilities": [ + "CHOWN", + "DAC_OVERRIDE", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL" + ], + "cgroups": { + "allowed_devices": [ + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 98 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 1, + "path": "/dev/console", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "path": "/dev/tty0", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "minor_number": 1, + "path": "/dev/tty1", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 136, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 2, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 10, + "minor_number": 200, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ], + "name": "docker-koye", + "parent": "docker" + }, + "restrict_sys": true, + "mount_config": { + "device_nodes": [ + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ] + }, + "environment": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=koye", + "TERM=xterm" + ], + "hostname": "koye", + "namespaces": { + "NEWIPC": true, + "NEWNET": true, + "NEWNS": true, + "NEWPID": true, + "NEWUTS": true + }, + "networks": [ + 
{ + "address": "127.0.0.1/0", + "gateway": "localhost", + "mtu": 1500, + "type": "loopback" + }, + { + "address": "172.17.0.101/16", + "bridge": "docker0", + "veth_prefix": "veth", + "gateway": "172.17.42.1", + "mtu": 1500, + "type": "veth" + } + ], + "tty": true +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/sample_configs/minimal.json docker.io-1.3.2~dfsg1/libcontainer/sample_configs/minimal.json --- docker.io-0.9.1~dfsg1/libcontainer/sample_configs/minimal.json 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/sample_configs/minimal.json 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,201 @@ +{ + "capabilities": [ + "CHOWN", + "DAC_OVERRIDE", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL" + ], + "cgroups": { + "allowed_devices": [ + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 98 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 1, + "path": "/dev/console", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "path": "/dev/tty0", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "minor_number": 1, + "path": "/dev/tty1", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 136, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 2, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 10, + "minor_number": 200, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ], + "name": "docker-koye", + "parent": "docker" + }, + "restrict_sys": true, + "mount_config": { + "device_nodes": [ + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ], + "mounts": [ + { + "type": "tmpfs", + "destination": "/tmp" + } + ] + }, + "environment": [ + "HOME=/", + 
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=koye", + "TERM=xterm" + ], + "hostname": "koye", + "namespaces": { + "NEWIPC": true, + "NEWNET": true, + "NEWNS": true, + "NEWPID": true, + "NEWUTS": true + }, + "networks": [ + { + "address": "127.0.0.1/0", + "gateway": "localhost", + "mtu": 1500, + "type": "loopback" + } + ], + "tty": true, + "user": "daemon" +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/sample_configs/README.md docker.io-1.3.2~dfsg1/libcontainer/sample_configs/README.md --- docker.io-0.9.1~dfsg1/libcontainer/sample_configs/README.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/sample_configs/README.md 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,5 @@ +These configuration files can be used with `nsinit` to quickly develop, test, +and experiment with features of libcontainer. + +When consuming these configuration files, copy them into your rootfs and rename +the file to `container.json` for use with `nsinit`. diff -Nru docker.io-0.9.1~dfsg1/libcontainer/sample_configs/selinux.json docker.io-1.3.2~dfsg1/libcontainer/sample_configs/selinux.json --- docker.io-0.9.1~dfsg1/libcontainer/sample_configs/selinux.json 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/sample_configs/selinux.json 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,197 @@ +{ + "capabilities": [ + "CHOWN", + "DAC_OVERRIDE", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL" + ], + "cgroups": { + "allowed_devices": [ + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 98 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 1, + "path": "/dev/console", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "path": "/dev/tty0", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "minor_number": 1, + "path": "/dev/tty1", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 136, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 2, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 10, + "minor_number": 200, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ], + "name": "docker-koye", + "parent": "docker" + }, + "restrict_sys": true, + "process_label": "system_u:system_r:svirt_lxc_net_t:s0:c164,c475", + "mount_config": { + "mount_label": "system_u:system_r:svirt_lxc_net_t:s0:c164,c475", + "device_nodes": [ + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + 
"minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ] + }, + "environment": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=koye", + "TERM=xterm" + ], + "hostname": "koye", + "namespaces": { + "NEWIPC": true, + "NEWNET": true, + "NEWNS": true, + "NEWPID": true, + "NEWUTS": true + }, + "networks": [ + { + "address": "127.0.0.1/0", + "gateway": "localhost", + "mtu": 1500, + "type": "loopback" + } + ], + "tty": true, + "user": "daemon" +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/security/capabilities/capabilities.go docker.io-1.3.2~dfsg1/libcontainer/security/capabilities/capabilities.go --- docker.io-0.9.1~dfsg1/libcontainer/security/capabilities/capabilities.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/security/capabilities/capabilities.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,56 @@ +package capabilities + +import ( + "os" + + "github.com/syndtr/gocapability/capability" +) + +const allCapabilityTypes = capability.CAPS | capability.BOUNDS + +// DropBoundingSet drops the capability bounding set to those specified in the +// container configuration. +func DropBoundingSet(capabilities []string) error { + c, err := capability.NewPid(os.Getpid()) + if err != nil { + return err + } + + keep := getEnabledCapabilities(capabilities) + c.Clear(capability.BOUNDS) + c.Set(capability.BOUNDS, keep...) + + if err := c.Apply(capability.BOUNDS); err != nil { + return err + } + + return nil +} + +// DropCapabilities drops all capabilities for the current process except those specified in the container configuration. +func DropCapabilities(capList []string) error { + c, err := capability.NewPid(os.Getpid()) + if err != nil { + return err + } + + keep := getEnabledCapabilities(capList) + c.Clear(allCapabilityTypes) + c.Set(allCapabilityTypes, keep...) + + if err := c.Apply(allCapabilityTypes); err != nil { + return err + } + return nil +} + +// getEnabledCapabilities returns the capabilities that should not be dropped by the container. 
+func getEnabledCapabilities(capList []string) []capability.Cap { + keep := []capability.Cap{} + for _, capability := range capList { + if c := GetCapability(capability); c != nil { + keep = append(keep, c.Value) + } + } + return keep +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/security/capabilities/types.go docker.io-1.3.2~dfsg1/libcontainer/security/capabilities/types.go --- docker.io-0.9.1~dfsg1/libcontainer/security/capabilities/types.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/security/capabilities/types.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,88 @@ +package capabilities + +import "github.com/syndtr/gocapability/capability" + +type ( + CapabilityMapping struct { + Key string `json:"key,omitempty"` + Value capability.Cap `json:"value,omitempty"` + } + Capabilities []*CapabilityMapping +) + +func (c *CapabilityMapping) String() string { + return c.Key +} + +func GetCapability(key string) *CapabilityMapping { + for _, capp := range capabilityList { + if capp.Key == key { + cpy := *capp + return &cpy + } + } + return nil +} + +func GetAllCapabilities() []string { + output := make([]string, len(capabilityList)) + for i, capability := range capabilityList { + output[i] = capability.String() + } + return output +} + +// Contains returns true if the specified Capability is +// in the slice +func (c Capabilities) contains(capp string) bool { + return c.get(capp) != nil +} + +func (c Capabilities) get(capp string) *CapabilityMapping { + for _, cap := range c { + if cap.Key == capp { + return cap + } + } + return nil +} + +var capabilityList = Capabilities{ + {Key: "SETPCAP", Value: capability.CAP_SETPCAP}, + {Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE}, + {Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO}, + {Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT}, + {Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN}, + {Key: "SYS_NICE", Value: capability.CAP_SYS_NICE}, + {Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE}, + {Key: "SYS_TIME", Value: capability.CAP_SYS_TIME}, + {Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG}, + {Key: "MKNOD", Value: capability.CAP_MKNOD}, + {Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE}, + {Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL}, + {Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE}, + {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN}, + {Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN}, + {Key: "SYSLOG", Value: capability.CAP_SYSLOG}, + {Key: "CHOWN", Value: capability.CAP_CHOWN}, + {Key: "NET_RAW", Value: capability.CAP_NET_RAW}, + {Key: "DAC_OVERRIDE", Value: capability.CAP_DAC_OVERRIDE}, + {Key: "FOWNER", Value: capability.CAP_FOWNER}, + {Key: "DAC_READ_SEARCH", Value: capability.CAP_DAC_READ_SEARCH}, + {Key: "FSETID", Value: capability.CAP_FSETID}, + {Key: "KILL", Value: capability.CAP_KILL}, + {Key: "SETGID", Value: capability.CAP_SETGID}, + {Key: "SETUID", Value: capability.CAP_SETUID}, + {Key: "LINUX_IMMUTABLE", Value: capability.CAP_LINUX_IMMUTABLE}, + {Key: "NET_BIND_SERVICE", Value: capability.CAP_NET_BIND_SERVICE}, + {Key: "NET_BROADCAST", Value: capability.CAP_NET_BROADCAST}, + {Key: "IPC_LOCK", Value: capability.CAP_IPC_LOCK}, + {Key: "IPC_OWNER", Value: capability.CAP_IPC_OWNER}, + {Key: "SYS_CHROOT", Value: capability.CAP_SYS_CHROOT}, + {Key: "SYS_PTRACE", Value: capability.CAP_SYS_PTRACE}, + {Key: "SYS_BOOT", Value: capability.CAP_SYS_BOOT}, + {Key: "LEASE", Value: capability.CAP_LEASE}, + {Key: "SETFCAP", Value: 
capability.CAP_SETFCAP}, + {Key: "WAKE_ALARM", Value: capability.CAP_WAKE_ALARM}, + {Key: "BLOCK_SUSPEND", Value: capability.CAP_BLOCK_SUSPEND}, +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/security/capabilities/types_test.go docker.io-1.3.2~dfsg1/libcontainer/security/capabilities/types_test.go --- docker.io-0.9.1~dfsg1/libcontainer/security/capabilities/types_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/security/capabilities/types_test.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,19 @@ +package capabilities + +import ( + "testing" +) + +func TestCapabilitiesContains(t *testing.T) { + caps := Capabilities{ + GetCapability("MKNOD"), + GetCapability("SETPCAP"), + } + + if caps.contains("SYS_ADMIN") { + t.Fatal("capabilities should not contain SYS_ADMIN") + } + if !caps.contains("MKNOD") { + t.Fatal("capabilities should contain MKNOD but does not") + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/security/restrict/restrict.go docker.io-1.3.2~dfsg1/libcontainer/security/restrict/restrict.go --- docker.io-0.9.1~dfsg1/libcontainer/security/restrict/restrict.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/security/restrict/restrict.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,53 @@ +// +build linux + +package restrict + +import ( + "fmt" + "os" + "syscall" + "time" +) + +const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV + +func mountReadonly(path string) error { + for i := 0; i < 5; i++ { + if err := syscall.Mount("", path, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil && !os.IsNotExist(err) { + switch err { + case syscall.EINVAL: + // Probably not a mountpoint, use bind-mount + if err := syscall.Mount(path, path, "", syscall.MS_BIND, ""); err != nil { + return err + } + + return syscall.Mount(path, path, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC|defaultMountFlags, "") + case syscall.EBUSY: + time.Sleep(100 * time.Millisecond) + continue + default: + return err + } + } + + return nil + } + + return fmt.Errorf("unable to mount %s as readonly max retries reached", path) +} + +// This has to be called while the container still has CAP_SYS_ADMIN (to be able to perform mounts). +// However, afterwards, CAP_SYS_ADMIN should be dropped (otherwise the user will be able to revert those changes). 
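A hedged usage sketch for the Restrict function that follows; the caller name and the mount paths here are examples only, not mandated by this patch:

```go
package restrictsketch

import "github.com/docker/libcontainer/security/restrict"

// lockDown is an illustrative caller: per the comment above it must run
// while the process still holds CAP_SYS_ADMIN, and CAP_SYS_ADMIN should
// be dropped afterwards so the remounts cannot be reverted.
func lockDown() error {
	return restrict.Restrict("proc/sys", "sys")
}
```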
+func Restrict(mounts ...string) error { + for _, dest := range mounts { + if err := mountReadonly(dest); err != nil { + return fmt.Errorf("unable to remount %s readonly: %s", dest, err) + } + } + + if err := syscall.Mount("/dev/null", "/proc/kcore", "", syscall.MS_BIND, ""); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to bind-mount /dev/null over /proc/kcore: %s", err) + } + + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/security/restrict/unsupported.go docker.io-1.3.2~dfsg1/libcontainer/security/restrict/unsupported.go --- docker.io-0.9.1~dfsg1/libcontainer/security/restrict/unsupported.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/security/restrict/unsupported.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,9 @@ +// +build !linux + +package restrict + +import "fmt" + +func Restrict(mounts ...string) error { + return fmt.Errorf("not supported") +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/selinux/selinux.go docker.io-1.3.2~dfsg1/libcontainer/selinux/selinux.go --- docker.io-0.9.1~dfsg1/libcontainer/selinux/selinux.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/selinux/selinux.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,436 @@ +// +build linux + +package selinux + +import ( + "bufio" + "crypto/rand" + "encoding/binary" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "syscall" + + "github.com/docker/docker/pkg/mount" + "github.com/docker/libcontainer/system" +) + +const ( + Enforcing = 1 + Permissive = 0 + Disabled = -1 + selinuxDir = "/etc/selinux/" + selinuxConfig = selinuxDir + "config" + selinuxTypeTag = "SELINUXTYPE" + selinuxTag = "SELINUX" + selinuxPath = "/sys/fs/selinux" + xattrNameSelinux = "security.selinux" + stRdOnly = 0x01 +) + +var ( + assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) + spaceRegex = regexp.MustCompile(`^([^=]+) (.*)$`) + mcsList = make(map[string]bool) + selinuxfs = "unknown" + selinuxEnabled = false + selinuxEnabledChecked = false +) + +type SELinuxContext map[string]string + +// SetDisabled disables selinux support for the package +func SetDisabled() { + selinuxEnabled, selinuxEnabledChecked = false, true +} + +func getSelinuxMountPoint() string { + if selinuxfs != "unknown" { + return selinuxfs + } + selinuxfs = "" + + mounts, err := mount.GetMounts() + if err != nil { + return selinuxfs + } + for _, mount := range mounts { + if mount.Fstype == "selinuxfs" { + selinuxfs = mount.Mountpoint + break + } + } + if selinuxfs != "" { + var buf syscall.Statfs_t + syscall.Statfs(selinuxfs, &buf) + if (buf.Flags & stRdOnly) == 1 { + selinuxfs = "" + } + } + return selinuxfs +} + +func SelinuxEnabled() bool { + if selinuxEnabledChecked { + return selinuxEnabled + } + selinuxEnabledChecked = true + if fs := getSelinuxMountPoint(); fs != "" { + if con, _ := Getcon(); con != "kernel" { + selinuxEnabled = true + } + } + return selinuxEnabled +} + +func readConfig(target string) (value string) { + var ( + val, key string + bufin *bufio.Reader + ) + + in, err := os.Open(selinuxConfig) + if err != nil { + return "" + } + defer in.Close() + + bufin = bufio.NewReader(in) + + for done := false; !done; { + var line string + if line, err = bufin.ReadString('\n'); err != nil { + if err != io.EOF { + return "" + } + done = true + } + line = strings.TrimSpace(line) + if len(line) == 0 { + // Skip blank lines + continue + } + if line[0] == ';' || line[0] == '#' { + // Skip comments + continue + } + if groups := assignRegex.FindStringSubmatch(line); groups != nil 
{ + key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2]) + if key == target { + return strings.Trim(val, "\"") + } + } + } + return "" +} + +func getSELinuxPolicyRoot() string { + return selinuxDir + readConfig(selinuxTypeTag) +} + +func readCon(name string) (string, error) { + var val string + + in, err := os.Open(name) + if err != nil { + return "", err + } + defer in.Close() + + _, err = fmt.Fscanf(in, "%s", &val) + return val, err +} + +func Setfilecon(path string, scon string) error { + return system.Lsetxattr(path, xattrNameSelinux, []byte(scon), 0) +} + +// Return the SELinux label for this path +func Getfilecon(path string) (string, error) { + con, err := system.Lgetxattr(path, xattrNameSelinux) + return string(con), err +} + +func Setfscreatecon(scon string) error { + return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()), scon) +} + +func Getfscreatecon() (string, error) { + return readCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid())) +} + +// Return the SELinux label of the current process thread. +func Getcon() (string, error) { + return readCon(fmt.Sprintf("/proc/self/task/%d/attr/current", syscall.Gettid())) +} + +func Getpidcon(pid int) (string, error) { + return readCon(fmt.Sprintf("/proc/%d/attr/current", pid)) +} + +func Getexeccon() (string, error) { + return readCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid())) +} + +func writeCon(name string, val string) error { + out, err := os.OpenFile(name, os.O_WRONLY, 0) + if err != nil { + return err + } + defer out.Close() + + if val != "" { + _, err = out.Write([]byte(val)) + } else { + _, err = out.Write(nil) + } + return err +} + +func Setexeccon(scon string) error { + return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), scon) +} + +func (c SELinuxContext) Get() string { + return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"]) +} + +func NewContext(scon string) SELinuxContext { + c := make(SELinuxContext) + + if len(scon) != 0 { + con := strings.SplitN(scon, ":", 4) + c["user"] = con[0] + c["role"] = con[1] + c["type"] = con[2] + c["level"] = con[3] + } + return c +} + +func ReserveLabel(scon string) { + if len(scon) != 0 { + con := strings.SplitN(scon, ":", 4) + mcsAdd(con[3]) + } +} + +func SelinuxGetEnforce() int { + var enforce int + + enforceS, err := readCon(fmt.Sprintf("%s/enforce", selinuxPath)) + if err != nil { + return -1 + } + + enforce, err = strconv.Atoi(string(enforceS)) + if err != nil { + return -1 + } + return enforce +} + +func SelinuxGetEnforceMode() int { + switch readConfig(selinuxTag) { + case "enforcing": + return Enforcing + case "permissive": + return Permissive + } + return Disabled +} + +func mcsAdd(mcs string) error { + if mcsList[mcs] { + return fmt.Errorf("MCS Label already exists") + } + mcsList[mcs] = true + return nil +} + +func mcsDelete(mcs string) { + mcsList[mcs] = false +} + +func mcsExists(mcs string) bool { + return mcsList[mcs] +} + +func IntToMcs(id int, catRange uint32) string { + var ( + SETSIZE = int(catRange) + TIER = SETSIZE + ORD = id + ) + + if id < 1 || id > 523776 { + return "" + } + + for ORD > TIER { + ORD = ORD - TIER + TIER -= 1 + } + TIER = SETSIZE - TIER + ORD = ORD + TIER + return fmt.Sprintf("s0:c%d,c%d", TIER, ORD) +} + +func uniqMcs(catRange uint32) string { + var ( + n uint32 + c1, c2 uint32 + mcs string + ) + + for { + binary.Read(rand.Reader, binary.LittleEndian, &n) + c1 = n % catRange + binary.Read(rand.Reader, 
binary.LittleEndian, &n) + c2 = n % catRange + if c1 == c2 { + continue + } else { + if c1 > c2 { + t := c1 + c1 = c2 + c2 = t + } + } + mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2) + if err := mcsAdd(mcs); err != nil { + continue + } + break + } + return mcs +} + +func FreeLxcContexts(scon string) { + if len(scon) != 0 { + con := strings.SplitN(scon, ":", 4) + mcsDelete(con[3]) + } +} + +func GetLxcContexts() (processLabel string, fileLabel string) { + var ( + val, key string + bufin *bufio.Reader + ) + + if !SelinuxEnabled() { + return "", "" + } + lxcPath := fmt.Sprintf("%s/contexts/lxc_contexts", getSELinuxPolicyRoot()) + in, err := os.Open(lxcPath) + if err != nil { + return "", "" + } + defer in.Close() + + bufin = bufio.NewReader(in) + + for done := false; !done; { + var line string + if line, err = bufin.ReadString('\n'); err != nil { + if err == io.EOF { + done = true + } else { + goto exit + } + } + line = strings.TrimSpace(line) + if len(line) == 0 { + // Skip blank lines + continue + } + if line[0] == ';' || line[0] == '#' { + // Skip comments + continue + } + if groups := assignRegex.FindStringSubmatch(line); groups != nil { + key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2]) + if key == "process" { + processLabel = strings.Trim(val, "\"") + } + if key == "file" { + fileLabel = strings.Trim(val, "\"") + } + } + } + + if processLabel == "" || fileLabel == "" { + return "", "" + } + +exit: + // mcs := IntToMcs(os.Getpid(), 1024) + mcs := uniqMcs(1024) + scon := NewContext(processLabel) + scon["level"] = mcs + processLabel = scon.Get() + scon = NewContext(fileLabel) + scon["level"] = mcs + fileLabel = scon.Get() + return processLabel, fileLabel +} + +func SecurityCheckContext(val string) error { + return writeCon(fmt.Sprintf("%s.context", selinuxPath), val) +} + +func CopyLevel(src, dest string) (string, error) { + if src == "" { + return "", nil + } + if err := SecurityCheckContext(src); err != nil { + return "", err + } + if err := SecurityCheckContext(dest); err != nil { + return "", err + } + scon := NewContext(src) + tcon := NewContext(dest) + mcsDelete(tcon["level"]) + mcsAdd(scon["level"]) + tcon["level"] = scon["level"] + return tcon.Get(), nil +} + +// Prevent users from relabeling system files +func badPrefix(fpath string) error { + var badprefixes = []string{"/usr"} + + for _, prefix := range badprefixes { + if fpath == prefix || strings.HasPrefix(fpath, fmt.Sprintf("%s/", prefix)) { + return fmt.Errorf("Relabeling content in %s is not allowed.", prefix) + } + } + return nil +} + +// Change the fpath file object to the SELinux label scon. 
+// If the fpath is a directory and recurse is true, Chcon will walk the +// directory tree setting the label +func Chcon(fpath string, scon string, recurse bool) error { + if scon == "" { + return nil + } + if err := badPrefix(fpath); err != nil { + return err + } + callback := func(p string, info os.FileInfo, err error) error { + return Setfilecon(p, scon) + } + + if recurse { + return filepath.Walk(fpath, callback) + } + + return Setfilecon(fpath, scon) +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/selinux/selinux_test.go docker.io-1.3.2~dfsg1/libcontainer/selinux/selinux_test.go --- docker.io-0.9.1~dfsg1/libcontainer/selinux/selinux_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/selinux/selinux_test.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,64 @@ +// +build linux + +package selinux_test + +import ( + "os" + "testing" + + "github.com/docker/libcontainer/selinux" +) + +func testSetfilecon(t *testing.T) { + if selinux.SelinuxEnabled() { + tmp := "selinux_test" + out, _ := os.OpenFile(tmp, os.O_WRONLY|os.O_CREATE, 0644) + out.Close() + err := selinux.Setfilecon(tmp, "system_u:object_r:bin_t:s0") + if err != nil { + t.Log("Setfilecon failed") + t.Fatal(err) + } + os.Remove(tmp) + } +} + +func TestSELinux(t *testing.T) { + var ( + err error + plabel, flabel string + ) + + if selinux.SelinuxEnabled() { + t.Log("Enabled") + plabel, flabel = selinux.GetLxcContexts() + t.Log(plabel) + t.Log(flabel) + selinux.FreeLxcContexts(plabel) + plabel, flabel = selinux.GetLxcContexts() + t.Log(plabel) + t.Log(flabel) + selinux.FreeLxcContexts(plabel) + t.Log("getenforce ", selinux.SelinuxGetEnforce()) + t.Log("getenforcemode ", selinux.SelinuxGetEnforceMode()) + pid := os.Getpid() + t.Logf("PID:%d MCS:%s", pid, selinux.IntToMcs(pid, 1023)) + err = selinux.Setfscreatecon("unconfined_u:unconfined_r:unconfined_t:s0") + if err == nil { + t.Log(selinux.Getfscreatecon()) + } else { + t.Log("setfscreatecon failed", err) + t.Fatal(err) + } + err = selinux.Setfscreatecon("") + if err == nil { + t.Log(selinux.Getfscreatecon()) + } else { + t.Log("setfscreatecon failed", err) + t.Fatal(err) + } + t.Log(selinux.Getpidcon(1)) + } else { + t.Log("Disabled") + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/state.go docker.io-1.3.2~dfsg1/libcontainer/state.go --- docker.io-0.9.1~dfsg1/libcontainer/state.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/state.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,77 @@ +package libcontainer + +import ( + "encoding/json" + "os" + "path/filepath" + + "github.com/docker/libcontainer/network" +) + +// State represents a running container's state +type State struct { + // InitPid is the init process id in the parent namespace + InitPid int `json:"init_pid,omitempty"` + + // InitStartTime is the init process start time + InitStartTime string `json:"init_start_time,omitempty"` + + // Network runtime state. + NetworkState network.NetworkState `json:"network_state,omitempty"` + + // Path to all the cgroups setup for a container. Key is cgroup subsystem name. + CgroupPaths map[string]string `json:"cgroup_paths,omitempty"` +} + +// The running state of the container. +type RunState int + +const ( + // The name of the runtime state file + stateFile = "state.json" + + // The container exists and is running. + Running RunState = iota + + // The container exists, it is in the process of being paused. + Pausing + + // The container exists, but all its processes are paused. + Paused + + // The container does not exist. 
+ Destroyed +) + +// SaveState writes the container's runtime state to a state.json file +// in the specified path +func SaveState(basePath string, state *State) error { + f, err := os.Create(filepath.Join(basePath, stateFile)) + if err != nil { + return err + } + defer f.Close() + + return json.NewEncoder(f).Encode(state) +} + +// GetState reads the state.json file for a running container +func GetState(basePath string) (*State, error) { + f, err := os.Open(filepath.Join(basePath, stateFile)) + if err != nil { + return nil, err + } + defer f.Close() + + var state *State + if err := json.NewDecoder(f).Decode(&state); err != nil { + return nil, err + } + + return state, nil +} + +// DeleteState deletes the state.json file +func DeleteState(basePath string) error { + return os.Remove(filepath.Join(basePath, stateFile)) +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/syncpipe/sync_pipe.go docker.io-1.3.2~dfsg1/libcontainer/syncpipe/sync_pipe.go --- docker.io-0.9.1~dfsg1/libcontainer/syncpipe/sync_pipe.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/syncpipe/sync_pipe.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,105 @@ +package syncpipe + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "syscall" +) + +// SyncPipe allows communication to and from the child process +// to its parent and allows the two independent processes to +// synchronize their state. +type SyncPipe struct { + parent, child *os.File +} + +func NewSyncPipeFromFd(parentFd, childFd uintptr) (*SyncPipe, error) { + s := &SyncPipe{} + + if parentFd > 0 { + s.parent = os.NewFile(parentFd, "parentPipe") + } else if childFd > 0 { + s.child = os.NewFile(childFd, "childPipe") + } else { + return nil, fmt.Errorf("no valid sync pipe fd specified") + } + + return s, nil +} + +func (s *SyncPipe) Child() *os.File { + return s.child +} + +func (s *SyncPipe) Parent() *os.File { + return s.parent +} + +func (s *SyncPipe) SendToChild(v interface{}) error { + data, err := json.Marshal(v) + if err != nil { + return err + } + + s.parent.Write(data) + + return syscall.Shutdown(int(s.parent.Fd()), syscall.SHUT_WR) +} + +func (s *SyncPipe) ReadFromChild() error { + data, err := ioutil.ReadAll(s.parent) + if err != nil { + return err + } + + if len(data) > 0 { + return fmt.Errorf("%s", data) + } + + return nil +} + +func (s *SyncPipe) ReadFromParent(v interface{}) error { + data, err := ioutil.ReadAll(s.child) + if err != nil { + return fmt.Errorf("error reading from sync pipe %s", err) + } + + if len(data) > 0 { + if err := json.Unmarshal(data, v); err != nil { + return err + } + } + + return nil +} + +func (s *SyncPipe) ReportChildError(err error) { + // ensure that any data sent from the parent is consumed so it doesn't + // receive ECONNRESET when the child writes to the pipe. 
+ ioutil.ReadAll(s.child) + + s.child.Write([]byte(err.Error())) + s.CloseChild() +} + +func (s *SyncPipe) Close() error { + if s.parent != nil { + s.parent.Close() + } + + if s.child != nil { + s.child.Close() + } + + return nil +} + +func (s *SyncPipe) CloseChild() { + if s.child != nil { + s.child.Close() + s.child = nil + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/syncpipe/sync_pipe_linux.go docker.io-1.3.2~dfsg1/libcontainer/syncpipe/sync_pipe_linux.go --- docker.io-0.9.1~dfsg1/libcontainer/syncpipe/sync_pipe_linux.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/syncpipe/sync_pipe_linux.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,20 @@ +package syncpipe + +import ( + "os" + "syscall" +) + +func NewSyncPipe() (s *SyncPipe, err error) { + s = &SyncPipe{} + + fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0) + if err != nil { + return nil, err + } + + s.child = os.NewFile(uintptr(fds[0]), "child syncpipe") + s.parent = os.NewFile(uintptr(fds[1]), "parent syncpipe") + + return s, nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/syncpipe/sync_pipe_test.go docker.io-1.3.2~dfsg1/libcontainer/syncpipe/sync_pipe_test.go --- docker.io-0.9.1~dfsg1/libcontainer/syncpipe/sync_pipe_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/syncpipe/sync_pipe_test.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,72 @@ +package syncpipe + +import ( + "fmt" + "syscall" + "testing" +) + +type testStruct struct { + Name string +} + +func TestSendErrorFromChild(t *testing.T) { + pipe, err := NewSyncPipe() + if err != nil { + t.Fatal(err) + } + defer func() { + if err := pipe.Close(); err != nil { + t.Fatal(err) + } + }() + + childfd, err := syscall.Dup(int(pipe.Child().Fd())) + if err != nil { + t.Fatal(err) + } + childPipe, _ := NewSyncPipeFromFd(0, uintptr(childfd)) + + pipe.CloseChild() + pipe.SendToChild(nil) + + expected := "something bad happened" + childPipe.ReportChildError(fmt.Errorf(expected)) + + childError := pipe.ReadFromChild() + if childError == nil { + t.Fatal("expected an error to be returned but did not receive anything") + } + + if childError.Error() != expected { + t.Fatalf("expected %q but received error message %q", expected, childError.Error()) + } +} + +func TestSendPayloadToChild(t *testing.T) { + pipe, err := NewSyncPipe() + if err != nil { + t.Fatal(err) + } + + defer func() { + if err := pipe.Close(); err != nil { + t.Fatal(err) + } + }() + + expected := "libcontainer" + + if err := pipe.SendToChild(testStruct{Name: expected}); err != nil { + t.Fatal(err) + } + + var s *testStruct + if err := pipe.ReadFromParent(&s); err != nil { + t.Fatal(err) + } + + if s.Name != expected { + t.Fatalf("expected name %q but received %q", expected, s.Name) + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/system/linux.go docker.io-1.3.2~dfsg1/libcontainer/system/linux.go --- docker.io-0.9.1~dfsg1/libcontainer/system/linux.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/system/linux.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,60 @@ +// +build linux + +package system + +import ( + "os/exec" + "syscall" + "unsafe" +) + +func Execv(cmd string, args []string, env []string) error { + name, err := exec.LookPath(cmd) + if err != nil { + return err + } + + return syscall.Exec(name, args, env) +} + +func ParentDeathSignal(sig uintptr) error { + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 { + return 
err
+ }
+ return nil
+}
+
+func GetParentDeathSignal() (int, error) {
+ var sig int
+
+ _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0)
+
+ if err != 0 {
+ return -1, err
+ }
+
+ return sig, nil
+}
+
+func SetKeepCaps() error {
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 1, 0); err != 0 {
+ return err
+ }
+
+ return nil
+}
+
+func ClearKeepCaps() error {
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 0, 0); err != 0 {
+ return err
+ }
+
+ return nil
+}
+
+func Setctty() error {
+ if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 {
+ return err
+ }
+ return nil
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/system/proc.go docker.io-1.3.2~dfsg1/libcontainer/system/proc.go
--- docker.io-0.9.1~dfsg1/libcontainer/system/proc.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/system/proc.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,27 @@
+package system
+
+import (
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+// look in /proc to find the process start time so that we can verify
+// that this pid has started after ourselves
+func GetProcessStartTime(pid int) (string, error) {
+ data, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat"))
+ if err != nil {
+ return "", err
+ }
+
+ parts := strings.Split(string(data), " ")
+ // the starttime is located at pos 22
+ // from the man page
+ //
+ // starttime %llu (was %lu before Linux 2.6)
+ // (22) The time the process started after system boot. In kernels before Linux 2.6, this
+ // value was expressed in jiffies. Since Linux 2.6, the value is expressed in clock ticks
+ // (divide by sysconf(_SC_CLK_TCK)).
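+ //
+ // For example, a (heavily truncated) stat line looks like:
+ //
+ //   1432 (init) S 1 1432 1432 0 ... <starttime> ...
+ //
+ // so after splitting on spaces the start time is parts[21]. Note that
+ // this simple split assumes the comm field (field 2, in parentheses)
+ // contains no spaces; a process name like "(tmux: server)" would shift
+ // every later field by one.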
+ return parts[22-1], nil // starts at 1
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/system/setns_linux.go docker.io-1.3.2~dfsg1/libcontainer/system/setns_linux.go
--- docker.io-0.9.1~dfsg1/libcontainer/system/setns_linux.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/system/setns_linux.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,31 @@
+package system
+
+import (
+ "fmt"
+ "runtime"
+ "syscall"
+)
+
+// Via http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=7b21fddd087678a70ad64afc0f632e0f1071b092
+//
+// We need different setns values for the different platforms and architectures.
+// We are declaring the macro here because the SETNS syscall does not exist in the stdlib.
+var setNsMap = map[string]uintptr{
+ "linux/386": 346,
+ "linux/amd64": 308,
+ "linux/arm": 374,
+}
+
+func Setns(fd uintptr, flags uintptr) error {
+ ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)]
+ if !exists {
+ return fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH)
+ }
+
+ _, _, err := syscall.RawSyscall(ns, fd, flags, 0)
+ if err != 0 {
+ return err
+ }
+
+ return nil
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/system/sysconfig.go docker.io-1.3.2~dfsg1/libcontainer/system/sysconfig.go
--- docker.io-0.9.1~dfsg1/libcontainer/system/sysconfig.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/system/sysconfig.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,12 @@
+// +build cgo
+
+package system
+
+/*
+#include <unistd.h>
+*/
+import "C"
+
+func GetClockTicks() int {
+ return int(C.sysconf(C._SC_CLK_TCK))
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/system/sysconfig_notcgo.go docker.io-1.3.2~dfsg1/libcontainer/system/sysconfig_notcgo.go
--- docker.io-0.9.1~dfsg1/libcontainer/system/sysconfig_notcgo.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/system/sysconfig_notcgo.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,8 @@
+// +build !cgo
+
+package system
+
+func GetClockTicks() int {
+ // TODO figure out a better alternative for platforms where we're missing cgo
+ return 100
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/system/xattrs_linux.go docker.io-1.3.2~dfsg1/libcontainer/system/xattrs_linux.go
--- docker.io-0.9.1~dfsg1/libcontainer/system/xattrs_linux.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/system/xattrs_linux.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,99 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var _zero uintptr
+
+// Returns the size of the xattr list and a nil error.
+// Requires a path; takes an allocated []byte or nil as the last argument.
+func Llistxattr(path string, dest []byte) (size int, err error) {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return -1, err
+ }
+ var newpathBytes unsafe.Pointer
+ if len(dest) > 0 {
+ newpathBytes = unsafe.Pointer(&dest[0])
+ } else {
+ newpathBytes = unsafe.Pointer(&_zero)
+ }
+
+ _size, _, errno := syscall.Syscall6(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(newpathBytes), uintptr(len(dest)), 0, 0, 0)
+ size = int(_size)
+ if errno != 0 {
+ return -1, errno
+ }
+
+ return size, nil
+}
+
+// Returns a []byte slice if the xattr is set and nil otherwise.
+// Requires a path and its attribute as arguments.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ var sz int
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return nil, err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if
err != nil {
+ return nil, err
+ }
+
+ // Start with a 128 length byte array
+ sz = 128
+ dest := make([]byte, sz)
+ destBytes := unsafe.Pointer(&dest[0])
+ _sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+
+ switch {
+ case errno == syscall.ENODATA:
+ return nil, errno
+ case errno == syscall.ENOTSUP:
+ return nil, errno
+ case errno == syscall.ERANGE:
+ // The 128 byte array might just not be big enough, so a
+ // zero-length dummy buffer (a nil pointer, length 0) is used
+ // to query the real size of the xattrs on disk
+ _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(unsafe.Pointer(nil)), uintptr(0), 0, 0)
+ sz = int(_sz)
+ if sz < 0 {
+ return nil, errno
+ }
+ dest = make([]byte, sz)
+ destBytes := unsafe.Pointer(&dest[0])
+ _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+ if errno != 0 {
+ return nil, errno
+ }
+ case errno != 0:
+ return nil, errno
+ }
+ sz = int(_sz)
+ return dest[:sz], nil
+}
+
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return err
+ }
+ var dataBytes unsafe.Pointer
+ if len(data) > 0 {
+ dataBytes = unsafe.Pointer(&data[0])
+ } else {
+ dataBytes = unsafe.Pointer(&_zero)
+ }
+ _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/.travis.yml docker.io-1.3.2~dfsg1/libcontainer/.travis.yml
--- docker.io-0.9.1~dfsg1/libcontainer/.travis.yml 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/.travis.yml 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,36 @@
+language: go
+go: 1.3
+
+# let us have pretty experimental Docker-based Travis workers
+sudo: false
+
+env:
+ - TRAVIS_GLOBAL_WTF=1
+ - _GOOS=linux _GOARCH=amd64 CGO_ENABLED=1
+ - _GOOS=linux _GOARCH=amd64 CGO_ENABLED=0
+# - _GOOS=linux _GOARCH=386 CGO_ENABLED=1 # TODO add this once Travis can handle it (https://github.com/travis-ci/travis-ci/issues/2207#issuecomment-49625061)
+ - _GOOS=linux _GOARCH=386 CGO_ENABLED=0
+ - _GOOS=linux _GOARCH=arm CGO_ENABLED=0
+
+install:
+ - go get code.google.com/p/go.tools/cmd/cover
+ - mkdir -pv "${GOPATH%%:*}/src/github.com/docker" && [ -d "${GOPATH%%:*}/src/github.com/docker/libcontainer" ] || ln -sv "$(readlink -f .)" "${GOPATH%%:*}/src/github.com/docker/libcontainer"
+ - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then
+ gvm cross "$_GOOS" "$_GOARCH";
+ export GOOS="$_GOOS" GOARCH="$_GOARCH";
+ fi
+ - export GOPATH="$GOPATH:$(pwd)/vendor"
+ - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then go env; fi
+ - go get -d -v ./... # TODO remove this if /docker/docker gets purged from our includes
+ - if [ "$TRAVIS_GLOBAL_WTF" ]; then
+ export DOCKER_PATH="${GOPATH%%:*}/src/github.com/docker/docker";
+ mkdir -p "$DOCKER_PATH/hack/make";
+ ( cd "$DOCKER_PATH/hack/make" && wget -c 'https://raw.githubusercontent.com/docker/docker/master/hack/make/'{.validate,validate-dco,validate-gofmt} );
+ sed -i 's!docker/docker!docker/libcontainer!' 
"$DOCKER_PATH/hack/make/.validate"; + fi + +script: + - if [ "$TRAVIS_GLOBAL_WTF" ]; then bash "$DOCKER_PATH/hack/make/validate-dco"; fi + - if [ "$TRAVIS_GLOBAL_WTF" ]; then bash "$DOCKER_PATH/hack/make/validate-gofmt"; fi + - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then make direct-build; fi + - if [ -z "$TRAVIS_GLOBAL_WTF" -a "$GOARCH" != 'arm' ]; then make direct-test-short; fi diff -Nru docker.io-0.9.1~dfsg1/libcontainer/types.go docker.io-1.3.2~dfsg1/libcontainer/types.go --- docker.io-0.9.1~dfsg1/libcontainer/types.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/types.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,11 @@ +package libcontainer + +import ( + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/network" +) + +type ContainerStats struct { + NetworkStats *network.NetworkStats `json:"network_stats,omitempty"` + CgroupStats *cgroups.Stats `json:"cgroup_stats,omitempty"` +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/update-vendor.sh docker.io-1.3.2~dfsg1/libcontainer/update-vendor.sh --- docker.io-0.9.1~dfsg1/libcontainer/update-vendor.sh 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/update-vendor.sh 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$BASH_SOURCE")" + +# Downloads dependencies into vendor/ directory +mkdir -p vendor +cd vendor + +clone() { + vcs=$1 + pkg=$2 + rev=$3 + + pkg_url=https://$pkg + target_dir=src/$pkg + + echo -n "$pkg @ $rev: " + + if [ -d $target_dir ]; then + echo -n 'rm old, ' + rm -fr $target_dir + fi + + echo -n 'clone, ' + case $vcs in + git) + git clone --quiet --no-checkout $pkg_url $target_dir + ( cd $target_dir && git reset --quiet --hard $rev ) + ;; + hg) + hg clone --quiet --updaterev $rev $pkg_url $target_dir + ;; + esac + + echo -n 'rm VCS, ' + ( cd $target_dir && rm -rf .{git,hg} ) + + echo done +} + +# the following lines are in sorted order, FYI +clone git github.com/codegangsta/cli 1.1.0 +clone git github.com/coreos/go-systemd v2 +clone git github.com/godbus/dbus v1 +clone git github.com/syndtr/gocapability 3c85049eae + +# intentionally not vendoring Docker itself... 
that'd be a circle :) diff -Nru docker.io-0.9.1~dfsg1/libcontainer/user/MAINTAINERS docker.io-1.3.2~dfsg1/libcontainer/user/MAINTAINERS --- docker.io-0.9.1~dfsg1/libcontainer/user/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/user/MAINTAINERS 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1 @@ +Tianon Gravi (@tianon) diff -Nru docker.io-0.9.1~dfsg1/libcontainer/user/user.go docker.io-1.3.2~dfsg1/libcontainer/user/user.go --- docker.io-0.9.1~dfsg1/libcontainer/user/user.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/user/user.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,258 @@ +package user + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +const ( + minId = 0 + maxId = 1<<31 - 1 //for 32-bit systems compatibility +) + +var ( + ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", minId, maxId) +) + +type User struct { + Name string + Pass string + Uid int + Gid int + Gecos string + Home string + Shell string +} + +type Group struct { + Name string + Pass string + Gid int + List []string +} + +func parseLine(line string, v ...interface{}) { + if line == "" { + return + } + + parts := strings.Split(line, ":") + for i, p := range parts { + if len(v) <= i { + // if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files + break + } + + switch e := v[i].(type) { + case *string: + // "root", "adm", "/bin/bash" + *e = p + case *int: + // "0", "4", "1000" + // ignore string to int conversion errors, for great "tolerance" of naughty configuration files + *e, _ = strconv.Atoi(p) + case *[]string: + // "", "root", "root,adm,daemon" + if p != "" { + *e = strings.Split(p, ",") + } else { + *e = []string{} + } + default: + // panic, because this is a programming/logic error, not a runtime one + panic("parseLine expects only pointers! 
argument " + strconv.Itoa(i) + " is not a pointer!") + } + } +} + +func ParsePasswd() ([]*User, error) { + return ParsePasswdFilter(nil) +} + +func ParsePasswdFilter(filter func(*User) bool) ([]*User, error) { + f, err := os.Open("/etc/passwd") + if err != nil { + return nil, err + } + defer f.Close() + return parsePasswdFile(f, filter) +} + +func parsePasswdFile(r io.Reader, filter func(*User) bool) ([]*User, error) { + var ( + s = bufio.NewScanner(r) + out = []*User{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" { + continue + } + + // see: man 5 passwd + // name:password:UID:GID:GECOS:directory:shell + // Name:Pass:Uid:Gid:Gecos:Home:Shell + // root:x:0:0:root:/root:/bin/bash + // adm:x:3:4:adm:/var/adm:/bin/false + p := &User{} + parseLine( + text, + &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell, + ) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +func ParseGroup() ([]*Group, error) { + return ParseGroupFilter(nil) +} + +func ParseGroupFilter(filter func(*Group) bool) ([]*Group, error) { + f, err := os.Open("/etc/group") + if err != nil { + return nil, err + } + defer f.Close() + return parseGroupFile(f, filter) +} + +func parseGroupFile(r io.Reader, filter func(*Group) bool) ([]*Group, error) { + var ( + s = bufio.NewScanner(r) + out = []*Group{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + text := s.Text() + if text == "" { + continue + } + + // see: man 5 group + // group_name:password:GID:user_list + // Name:Pass:Gid:List + // root:x:0:root + // adm:x:4:root,adm,daemon + p := &Group{} + parseLine( + text, + &p.Name, &p.Pass, &p.Gid, &p.List, + ) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +// Given a string like "user", "1000", "user:group", "1000:1000", returns the uid, gid, list of supplementary group IDs, and home directory, if available and/or applicable. +func GetUserGroupSupplementaryHome(userSpec string, defaultUid, defaultGid int, defaultHome string) (int, int, []int, string, error) { + var ( + uid = defaultUid + gid = defaultGid + suppGids = []int{} + home = defaultHome + + userArg, groupArg string + ) + + // allow for userArg to have either "user" syntax, or optionally "user:group" syntax + parseLine(userSpec, &userArg, &groupArg) + + users, err := ParsePasswdFilter(func(u *User) bool { + if userArg == "" { + return u.Uid == uid + } + return u.Name == userArg || strconv.Itoa(u.Uid) == userArg + }) + if err != nil && !os.IsNotExist(err) { + if userArg == "" { + userArg = strconv.Itoa(uid) + } + return 0, 0, nil, "", fmt.Errorf("Unable to find user %v: %v", userArg, err) + } + + haveUser := users != nil && len(users) > 0 + if haveUser { + // if we found any user entries that matched our filter, let's take the first one as "correct" + uid = users[0].Uid + gid = users[0].Gid + home = users[0].Home + } else if userArg != "" { + // we asked for a user but didn't find them... 
let's check to see if we wanted a numeric user
+ uid, err = strconv.Atoi(userArg)
+ if err != nil {
+ // not numeric - we have to bail
+ return 0, 0, nil, "", fmt.Errorf("Unable to find user %v", userArg)
+ }
+ if uid < minId || uid > maxId {
+ return 0, 0, nil, "", ErrRange
+ }
+
+ // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit
+ }
+
+ if groupArg != "" || (haveUser && users[0].Name != "") {
+ groups, err := ParseGroupFilter(func(g *Group) bool {
+ if groupArg != "" {
+ return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg
+ }
+ for _, u := range g.List {
+ if u == users[0].Name {
+ return true
+ }
+ }
+ return false
+ })
+ if err != nil && !os.IsNotExist(err) {
+ return 0, 0, nil, "", fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err)
+ }
+
+ haveGroup := groups != nil && len(groups) > 0
+ if groupArg != "" {
+ if haveGroup {
+ // if we found any group entries that matched our filter, let's take the first one as "correct"
+ gid = groups[0].Gid
+ } else {
+ // we asked for a group but didn't find it... let's check to see if we wanted a numeric group
+ gid, err = strconv.Atoi(groupArg)
+ if err != nil {
+ // not numeric - we have to bail
+ return 0, 0, nil, "", fmt.Errorf("Unable to find group %v", groupArg)
+ }
+ if gid < minId || gid > maxId {
+ return 0, 0, nil, "", ErrRange
+ }
+
+ // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit
+ }
+ } else if haveGroup {
+ suppGids = make([]int, len(groups))
+ for i, group := range groups {
+ suppGids[i] = group.Gid
+ }
+ }
+ }
+
+ return uid, gid, suppGids, home, nil
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/user/user_test.go docker.io-1.3.2~dfsg1/libcontainer/user/user_test.go
--- docker.io-0.9.1~dfsg1/libcontainer/user/user_test.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/user/user_test.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,94 @@
+package user
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestUserParseLine(t *testing.T) {
+ var (
+ a, b string
+ c []string
+ d int
+ )
+
+ parseLine("", &a, &b)
+ if a != "" || b != "" {
+ t.Fatalf("a and b should be empty ('%v', '%v')", a, b)
+ }
+
+ parseLine("a", &a, &b)
+ if a != "a" || b != "" {
+ t.Fatalf("a should be 'a' and b should be empty ('%v', '%v')", a, b)
+ }
+
+ parseLine("bad boys:corny cows", &a, &b)
+ if a != "bad boys" || b != "corny cows" {
+ t.Fatalf("a should be 'bad boys' and b should be 'corny cows' ('%v', '%v')", a, b)
+ }
+
+ parseLine("", &c)
+ if len(c) != 0 {
+ t.Fatalf("c should be empty (%#v)", c)
+ }
+
+ parseLine("d,e,f:g:h:i,j,k", &c, &a, &b, &c)
+ if a != "g" || b != "h" || len(c) != 3 || c[0] != "i" || c[1] != "j" || c[2] != "k" {
+ t.Fatalf("a should be 'g', b should be 'h', and c should be ['i','j','k'] ('%v', '%v', '%#v')", a, b, c)
+ }
+
+ parseLine("::::::::::", &a, &b, &c)
+ if a != "" || b != "" || len(c) != 0 {
+ t.Fatalf("a, b, and c should all be empty ('%v', '%v', '%#v')", a, b, c)
+ }
+
+ parseLine("not a number", &d)
+ if d != 0 {
+ t.Fatalf("d should be 0 (%v)", d)
+ }
+
+ parseLine("b:12:c", &a, &d, &b)
+ if a != "b" || b != "c" || d != 12 {
+ t.Fatalf("a should be 'b' and b should be 'c', and d should be 12 ('%v', '%v', %v)", a, b, d)
+ }
+}
+
+func TestUserParsePasswd(t *testing.T) {
+ users, err := parsePasswdFile(strings.NewReader(`
+root:x:0:0:root:/root:/bin/bash
+adm:x:3:4:adm:/var/adm:/bin/false
+this is just some garbage data
+`), nil)
+ if err != nil {
+ t.Fatalf("Unexpected 
error: %v", err) + } + if len(users) != 3 { + t.Fatalf("Expected 3 users, got %v", len(users)) + } + if users[0].Uid != 0 || users[0].Name != "root" { + t.Fatalf("Expected users[0] to be 0 - root, got %v - %v", users[0].Uid, users[0].Name) + } + if users[1].Uid != 3 || users[1].Name != "adm" { + t.Fatalf("Expected users[1] to be 3 - adm, got %v - %v", users[1].Uid, users[1].Name) + } +} + +func TestUserParseGroup(t *testing.T) { + groups, err := parseGroupFile(strings.NewReader(` +root:x:0:root +adm:x:4:root,adm,daemon +this is just some garbage data +`), nil) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if len(groups) != 3 { + t.Fatalf("Expected 3 groups, got %v", len(groups)) + } + if groups[0].Gid != 0 || groups[0].Name != "root" || len(groups[0].List) != 1 { + t.Fatalf("Expected groups[0] to be 0 - root - 1 member, got %v - %v - %v", groups[0].Gid, groups[0].Name, len(groups[0].List)) + } + if groups[1].Gid != 4 || groups[1].Name != "adm" || len(groups[1].List) != 3 { + t.Fatalf("Expected groups[1] to be 4 - adm - 3 members, got %v - %v - %v", groups[1].Gid, groups[1].Name, len(groups[1].List)) + } +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/utils/utils.go docker.io-1.3.2~dfsg1/libcontainer/utils/utils.go --- docker.io-0.9.1~dfsg1/libcontainer/utils/utils.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/utils/utils.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,55 @@ +package utils + +import ( + "crypto/rand" + "encoding/hex" + "io" + "io/ioutil" + "path/filepath" + "strconv" + "syscall" +) + +// GenerateRandomName returns a new name joined with a prefix. This size +// specified is used to truncate the randomly generated value +func GenerateRandomName(prefix string, size int) (string, error) { + id := make([]byte, 32) + if _, err := io.ReadFull(rand.Reader, id); err != nil { + return "", err + } + return prefix + hex.EncodeToString(id)[:size], nil +} + +// ResolveRootfs ensures that the current working directory is +// not a symlink and returns the absolute path to the rootfs +func ResolveRootfs(uncleanRootfs string) (string, error) { + rootfs, err := filepath.Abs(uncleanRootfs) + if err != nil { + return "", err + } + return filepath.EvalSymlinks(rootfs) +} + +func CloseExecFrom(minFd int) error { + fdList, err := ioutil.ReadDir("/proc/self/fd") + if err != nil { + return err + } + for _, fi := range fdList { + fd, err := strconv.Atoi(fi.Name()) + if err != nil { + // ignore non-numeric file names + continue + } + + if fd < minFd { + // ignore descriptors lower than our specified minimum + continue + } + + // intentionally ignore errors from syscall.CloseOnExec + syscall.CloseOnExec(fd) + // the cases where this might fail are basically file descriptors that have already been closed (including and especially the one that was created when ioutil.ReadDir did the "opendir" syscall) + } + return nil +} diff -Nru docker.io-0.9.1~dfsg1/libcontainer/xattr/xattr.go docker.io-1.3.2~dfsg1/libcontainer/xattr/xattr.go --- docker.io-0.9.1~dfsg1/libcontainer/xattr/xattr.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libcontainer/xattr/xattr.go 2014-10-14 06:23:12.000000000 +0000 @@ -0,0 +1,53 @@ +// +build linux + +package xattr + +import ( + "syscall" + + "github.com/docker/libcontainer/system" +) + +func XattrEnabled(path string) bool { + if Setxattr(path, "user.test", "") == syscall.ENOTSUP { + return false + } + return true +} + +func stringsfromByte(buf []byte) (result []string) { + offset := 0 + for index, b := range buf 
{
+ if b == 0 {
+ result = append(result, string(buf[offset:index]))
+ offset = index + 1
+ }
+ }
+ return
+}
+
+func Listxattr(path string) ([]string, error) {
+ size, err := system.Llistxattr(path, nil)
+ if err != nil {
+ return nil, err
+ }
+ buf := make([]byte, size)
+ read, err := system.Llistxattr(path, buf)
+ if err != nil {
+ return nil, err
+ }
+ names := stringsfromByte(buf[:read])
+ return names, nil
+}
+
+func Getxattr(path, attr string) (string, error) {
+ value, err := system.Lgetxattr(path, attr)
+ if err != nil {
+ return "", err
+ }
+ return string(value), nil
+}
+
+func Setxattr(path, xattr, value string) error {
+ return system.Lsetxattr(path, xattr, []byte(value), 0)
+}
diff -Nru docker.io-0.9.1~dfsg1/libcontainer/xattr/xattr_test.go docker.io-1.3.2~dfsg1/libcontainer/xattr/xattr_test.go
--- docker.io-0.9.1~dfsg1/libcontainer/xattr/xattr_test.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libcontainer/xattr/xattr_test.go 2014-10-14 06:23:12.000000000 +0000
@@ -0,0 +1,77 @@
+// +build linux
+
+package xattr_test
+
+import (
+ "os"
+ "testing"
+
+ "github.com/docker/libcontainer/xattr"
+)
+
+func testXattr(t *testing.T) {
+ tmp := "xattr_test"
+ out, err := os.OpenFile(tmp, os.O_WRONLY, 0)
+ if err != nil {
+ t.Fatal("failed")
+ }
+ attr := "user.test"
+ out.Close()
+
+ if !xattr.XattrEnabled(tmp) {
+ t.Log("Disabled")
+ t.Fatal("failed")
+ }
+ t.Log("Success")
+
+ err = xattr.Setxattr(tmp, attr, "test")
+ if err != nil {
+ t.Fatal("failed")
+ }
+
+ var value string
+ value, err = xattr.Getxattr(tmp, attr)
+ if err != nil {
+ t.Fatal("failed")
+ }
+ if value != "test" {
+ t.Fatal("failed")
+ }
+ t.Log("Success")
+
+ var names []string
+ names, err = xattr.Listxattr(tmp)
+ if err != nil {
+ t.Fatal("failed")
+ }
+
+ var found int
+ for _, name := range names {
+ if name == attr {
+ found = 1
+ }
+ }
+ // Listxattr doesn't return trusted.* and system.* namespace
+ // attrs when run in unprivileged mode.
+ if found != 1 {
+ t.Fatal("failed")
+ }
+ t.Log("Success")
+
+ big := "0000000000000000000000000000000000000000000000000000000000000000000008c6419ad822dfe29283fb3ac98dcc5908810cb31f4cfe690040c42c144b7492eicompslf20dxmlpgz"
+ // Test for long xattrs larger than 128 bytes
+ err = xattr.Setxattr(tmp, attr, big)
+ if err != nil {
+ t.Fatal("failed to add long value")
+ }
+ value, err = xattr.Getxattr(tmp, attr)
+ if err != nil {
+ t.Fatal("failed to get long value")
+ }
+ t.Log("Success")
+
+ if value != big {
+ t.Fatal("failed, value doesn't match")
+ }
+ t.Log("Success")
+}
diff -Nru docker.io-0.9.1~dfsg1/libtrust/certificates.go docker.io-1.3.2~dfsg1/libtrust/certificates.go
--- docker.io-0.9.1~dfsg1/libtrust/certificates.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/certificates.go 2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+ "crypto/rand"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "math/big"
+ "net"
+ "time"
+)
+
+type certTemplateInfo struct {
+ commonName string
+ domains []string
+ ipAddresses []net.IP
+ isCA bool
+ clientAuth bool
+ serverAuth bool
+}
+
+func generateCertTemplate(info *certTemplateInfo) *x509.Certificate {
+ // Generate a certificate template which is valid from the past week to
+ // 10 years from now. The usage of the certificate depends on the
+ // specified fields in the given certTemplateInfo object. 
+ var ( + keyUsage x509.KeyUsage + extKeyUsage []x509.ExtKeyUsage + ) + + if info.isCA { + keyUsage = x509.KeyUsageCertSign + } + + if info.clientAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if info.serverAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + return &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: info.commonName, + }, + NotBefore: time.Now().Add(-time.Hour * 24 * 7), + NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), + DNSNames: info.domains, + IPAddresses: info.ipAddresses, + IsCA: info.isCA, + KeyUsage: keyUsage, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: info.isCA, + } +} + +func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { + pubCertTemplate := generateCertTemplate(subInfo) + privCertTemplate := generateCertTemplate(issInfo) + + certDER, err := x509.CreateCertificate( + rand.Reader, pubCertTemplate, privCertTemplate, + pub.CryptoPublicKey(), priv.CryptoPrivateKey(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create certificate: %s", err) + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %s", err) + } + + return +} + +// GenerateSelfSignedServerCert creates a self-signed certificate for the +// given key which is to be used for TLS servers with the given domains and +// IP addresses. +func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + domains: domains, + ipAddresses: ipAddresses, + serverAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateSelfSignedClientCert creates a self-signed certificate for the +// given key which is to be used for TLS clients. +func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + clientAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateCACert creates a certificate which can be used as a trusted +// certificate authority. +func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { + subjectInfo := &certTemplateInfo{ + commonName: trustedKey.KeyID(), + isCA: true, + } + issuerInfo := &certTemplateInfo{ + commonName: signer.KeyID(), + } + + return generateCert(trustedKey, signer, subjectInfo, issuerInfo) +} + +// GenerateCACertPool creates a certificate authority pool to be used for a +// TLS configuration. Any self-signed certificates issued by the specified +// trusted keys will be verified during a TLS handshake +func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, trustedKey := range trustedKeys { + cert, err := GenerateCACert(signer, trustedKey) + if err != nil { + return nil, fmt.Errorf("failed to generate CA certificate: %s", err) + } + + certPool.AddCert(cert) + } + + return certPool, nil +} + +// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". 
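+// For example, a bundle with two certificates is just their PEM blocks
+// concatenated:
+//
+//   -----BEGIN CERTIFICATE-----
+//   MIIB... (base64 DER) ...
+//   -----END CERTIFICATE-----
+//   -----BEGIN CERTIFICATE-----
+//   MIIB... (base64 DER) ...
+//   -----END CERTIFICATE-----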
+func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + certificates := []*x509.Certificate{} + var block *pem.Block + block, b = pem.Decode(b) + for ; block != nil; block, b = pem.Decode(b) { + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } else { + return nil, fmt.Errorf("invalid pem block type: %s", block.Type) + } + } + + return certificates, nil +} + +// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificatePool(filename string) (*x509.CertPool, error) { + certs, err := LoadCertificateBundle(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} diff -Nru docker.io-0.9.1~dfsg1/libtrust/certificates_test.go docker.io-1.3.2~dfsg1/libtrust/certificates_test.go --- docker.io-0.9.1~dfsg1/libtrust/certificates_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libtrust/certificates_test.go 2014-09-30 21:18:50.000000000 +0000 @@ -0,0 +1,111 @@ +package libtrust + +import ( + "encoding/pem" + "io/ioutil" + "net" + "os" + "path" + "testing" +) + +func TestGenerateCertificates(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + _, err = GenerateSelfSignedServerCert(key, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")}) + if err != nil { + t.Fatal(err) + } + + _, err = GenerateSelfSignedClientCert(key) + if err != nil { + t.Fatal(err) + } +} + +func TestGenerateCACertPool(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + caKey1, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + caKey2, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + _, err = GenerateCACertPool(key, []PublicKey{caKey1.PublicKey(), caKey2.PublicKey()}) + if err != nil { + t.Fatal(err) + } +} + +func TestLoadCertificates(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + caKey1, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + caKey2, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + cert1, err := GenerateCACert(caKey1, key) + if err != nil { + t.Fatal(err) + } + cert2, err := GenerateCACert(caKey2, key) + if err != nil { + t.Fatal(err) + } + + d, err := ioutil.TempDir("/tmp", "cert-test") + if err != nil { + t.Fatal(err) + } + caFile := path.Join(d, "ca.pem") + f, err := os.OpenFile(caFile, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + t.Fatal(err) + } + + err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert1.Raw}) + if err != nil { + t.Fatal(err) + } + err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert2.Raw}) + if err != nil { + t.Fatal(err) + } + f.Close() + + certs, err := LoadCertificateBundle(caFile) + if err != nil { + t.Fatal(err) + } + if len(certs) != 2 { + t.Fatalf("Wrong number of certs received, expected: %d, received %d", 2, len(certs)) + } + + pool, err := LoadCertificatePool(caFile) + if err != nil { + t.Fatal(err) + } + + if len(pool.Subjects()) != 2 { + t.Fatalf("Invalid certificate pool") + } +} diff -Nru docker.io-0.9.1~dfsg1/libtrust/CONTRIBUTING.md 
docker.io-1.3.2~dfsg1/libtrust/CONTRIBUTING.md --- docker.io-0.9.1~dfsg1/libtrust/CONTRIBUTING.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libtrust/CONTRIBUTING.md 2014-09-30 21:18:50.000000000 +0000 @@ -0,0 +1,13 @@ +# Contributing to libtrust + +Want to hack on libtrust? Awesome! Here are instructions to get you +started. + +libtrust is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read +[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). + +Happy hacking! diff -Nru docker.io-0.9.1~dfsg1/libtrust/doc.go docker.io-1.3.2~dfsg1/libtrust/doc.go --- docker.io-0.9.1~dfsg1/libtrust/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libtrust/doc.go 2014-09-30 21:18:50.000000000 +0000 @@ -0,0 +1,9 @@ +/* +Package libtrust provides an interface for managing authentication and +authorization using public key cryptography. Authentication is handled +using the identity attached to the public key and verified through TLS +x509 certificates, a key challenge, or signature. Authorization and +access control is managed through a trust graph distributed between +both remote trust servers and locally cached and managed data. +*/ +package libtrust diff -Nru docker.io-0.9.1~dfsg1/libtrust/ec_key.go docker.io-1.3.2~dfsg1/libtrust/ec_key.go --- docker.io-0.9.1~dfsg1/libtrust/ec_key.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libtrust/ec_key.go 2014-09-30 21:18:50.000000000 +0000 @@ -0,0 +1,437 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * EC DSA PUBLIC KEY + */ + +// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital +// signature algorithms. +type ecPublicKey struct { + *ecdsa.PublicKey + curveName string + signatureAlgorithm *signatureAlgorithm + extended map[string]interface{} +} + +func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { + curve := cryptoPublicKey.Curve + + switch { + case curve == elliptic.P256(): + return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil + case curve == elliptic.P384(): + return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil + case curve == elliptic.P521(): + return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil + default: + return nil, errors.New("unsupported elliptic curve") + } +} + +// KeyType returns the key type for elliptic curve keys, i.e., "EC". +func (k *ecPublicKey) KeyType() string { + return "EC" +} + +// CurveName returns the elliptic curve identifier. +// Possible values are "P-256", "P-384", and "P-521". +func (k *ecPublicKey) CurveName() string { + return k.curveName +} + +// KeyID returns a distinct identifier which is unique to this Public Key. +func (k *ecPublicKey) KeyID() string { + // Generate and return a libtrust fingerprint of the EC public key. 
+ // For an EC key this should be:
+ // SHA256("EC"+curveName+bytes(X)+bytes(Y))
+ // Then truncated to 240 bits and encoded into 12 base32 groups like so:
+ // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+ hasher := crypto.SHA256.New()
+ hasher.Write([]byte(k.KeyType() + k.CurveName()))
+ hasher.Write(k.X.Bytes())
+ hasher.Write(k.Y.Bytes())
+ return keyIDEncode(hasher.Sum(nil)[:30])
+}
+
+func (k *ecPublicKey) String() string {
+ return fmt.Sprintf("EC Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this
+// PublicKey. The alg parameter should identify the digital signature
+// algorithm which was used to produce the signature and should be supported
+// by this public key. Returns a nil error if the signature is valid.
+func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+ // For EC keys there is only one supported signature algorithm depending
+ // on the curve parameters.
+ if k.signatureAlgorithm.HeaderParam() != alg {
+ return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg)
+ }
+
+ // signature is the concatenation of (r, s), base64Url encoded.
+ sigLength := len(signature)
+ expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3)
+ if sigLength != expectedOctetLength {
+ return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength)
+ }
+
+ rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:]
+ r := new(big.Int).SetBytes(rBytes)
+ s := new(big.Int).SetBytes(sBytes)
+
+ hasher := k.signatureAlgorithm.HashID().New()
+ _, err := io.Copy(hasher, data)
+ if err != nil {
+ return fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ if !ecdsa.Verify(k.PublicKey, hash, r, s) {
+ return errors.New("invalid signature")
+ }
+
+ return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations. The type
+// is either *rsa.PublicKey or *ecdsa.PublicKey
+func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey {
+ return k.PublicKey
+}
+
+func (k *ecPublicKey) toMap() map[string]interface{} {
+ jwk := make(map[string]interface{})
+ for k, v := range k.extended {
+ jwk[k] = v
+ }
+ jwk["kty"] = k.KeyType()
+ jwk["kid"] = k.KeyID()
+ jwk["crv"] = k.CurveName()
+
+ xBytes := k.X.Bytes()
+ yBytes := k.Y.Bytes()
+ octetLength := (k.Params().BitSize + 7) >> 3
+ // MUST include leading zeros in the output so that x, y are each
+ // *octetLength* bytes long.
+ xBuf := make([]byte, octetLength-len(xBytes), octetLength)
+ yBuf := make([]byte, octetLength-len(yBytes), octetLength)
+ xBuf = append(xBuf, xBytes...)
+ yBuf = append(yBuf, yBytes...)
+
+ jwk["x"] = joseBase64UrlEncode(xBuf)
+ jwk["y"] = joseBase64UrlEncode(yBuf)
+
+ return jwk
+}
+
+// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPublicKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Public Key to DER-encoded PKIX format.
+func (k *ecPublicKey) PEMBlock() (*pem.Block, error) {
+ derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err)
+ }
+ k.extended["keyID"] = k.KeyID() // For display purposes. 
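+ // Assuming createPemBlock (defined elsewhere in this package) copies
+ // the extended fields into the PEM block headers, the encoded result
+ // would look roughly like:
+ //
+ //   -----BEGIN PUBLIC KEY-----
+ //   keyID: ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+ //
+ //   MFkwEwYHKoZIzj0C... (base64 DER) ...
+ //   -----END PUBLIC KEY-----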
+ return createPemBlock("PUBLIC KEY", derBytes, k.extended)
+}
+
+func (k *ecPublicKey) AddExtendedField(field string, value interface{}) {
+ k.extended[field] = value
+}
+
+func (k *ecPublicKey) GetExtendedField(field string) interface{} {
+ v, ok := k.extended[field]
+ if !ok {
+ return nil
+ }
+ return v
+}
+
+func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) {
+ // JWK key type (kty) has already been determined to be "EC".
+ // Need to extract 'crv', 'x', 'y', and 'kid' and check for
+ // consistency.
+
+ // Get the curve identifier value.
+ crv, err := stringFromMap(jwk, "crv")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err)
+ }
+
+ var (
+ curve elliptic.Curve
+ sigAlg *signatureAlgorithm
+ )
+
+ switch {
+ case crv == "P-256":
+ curve = elliptic.P256()
+ sigAlg = es256
+ case crv == "P-384":
+ curve = elliptic.P384()
+ sigAlg = es384
+ case crv == "P-521":
+ curve = elliptic.P521()
+ sigAlg = es512
+ default:
+ return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q", crv)
+ }
+
+ // Get the X and Y coordinates for the public key point.
+ xB64Url, err := stringFromMap(jwk, "x")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
+ }
+ x, err := parseECCoordinate(xB64Url, curve)
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
+ }
+
+ yB64Url, err := stringFromMap(jwk, "y")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
+ }
+ y, err := parseECCoordinate(yB64Url, curve)
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
+ }
+
+ key := &ecPublicKey{
+ PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y},
+ curveName: crv, signatureAlgorithm: sigAlg,
+ }
+
+ // Key ID is optional too, but if it exists, it should match the key.
+ _, ok := jwk["kid"]
+ if ok {
+ kid, err := stringFromMap(jwk, "kid")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key ID: %s", err)
+ }
+ if kid != key.KeyID() {
+ return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid)
+ }
+ }
+
+ key.extended = jwk
+
+ return key, nil
+}
+
+/*
+ * EC DSA PRIVATE KEY
+ */
+
+// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature
+// algorithms.
+type ecPrivateKey struct {
+ ecPublicKey
+ *ecdsa.PrivateKey
+}
+
+func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) {
+ publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil
+}
+
+// PublicKey returns the Public Key data associated with this Private Key.
+func (k *ecPrivateKey) PublicKey() PublicKey {
+ return &k.ecPublicKey
+}
+
+func (k *ecPrivateKey) String() string {
+ return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the elliptic curve private key. If the specified hashing algorithm is
+// supported by this key, that hash function is used to generate the signature;
+// otherwise, the default hashing algorithm for this key is used. Returns
+// the signature and the name of the JWK signature algorithm used, e.g.,
+// "ES256", "ES384", "ES512".
+func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+ // Generate a signature of the data using the internal alg. 
+ // The given hashID is only a suggestion, and since EC keys only support
+ // one signature/hash algorithm given the curve name, we disregard it for
+ // the elliptic curve JWK signature implementation.
+ hasher := k.signatureAlgorithm.HashID().New()
+ _, err = io.Copy(hasher, data)
+ if err != nil {
+ return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
+ if err != nil {
+ return nil, "", fmt.Errorf("error producing signature: %s", err)
+ }
+ rBytes, sBytes := r.Bytes(), s.Bytes()
+ octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
+ // MUST include leading zeros in the output
+ rBuf := make([]byte, octetLength-len(rBytes), octetLength)
+ sBuf := make([]byte, octetLength-len(sBytes), octetLength)
+
+ rBuf = append(rBuf, rBytes...)
+ sBuf = append(sBuf, sBytes...)
+
+ signature = append(rBuf, sBuf...)
+ alg = k.signatureAlgorithm.HeaderParam()
+
+ return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey
+func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+ return k.PrivateKey
+}
+
+func (k *ecPrivateKey) toMap() map[string]interface{} {
+ jwk := k.ecPublicKey.toMap()
+
+ dBytes := k.D.Bytes()
+ // The length of this octet string MUST be ceiling(log-base-2(n)/8)
+ // octets (where n is the order of the curve). This is because the private
+ // key d must be in the interval [1, n-1] so the bitlength of d should be
+ // no larger than the bitlength of n-1. The easiest way to find the octet
+ // length is to take bitlength(n-1), add 7 to force a carry, and shift this
+ // bit sequence right by 3, which is essentially dividing by 8 and adding
+ // 1 if there is any remainder. Thus, the private key value d should be
+ // output to (bitlength(n-1)+7)>>3 octets.
+ n := k.ecPublicKey.Params().N
+ octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+ // Create a buffer with the necessary zero-padding.
+ dBuf := make([]byte, octetLength-len(dBytes), octetLength)
+ dBuf = append(dBuf, dBytes...)
+
+ jwk["d"] = joseBase64UrlEncode(dBuf)
+
+ return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded SEC 1 format.
+func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
+ derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded SEC 1 format: %s", err)
+ }
+ k.extended["keyID"] = k.KeyID() // For display purposes.
+ return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
+}
+
+func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
+ dB64Url, err := stringFromMap(jwk, "d")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Private Key: %s", err)
+ }
+
+ // JWK key type (kty) has already been determined to be "EC".
+ // Need to extract the public key information, then extract the private
+ // key value 'd'. 
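+ //
+ // For reference, a P-256 private JWK has this shape (values elided):
+ //
+ //   {
+ //     "kty": "EC",
+ //     "crv": "P-256",
+ //     "x": "<base64url, 32 bytes>",
+ //     "y": "<base64url, 32 bytes>",
+ //     "d": "<base64url, 32 bytes>",
+ //     "kid": "<optional key ID>"
+ //   }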
+ publicKey, err := ecPublicKeyFromMap(jwk)
+ if err != nil {
+ return nil, err
+ }
+
+ d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
+ }
+
+ key := &ecPrivateKey{
+ ecPublicKey: *publicKey,
+ PrivateKey: &ecdsa.PrivateKey{
+ PublicKey: *publicKey.PublicKey,
+ D: d,
+ },
+ }
+
+ return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
+ k = new(ecPrivateKey)
+ k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+
+ k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
+ k.extended = make(map[string]interface{})
+
+ return
+}
+
+// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
+func GenerateECP256PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P256())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
+ }
+
+ k.curveName = "P-256"
+ k.signatureAlgorithm = es256
+
+ return k, nil
+}
+
+// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
+func GenerateECP384PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P384())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
+ }
+
+ k.curveName = "P-384"
+ k.signatureAlgorithm = es384
+
+ return k, nil
+}
+
+// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
+func GenerateECP521PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P521())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
+ }
+
+ k.curveName = "P-521"
+ k.signatureAlgorithm = es512
+
+ return k, nil
+}
diff -Nru docker.io-0.9.1~dfsg1/libtrust/ec_key_test.go docker.io-1.3.2~dfsg1/libtrust/ec_key_test.go
--- docker.io-0.9.1~dfsg1/libtrust/ec_key_test.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/ec_key_test.go 2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,157 @@
+package libtrust
+
+import (
+ "bytes"
+ "encoding/json"
+ "testing"
+)
+
+func generateECTestKeys(t *testing.T) []PrivateKey {
+ p256Key, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ p384Key, err := GenerateECP384PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ p521Key, err := GenerateECP521PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return []PrivateKey{p256Key, p384Key, p521Key}
+}
+
+func TestECKeys(t *testing.T) {
+ ecKeys := generateECTestKeys(t)
+
+ for _, ecKey := range ecKeys {
+ if ecKey.KeyType() != "EC" {
+ t.Fatalf("key type must be %q, instead got %q", "EC", ecKey.KeyType())
+ }
+ }
+}
+
+func TestECSignVerify(t *testing.T) {
+ ecKeys := generateECTestKeys(t)
+
+ message := "Hello, World!"
+ data := bytes.NewReader([]byte(message))
+
+ sigAlgs := []*signatureAlgorithm{es256, es384, es512}
+
+ for i, ecKey := range ecKeys {
+ sigAlg := sigAlgs[i]
+
+ t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, ecKey.KeyID())
+
+ data.Seek(0, 0) // Reset the byte reader
+
+ // Sign
+ sig, alg, err := ecKey.Sign(data, sigAlg.HashID())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data.Seek(0, 0) // Reset the byte reader
+
+ // Verify
+ err = ecKey.Verify(data, alg, sig)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalECKeys(t *testing.T) {
+ ecKeys := generateECTestKeys(t)
+ data := bytes.NewReader([]byte("This is a test. 
I repeat: this is only a test.")) + sigAlgs := []*signatureAlgorithm{es256, es384, es512} + + for i, ecKey := range ecKeys { + sigAlg := sigAlgs[i] + privateJWKJSON, err := json.MarshalIndent(ecKey, "", " ") + if err != nil { + t.Fatal(err) + } + + publicJWKJSON, err := json.MarshalIndent(ecKey.PublicKey(), "", " ") + if err != nil { + t.Fatal(err) + } + + t.Logf("JWK Private Key: %s", string(privateJWKJSON)) + t.Logf("JWK Public Key: %s", string(publicJWKJSON)) + + privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON) + if err != nil { + t.Fatal(err) + } + + pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON) + if err != nil { + t.Fatal(err) + } + + // Ensure we can sign/verify a message with the unmarshalled keys. + data.Seek(0, 0) // Reset the byte reader + signature, alg, err := privKey2.Sign(data, sigAlg.HashID()) + if err != nil { + t.Fatal(err) + } + + data.Seek(0, 0) // Reset the byte reader + err = pubKey2.Verify(data, alg, signature) + if err != nil { + t.Fatal(err) + } + } +} + +func TestFromCryptoECKeys(t *testing.T) { + ecKeys := generateECTestKeys(t) + + for _, ecKey := range ecKeys { + cryptoPrivateKey := ecKey.CryptoPrivateKey() + cryptoPublicKey := ecKey.CryptoPublicKey() + + pubKey, err := FromCryptoPublicKey(cryptoPublicKey) + if err != nil { + t.Fatal(err) + } + + if pubKey.KeyID() != ecKey.KeyID() { + t.Fatal("public key key ID mismatch") + } + + privKey, err := FromCryptoPrivateKey(cryptoPrivateKey) + if err != nil { + t.Fatal(err) + } + + if privKey.KeyID() != ecKey.KeyID() { + t.Fatal("public key key ID mismatch") + } + } +} + +func TestExtendedFields(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + key.AddExtendedField("test", "foobar") + val := key.GetExtendedField("test") + + gotVal, ok := val.(string) + if !ok { + t.Fatalf("value is not a string") + } else if gotVal != val { + t.Fatalf("value %q is not equal to %q", gotVal, val) + } + +} diff -Nru docker.io-0.9.1~dfsg1/libtrust/filter.go docker.io-1.3.2~dfsg1/libtrust/filter.go --- docker.io-0.9.1~dfsg1/libtrust/filter.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libtrust/filter.go 2014-09-30 21:18:50.000000000 +0000 @@ -0,0 +1,44 @@ +package libtrust + +import ( + "path/filepath" +) + +// FilterByHosts filters the list of PublicKeys to only those which contain a +// 'hosts' pattern which matches the given host. If *includeEmpty* is true, +// then keys which do not specify any hosts are also returned. 
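+// The patterns are matched with filepath.Match, whose '*' matches any
+// sequence of non-'/' characters: "*.example.com" therefore matches
+// "foo.example.com" as well as "foo.even.example.com", but not
+// "example.com" itself, and a bare "*" matches every host.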
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) { + filtered := make([]PublicKey, 0, len(keys)) + + for _, pubKey := range keys { + hosts, ok := pubKey.GetExtendedField("hosts").([]interface{}) + + if !ok || (ok && len(hosts) == 0) { + if includeEmpty { + filtered = append(filtered, pubKey) + } + continue + } + + // Check if any hosts match pattern + for _, hostVal := range hosts { + hostPattern, ok := hostVal.(string) + if !ok { + continue + } + + match, err := filepath.Match(hostPattern, host) + if err != nil { + return nil, err + } + + if match { + filtered = append(filtered, pubKey) + continue + } + } + + } + + return filtered, nil +} diff -Nru docker.io-0.9.1~dfsg1/libtrust/filter_test.go docker.io-1.3.2~dfsg1/libtrust/filter_test.go --- docker.io-0.9.1~dfsg1/libtrust/filter_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libtrust/filter_test.go 2014-09-30 21:18:50.000000000 +0000 @@ -0,0 +1,79 @@ +package libtrust + +import ( + "testing" +) + +func compareKeySlices(t *testing.T, sliceA, sliceB []PublicKey) { + if len(sliceA) != len(sliceB) { + t.Fatalf("slice size %d, expected %d", len(sliceA), len(sliceB)) + } + + for i, itemA := range sliceA { + itemB := sliceB[i] + if itemA != itemB { + t.Fatalf("slice index %d not equal: %#v != %#v", i, itemA, itemB) + } + } +} + +func TestFilter(t *testing.T) { + keys := make([]PublicKey, 0, 8) + + // Create 8 keys and add host entries. + for i := 0; i < cap(keys); i++ { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + switch { + case i == 0: + // Don't add entries for this key, key 0. + break + case i%2 == 0: + // Should catch keys 2, 4, and 6. + key.AddExtendedField("hosts", []interface{}{"*.even.example.com"}) + case i == 7: + // Should catch only the last key, and make it match any hostname. + key.AddExtendedField("hosts", []interface{}{"*"}) + default: + // should catch keys 1, 3, 5. + key.AddExtendedField("hosts", []interface{}{"*.example.com"}) + } + + keys = append(keys, key) + } + + // Should match 2 keys, the empty one, and the one that matches all hosts. + matchedKeys, err := FilterByHosts(keys, "foo.bar.com", true) + if err != nil { + t.Fatal(err) + } + expectedMatch := []PublicKey{keys[0], keys[7]} + compareKeySlices(t, expectedMatch, matchedKeys) + + // Should match 1 key, the one that matches any host. + matchedKeys, err = FilterByHosts(keys, "foo.bar.com", false) + if err != nil { + t.Fatal(err) + } + expectedMatch = []PublicKey{keys[7]} + compareKeySlices(t, expectedMatch, matchedKeys) + + // Should match keys that end in "example.com", and the key that matches anything. + matchedKeys, err = FilterByHosts(keys, "foo.example.com", false) + if err != nil { + t.Fatal(err) + } + expectedMatch = []PublicKey{keys[1], keys[3], keys[5], keys[7]} + compareKeySlices(t, expectedMatch, matchedKeys) + + // Should match all of the keys except the empty key. 
diff -Nru docker.io-0.9.1~dfsg1/libtrust/filter_test.go docker.io-1.3.2~dfsg1/libtrust/filter_test.go
--- docker.io-0.9.1~dfsg1/libtrust/filter_test.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/filter_test.go	2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,79 @@
+package libtrust
+
+import (
+	"testing"
+)
+
+func compareKeySlices(t *testing.T, sliceA, sliceB []PublicKey) {
+	if len(sliceA) != len(sliceB) {
+		t.Fatalf("slice size %d, expected %d", len(sliceA), len(sliceB))
+	}
+
+	for i, itemA := range sliceA {
+		itemB := sliceB[i]
+		if itemA != itemB {
+			t.Fatalf("slice index %d not equal: %#v != %#v", i, itemA, itemB)
+		}
+	}
+}
+
+func TestFilter(t *testing.T) {
+	keys := make([]PublicKey, 0, 8)
+
+	// Create 8 keys and add host entries.
+	for i := 0; i < cap(keys); i++ {
+		key, err := GenerateECP256PrivateKey()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		switch {
+		case i == 0:
+			// Don't add entries for this key, key 0.
+			break
+		case i%2 == 0:
+			// Should catch keys 2, 4, and 6.
+			key.AddExtendedField("hosts", []interface{}{"*.even.example.com"})
+		case i == 7:
+			// Should catch only the last key, and make it match any hostname.
+			key.AddExtendedField("hosts", []interface{}{"*"})
+		default:
+			// Should catch keys 1, 3, 5.
+			key.AddExtendedField("hosts", []interface{}{"*.example.com"})
+		}
+
+		keys = append(keys, key)
+	}
+
+	// Should match 2 keys, the empty one, and the one that matches all hosts.
+	matchedKeys, err := FilterByHosts(keys, "foo.bar.com", true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	expectedMatch := []PublicKey{keys[0], keys[7]}
+	compareKeySlices(t, expectedMatch, matchedKeys)
+
+	// Should match 1 key, the one that matches any host.
+	matchedKeys, err = FilterByHosts(keys, "foo.bar.com", false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	expectedMatch = []PublicKey{keys[7]}
+	compareKeySlices(t, expectedMatch, matchedKeys)
+
+	// Should match keys that end in "example.com", and the key that matches anything.
+	matchedKeys, err = FilterByHosts(keys, "foo.example.com", false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	expectedMatch = []PublicKey{keys[1], keys[3], keys[5], keys[7]}
+	compareKeySlices(t, expectedMatch, matchedKeys)
+
+	// Should match all of the keys except the empty key.
+	matchedKeys, err = FilterByHosts(keys, "foo.even.example.com", false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	expectedMatch = keys[1:]
+	compareKeySlices(t, expectedMatch, matchedKeys)
+}
diff -Nru docker.io-0.9.1~dfsg1/libtrust/hash.go docker.io-1.3.2~dfsg1/libtrust/hash.go
--- docker.io-0.9.1~dfsg1/libtrust/hash.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/hash.go	2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,56 @@
+package libtrust
+
+import (
+	"crypto"
+	_ "crypto/sha256" // Register SHA224 and SHA256
+	_ "crypto/sha512" // Register SHA384 and SHA512
+	"fmt"
+)
+
+type signatureAlgorithm struct {
+	algHeaderParam string
+	hashID         crypto.Hash
+}
+
+func (h *signatureAlgorithm) HeaderParam() string {
+	return h.algHeaderParam
+}
+
+func (h *signatureAlgorithm) HashID() crypto.Hash {
+	return h.hashID
+}
+
+var (
+	rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
+	rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
+	rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
+	es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
+	es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
+	es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
+)
+
+func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
+	switch {
+	case alg == "RS256":
+		return rs256, nil
+	case alg == "RS384":
+		return rs384, nil
+	case alg == "RS512":
+		return rs512, nil
+	default:
+		return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
+	}
+}
+
+func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
+	switch {
+	case hashID == crypto.SHA512:
+		return rs512
+	case hashID == crypto.SHA384:
+		return rs384
+	case hashID == crypto.SHA256:
+		fallthrough
+	default:
+		return rs256
+	}
+}
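The blank imports above matter: crypto.Hash.New panics if the corresponding hash is not linked into the binary. A minimal illustration using only exported standard-library API (the payload is a made-up example):

package main

import (
	"crypto"
	_ "crypto/sha256" // register SHA-256, exactly as hash.go does
	"fmt"
)

func main() {
	hashID := crypto.SHA256 // the digest behind both "RS256" and "ES256"
	if !hashID.Available() {
		panic("SHA-256 is not linked into this binary")
	}
	hasher := hashID.New()
	hasher.Write([]byte("some payload")) // hypothetical content
	fmt.Printf("%x\n", hasher.Sum(nil))
}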
diff -Nru docker.io-0.9.1~dfsg1/libtrust/jsonsign.go docker.io-1.3.2~dfsg1/libtrust/jsonsign.go
--- docker.io-0.9.1~dfsg1/libtrust/jsonsign.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/jsonsign.go	2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,566 @@
+package libtrust
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+	"unicode"
+)
+
+var (
+	// ErrInvalidSignContent is used when the content to be signed is invalid.
+	ErrInvalidSignContent = errors.New("invalid sign content")
+
+	// ErrInvalidJSONContent is used when invalid json is encountered.
+	ErrInvalidJSONContent = errors.New("invalid json content")
+
+	// ErrMissingSignatureKey is used when the specified signature key
+	// does not exist in the JSON content.
+	ErrMissingSignatureKey = errors.New("missing signature key")
+)
+
+type jsHeader struct {
+	JWK       PublicKey `json:"jwk,omitempty"`
+	Algorithm string    `json:"alg"`
+	Chain     []string  `json:"x5c,omitempty"`
+}
+
+type jsSignature struct {
+	Header    *jsHeader `json:"header"`
+	Signature string    `json:"signature"`
+	Protected string    `json:"protected,omitempty"`
+}
+
+type signKey struct {
+	PrivateKey
+	Chain []*x509.Certificate
+}
+
+// JSONSignature represents a signature of a json object.
+type JSONSignature struct {
+	payload      string
+	signatures   []*jsSignature
+	indent       string
+	formatLength int
+	formatTail   []byte
+}
+
+func newJSONSignature() *JSONSignature {
+	return &JSONSignature{
+		signatures: make([]*jsSignature, 0, 1),
+	}
+}
+
+// Payload returns the decoded payload of the signature. This
+// payload should not be signed directly.
+func (js *JSONSignature) Payload() ([]byte, error) {
+	return joseBase64UrlDecode(js.payload)
+}
+
+func (js *JSONSignature) protectedHeader() (string, error) {
+	protected := map[string]interface{}{
+		"formatLength": js.formatLength,
+		"formatTail":   joseBase64UrlEncode(js.formatTail),
+		"time":         time.Now().UTC().Format(time.RFC3339),
+	}
+	protectedBytes, err := json.Marshal(protected)
+	if err != nil {
+		return "", err
+	}
+
+	return joseBase64UrlEncode(protectedBytes), nil
+}
+
+func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) {
+	buf := make([]byte, len(js.payload)+len(protectedHeader)+1)
+	copy(buf, protectedHeader)
+	buf[len(protectedHeader)] = '.'
+	copy(buf[len(protectedHeader)+1:], js.payload)
+	return buf, nil
+}
+
+// Sign adds a signature using the given private key.
+func (js *JSONSignature) Sign(key PrivateKey) error {
+	protected, err := js.protectedHeader()
+	if err != nil {
+		return err
+	}
+	signBytes, err := js.signBytes(protected)
+	if err != nil {
+		return err
+	}
+	sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
+	if err != nil {
+		return err
+	}
+
+	header := &jsHeader{
+		JWK:       key.PublicKey(),
+		Algorithm: algorithm,
+	}
+	sig := &jsSignature{
+		Header:    header,
+		Signature: joseBase64UrlEncode(sigBytes),
+		Protected: protected,
+	}
+
+	js.signatures = append(js.signatures, sig)
+
+	return nil
+}
+
+// SignWithChain adds a signature using the given private key
+// and setting the x509 chain. The public key of the first element
+// in the chain must be the public key corresponding to the signing key.
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error {
+	// Ensure key.Chain[0] is public key for key
+	//key.Chain.PublicKey
+	//key.PublicKey().CryptoPublicKey()
+
+	// Verify chain
+	protected, err := js.protectedHeader()
+	if err != nil {
+		return err
+	}
+	signBytes, err := js.signBytes(protected)
+	if err != nil {
+		return err
+	}
+	sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
+	if err != nil {
+		return err
+	}
+
+	header := &jsHeader{
+		Chain:     make([]string, len(chain)),
+		Algorithm: algorithm,
+	}
+
+	for i, cert := range chain {
+		header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw)
+	}
+
+	sig := &jsSignature{
+		Header:    header,
+		Signature: joseBase64UrlEncode(sigBytes),
+		Protected: protected,
+	}
+
+	js.signatures = append(js.signatures, sig)
+
+	return nil
+}
+
+// Verify verifies all the signatures and returns the list of
+// public keys used to sign. Any x509 chains are not checked.
+func (js *JSONSignature) Verify() ([]PublicKey, error) { + keys := make([]PublicKey, len(js.signatures)) + for i, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + } else if signature.Header.JWK != nil { + publicKey = signature.Header.JWK + } else { + return nil, errors.New("missing public key") + } + + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + + keys[i] = publicKey + } + return keys, nil +} + +// VerifyChains verifies all the signatures and the chains associated +// with each signature and returns the list of verified chains. +// Signatures without an x509 chain are not checked. +func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { + chains := make([][]*x509.Certificate, 0, len(js.signatures)) + for _, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + intermediates := x509.NewCertPool() + if len(signature.Header.Chain) > 1 { + intermediateChain := signature.Header.Chain[1:] + for i := range intermediateChain { + certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) + if err != nil { + return nil, err + } + intermediate, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + intermediates.AddCert(intermediate) + } + } + + verifyOptions := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: ca, + } + + verifiedChains, err := cert.Verify(verifyOptions) + if err != nil { + return nil, err + } + chains = append(chains, verifiedChains...) 
+
+			sigBytes, err := joseBase64UrlDecode(signature.Signature)
+			if err != nil {
+				return nil, err
+			}
+
+			err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+	}
+	return chains, nil
+}
+
+// JWS returns JSON serialized JWS according to
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2
+func (js *JSONSignature) JWS() ([]byte, error) {
+	if len(js.signatures) == 0 {
+		return nil, errors.New("missing signature")
+	}
+	jsonMap := map[string]interface{}{
+		"payload":    js.payload,
+		"signatures": js.signatures,
+	}
+
+	return json.MarshalIndent(jsonMap, "", " ")
+}
+
+func notSpace(r rune) bool {
+	return !unicode.IsSpace(r)
+}
+
+func detectJSONIndent(jsonContent []byte) (indent string) {
+	if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' {
+		quoteIndex := bytes.IndexRune(jsonContent[1:], '"')
+		if quoteIndex > 0 {
+			indent = string(jsonContent[2 : quoteIndex+1])
+		}
+	}
+	return
+}
+
+type jsParsedHeader struct {
+	JWK       json.RawMessage `json:"jwk"`
+	Algorithm string          `json:"alg"`
+	Chain     []string        `json:"x5c"`
+}
+
+type jsParsedSignature struct {
+	Header    *jsParsedHeader `json:"header"`
+	Signature string          `json:"signature"`
+	Protected string          `json:"protected"`
+}
+
+// ParseJWS parses a JWS serialized JSON object into a JSONSignature.
+func ParseJWS(content []byte) (*JSONSignature, error) {
+	type jsParsed struct {
+		Payload    string               `json:"payload"`
+		Signatures []*jsParsedSignature `json:"signatures"`
+	}
+	parsed := &jsParsed{}
+	err := json.Unmarshal(content, parsed)
+	if err != nil {
+		return nil, err
+	}
+	if len(parsed.Signatures) == 0 {
+		return nil, errors.New("missing signatures")
+	}
+	payload, err := joseBase64UrlDecode(parsed.Payload)
+	if err != nil {
+		return nil, err
+	}
+
+	js, err := NewJSONSignature(payload)
+	if err != nil {
+		return nil, err
+	}
+	js.signatures = make([]*jsSignature, len(parsed.Signatures))
+	for i, signature := range parsed.Signatures {
+		header := &jsHeader{
+			Algorithm: signature.Header.Algorithm,
+		}
+		if signature.Header.Chain != nil {
+			header.Chain = signature.Header.Chain
+		}
+		if signature.Header.JWK != nil {
+			publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK))
+			if err != nil {
+				return nil, err
+			}
+			header.JWK = publicKey
+		}
+		js.signatures[i] = &jsSignature{
+			Header:    header,
+			Signature: signature.Signature,
+			Protected: signature.Protected,
+		}
+	}
+
+	return js, nil
+}
+
+// NewJSONSignature returns a new unsigned JWS from a json byte array.
+// JSONSignature will need to be signed before serializing or storing.
+func NewJSONSignature(content []byte) (*JSONSignature, error) {
+	var dataMap map[string]interface{}
+	err := json.Unmarshal(content, &dataMap)
+	if err != nil {
+		return nil, err
+	}
+
+	js := newJSONSignature()
+	js.indent = detectJSONIndent(content)
+
+	js.payload = joseBase64UrlEncode(content)
+
+	// Find trailing } and whitespace, put in protected header
+	closeIndex := bytes.LastIndexFunc(content, notSpace)
+	if content[closeIndex] != '}' {
+		return nil, ErrInvalidJSONContent
+	}
+	lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace)
+	if content[lastRuneIndex] == ',' {
+		return nil, ErrInvalidJSONContent
+	}
+	js.formatLength = lastRuneIndex + 1
+	js.formatTail = content[js.formatLength:]
+
+	return js, nil
+}
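A sketch of the detached-JWS flow these functions support, under the assumption that the payload shown is representative (sign, serialize per the JWS JSON serialization, then parse and verify):

package main

import (
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	content := []byte(`{"name":"example","version":1}`) // hypothetical document

	js, err := libtrust.NewJSONSignature(content)
	if err != nil {
		panic(err)
	}

	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}
	if err := js.Sign(key); err != nil {
		panic(err)
	}

	// Serialize in the general JWS JSON format.
	jws, err := js.JWS()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(jws))

	// Round-trip: parse it back and verify.
	parsed, err := libtrust.ParseJWS(jws)
	if err != nil {
		panic(err)
	}
	if _, err := parsed.Verify(); err != nil {
		panic(err)
	}
}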
+// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or
+// struct. JWS will need to be signed before serializing or storing.
+func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) {
+	switch content.(type) {
+	case map[string]interface{}:
+	case struct{}:
+	default:
+		return nil, errors.New("invalid data type")
+	}
+
+	js := newJSONSignature()
+	js.indent = " "
+
+	payload, err := json.MarshalIndent(content, "", js.indent)
+	if err != nil {
+		return nil, err
+	}
+	js.payload = joseBase64UrlEncode(payload)
+
+	// Remove '\n}' from formatted section, put in protected header
+	js.formatLength = len(payload) - 2
+	js.formatTail = payload[js.formatLength:]
+
+	return js, nil
+}
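A companion sketch for the map-based constructor (the payload contents are illustrative):

package main

import (
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	payload := map[string]interface{}{
		"name":   "example",
		"layers": []string{"a", "b"},
	}

	js, err := libtrust.NewJSONSignatureFromMap(payload)
	if err != nil {
		panic(err)
	}

	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}
	if err := js.Sign(key); err != nil {
		panic(err)
	}

	keys, err := js.Verify()
	if err != nil {
		panic(err)
	}
	fmt.Println("signed by", keys[0].KeyID())
}

Note that the type switch in NewJSONSignatureFromMap only admits map[string]interface{} and the empty struct type struct{}; arbitrary struct types are rejected with "invalid data type".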
+func readIntFromMap(key string, m map[string]interface{}) (int, bool) {
+	value, ok := m[key]
+	if !ok {
+		return 0, false
+	}
+	switch v := value.(type) {
+	case int:
+		return v, true
+	case float64:
+		return int(v), true
+	default:
+		return 0, false
+	}
+}
+
+func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) {
+	value, ok := m[key]
+	if !ok {
+		return "", false
+	}
+	v, ok = value.(string)
+	return
+}
+
+// ParsePrettySignature parses a formatted signature into a
+// JSON signature. If the signatures are missing the format information,
+// an error is returned. The formatted signature must have been created
+// by PrettySignature.
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) {
+	var contentMap map[string]json.RawMessage
+	err := json.Unmarshal(content, &contentMap)
+	if err != nil {
+		return nil, fmt.Errorf("error unmarshalling content: %s", err)
+	}
+	sigMessage, ok := contentMap[signatureKey]
+	if !ok {
+		return nil, ErrMissingSignatureKey
+	}
+
+	var signatureBlocks []jsParsedSignature
+	err = json.Unmarshal([]byte(sigMessage), &signatureBlocks)
+	if err != nil {
+		return nil, fmt.Errorf("error unmarshalling signatures: %s", err)
+	}
+
+	js := newJSONSignature()
+	js.signatures = make([]*jsSignature, len(signatureBlocks))
+
+	for i, signatureBlock := range signatureBlocks {
+		protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected)
+		if err != nil {
+			return nil, fmt.Errorf("base64 decode error: %s", err)
+		}
+		var protectedHeader map[string]interface{}
+		err = json.Unmarshal(protectedBytes, &protectedHeader)
+		if err != nil {
+			return nil, fmt.Errorf("error unmarshalling protected header: %s", err)
+		}
+
+		formatLength, ok := readIntFromMap("formatLength", protectedHeader)
+		if !ok {
+			return nil, errors.New("missing formatted length")
+		}
+		encodedTail, ok := readStringFromMap("formatTail", protectedHeader)
+		if !ok {
+			return nil, errors.New("missing formatted tail")
+		}
+		formatTail, err := joseBase64UrlDecode(encodedTail)
+		if err != nil {
+			return nil, fmt.Errorf("base64 decode error on tail: %s", err)
+		}
+		if js.formatLength == 0 {
+			js.formatLength = formatLength
+		} else if js.formatLength != formatLength {
+			return nil, errors.New("conflicting format length")
+		}
+		if len(js.formatTail) == 0 {
+			js.formatTail = formatTail
+		} else if !bytes.Equal(js.formatTail, formatTail) {
+			return nil, errors.New("conflicting format tail")
+		}
+
+		header := &jsHeader{
+			Algorithm: signatureBlock.Header.Algorithm,
+			Chain:     signatureBlock.Header.Chain,
+		}
+		if signatureBlock.Header.JWK != nil {
+			publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK))
+			if err != nil {
+				return nil, fmt.Errorf("error unmarshalling public key: %s", err)
+			}
+			header.JWK = publicKey
+		}
+		js.signatures[i] = &jsSignature{
+			Header:    header,
+			Signature: signatureBlock.Signature,
+			Protected: signatureBlock.Protected,
+		}
+	}
+	if js.formatLength > len(content) {
+		return nil, errors.New("invalid format length")
+	}
+	formatted := make([]byte, js.formatLength+len(js.formatTail))
+	copy(formatted, content[:js.formatLength])
+	copy(formatted[js.formatLength:], js.formatTail)
+	js.indent = detectJSONIndent(formatted)
+	js.payload = joseBase64UrlEncode(formatted)
+
+	return js, nil
+}
+
+// PrettySignature formats a json signature into an easy to read
+// single json serialized object.
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
+	if len(js.signatures) == 0 {
+		return nil, errors.New("no signatures")
+	}
+	payload, err := joseBase64UrlDecode(js.payload)
+	if err != nil {
+		return nil, err
+	}
+	payload = payload[:js.formatLength]
+
+	var marshalled []byte
+	var marshallErr error
+	if js.indent != "" {
+		marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
+	} else {
+		marshalled, marshallErr = json.Marshal(js.signatures)
+	}
+	if marshallErr != nil {
+		return nil, marshallErr
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
+	buf.Write(payload)
+	buf.WriteByte(',')
+	if js.indent != "" {
+		buf.WriteByte('\n')
+		buf.WriteString(js.indent)
+		buf.WriteByte('"')
+		buf.WriteString(signatureKey)
+		buf.WriteString("\": ")
+		buf.Write(marshalled)
+		buf.WriteByte('\n')
+	} else {
+		buf.WriteByte('"')
+		buf.WriteString(signatureKey)
+		buf.WriteString("\":")
+		buf.Write(marshalled)
+	}
+	buf.WriteByte('}')
+
+	return buf.Bytes(), nil
+}
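Before the tests, a sketch of the embedded-signature flow these two functions implement (the signature key "signatures" is an arbitrary choice for the example):

package main

import (
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	content := []byte(`{"name":"example"}`) // hypothetical document

	js, err := libtrust.NewJSONSignature(content)
	if err != nil {
		panic(err)
	}
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}
	if err := js.Sign(key); err != nil {
		panic(err)
	}

	// Embed the signatures inside the document itself under a chosen key.
	pretty, err := js.PrettySignature("signatures")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(pretty))

	// The embedded form can be parsed and verified later.
	parsed, err := libtrust.ParsePrettySignature(pretty, "signatures")
	if err != nil {
		panic(err)
	}
	if _, err := parsed.Verify(); err != nil {
		panic(err)
	}
}

Unlike the detached JWS serialization, this form keeps the signed document readable in place; the formatLength/formatTail fields in the protected header record exactly how to reconstruct the originally signed bytes.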
diff -Nru docker.io-0.9.1~dfsg1/libtrust/jsonsign_test.go docker.io-1.3.2~dfsg1/libtrust/jsonsign_test.go
--- docker.io-0.9.1~dfsg1/libtrust/jsonsign_test.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/jsonsign_test.go	2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,297 @@
+package libtrust
+
+import (
+	"bytes"
+	"crypto/x509"
+	"encoding/json"
+	"fmt"
+	"testing"
+
+	"github.com/docker/libtrust/testutil"
+)
+
+func createTestJSON(sigKey string, indent string) (map[string]interface{}, []byte) {
+	testMap := map[string]interface{}{
+		"name": "dmcgowan/mycontainer",
+		"config": map[string]interface{}{
+			"ports": []int{9101, 9102},
+			"run":   "/bin/echo \"Hello\"",
+		},
+		"layers": []string{
+			"2893c080-27f5-11e4-8c21-0800200c9a66",
+			"c54bc25b-fbb2-497b-a899-a8bc1b5b9d55",
+			"4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4",
+			"0b6da891-7f7f-4abf-9c97-7887549e696c",
+			"1d960389-ae4f-4011-85fd-18d0f96a67ad",
+		},
+	}
+	formattedSection := `{"config":{"ports":[9101,9102],"run":"/bin/echo \"Hello\""},"layers":["2893c080-27f5-11e4-8c21-0800200c9a66","c54bc25b-fbb2-497b-a899-a8bc1b5b9d55","4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4","0b6da891-7f7f-4abf-9c97-7887549e696c","1d960389-ae4f-4011-85fd-18d0f96a67ad"],"name":"dmcgowan/mycontainer","%s":[{"header":{`
+	formattedSection = fmt.Sprintf(formattedSection, sigKey)
+	if indent != "" {
+		buf := bytes.NewBuffer(nil)
+		json.Indent(buf, []byte(formattedSection), "", indent)
+		return testMap, buf.Bytes()
+	}
+	return testMap, []byte(formattedSection)
+
+}
+
+func TestSignJSON(t *testing.T) {
+	key, err := GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("Error generating EC key: %s", err)
+	}
+
+	testMap, _ := createTestJSON("buildSignatures", " ")
+	indented, err := json.MarshalIndent(testMap, "", " ")
+	if err != nil {
+		t.Fatalf("Marshal error: %s", err)
+	}
+
+	js, err := NewJSONSignature(indented)
+	if err != nil {
+		t.Fatalf("Error creating JSON signature: %s", err)
+	}
+	err = js.Sign(key)
+	if err != nil {
+		t.Fatalf("Error signing content: %s", err)
+	}
+
+	keys, err := js.Verify()
+	if err != nil {
+		t.Fatalf("Error verifying signature: %s", err)
+	}
+	if len(keys) != 1 {
+		t.Fatalf("wrong number of keys returned")
+	}
+	if keys[0].KeyID() != key.KeyID() {
+		t.Fatalf("Unexpected public key returned")
+	}
+
+}
+
+func TestSignMap(t *testing.T) {
+	key, err := GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("Error generating EC key: %s", err)
+	}
+
+	testMap, _ := createTestJSON("buildSignatures", " ")
+	js, err := NewJSONSignatureFromMap(testMap)
+	if err != nil {
+		t.Fatalf("Error creating JSON signature: %s", err)
+	}
+	err = js.Sign(key)
+	if err != nil {
+		t.Fatalf("Error signing JSON signature: %s", err)
+	}
+
+	keys, err := js.Verify()
+	if err != nil {
+		t.Fatalf("Error verifying signature: %s", err)
+	}
+	if len(keys) != 1 {
+		t.Fatalf("wrong number of keys returned")
+	}
+	if keys[0].KeyID() != key.KeyID() {
+		t.Fatalf("Unexpected public key returned")
+	}
+}
+
+func TestFormattedJson(t *testing.T) {
+	key, err := GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("Error generating EC key: %s", err)
+	}
+
+	testMap, firstSection := createTestJSON("buildSignatures", " ")
+	indented, err := json.MarshalIndent(testMap, "", " ")
+	if err != nil {
+		t.Fatalf("Marshal error: %s", err)
+	}
+
+	js, err := NewJSONSignature(indented)
+	if err != nil {
+		t.Fatalf("Error creating JSON signature: %s", err)
+	}
+	err = js.Sign(key)
+	if err != nil {
+		t.Fatalf("Error signing content: %s", err)
+	}
+
+	b, err := js.PrettySignature("buildSignatures")
+	if err != nil {
+		t.Fatalf("Error signing map: %s", err)
+	}
+
+	if !bytes.Equal(b[:len(firstSection)], firstSection) {
+		t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)])
+	}
+
+	parsed, err := ParsePrettySignature(b, "buildSignatures")
+	if err != nil {
+		t.Fatalf("Error parsing formatted signature: %s", err)
+	}
+
+	keys, err := parsed.Verify()
+	if err != nil {
+		t.Fatalf("Error verifying signature: %s", err)
+	}
+	if len(keys) != 1 {
+		t.Fatalf("wrong number of keys returned")
+	}
+	if keys[0].KeyID() != key.KeyID() {
+		t.Fatalf("Unexpected public key returned")
+	}
+
+	var unmarshalled map[string]interface{}
+	err = json.Unmarshal(b, &unmarshalled)
+	if err != nil {
+		t.Fatalf("Could not unmarshal after parse: %s", err)
+	}
+
+}
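For orientation, the document that TestFormattedJson checks has roughly this embedded-signature shape (abridged; the "..." elisions stand for real values, and the field order follows the canonical map marshaling above):

{
 "config": { ... },
 "layers": [ ... ],
 "name": "dmcgowan/mycontainer",
 "buildSignatures": [
  {
   "header": {
    "jwk": { ... },
    "alg": "ES256"
   },
   "signature": "...",
   "protected": "..."
  }
 ]
}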
t.Fatalf("Error wrong number of keys returned") + } + if keys[0].KeyID() != key.KeyID() { + t.Fatalf("Unexpected public key returned") + } +} + +func generateTrustChain(t *testing.T, key PrivateKey, ca *x509.Certificate) (PrivateKey, []*x509.Certificate) { + parent := ca + parentKey := key + chain := make([]*x509.Certificate, 6) + for i := 5; i > 0; i-- { + intermediatekey, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generate key: %s", err) + } + chain[i], err = testutil.GenerateIntermediate(intermediatekey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) + if err != nil { + t.Fatalf("Error generating intermdiate certificate: %s", err) + } + parent = chain[i] + parentKey = intermediatekey + } + trustKey, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generate key: %s", err) + } + chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) + if err != nil { + t.Fatalf("Error generate trust cert: %s", err) + } + + return trustKey, chain +} + +func TestChainVerify(t *testing.T) { + caKey, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generating key: %s", err) + } + ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey()) + if err != nil { + t.Fatalf("Error generating ca: %s", err) + } + trustKey, chain := generateTrustChain(t, caKey, ca) + + testMap, _ := createTestJSON("verifySignatures", " ") + js, err := NewJSONSignatureFromMap(testMap) + if err != nil { + t.Fatalf("Error creating JSONSignature from map: %s", err) + } + + err = js.SignWithChain(trustKey, chain) + if err != nil { + t.Fatalf("Error signing with chain: %s", err) + } + + pool := x509.NewCertPool() + pool.AddCert(ca) + chains, err := js.VerifyChains(pool) + if err != nil { + t.Fatalf("Error verifying content: %s", err) + } + if len(chains) != 1 { + t.Fatalf("Unexpected chains length: %d", len(chains)) + } + if len(chains[0]) != 7 { + t.Fatalf("Unexpected chain length: %d", len(chains[0])) + } +} + +func TestInvalidChain(t *testing.T) { + caKey, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generating key: %s", err) + } + ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey()) + if err != nil { + t.Fatalf("Error generating ca: %s", err) + } + trustKey, chain := generateTrustChain(t, caKey, ca) + + testMap, _ := createTestJSON("verifySignatures", " ") + js, err := NewJSONSignatureFromMap(testMap) + if err != nil { + t.Fatalf("Error creating JSONSignature from map: %s", err) + } + + err = js.SignWithChain(trustKey, chain[:5]) + if err != nil { + t.Fatalf("Error signing with chain: %s", err) + } + + pool := x509.NewCertPool() + pool.AddCert(ca) + chains, err := js.VerifyChains(pool) + if err == nil { + t.Fatalf("Expected error verifying with bad chain") + } + if len(chains) != 0 { + t.Fatalf("Unexpected chains returned from invalid verify") + } +} diff -Nru docker.io-0.9.1~dfsg1/libtrust/key_files.go docker.io-1.3.2~dfsg1/libtrust/key_files.go --- docker.io-0.9.1~dfsg1/libtrust/key_files.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libtrust/key_files.go 2014-09-30 21:18:50.000000000 +0000 @@ -0,0 +1,255 @@ +package libtrust + +import ( + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "os" + "strings" +) + +var ( + // ErrKeyFileDoesNotExist indicates that the private key file does not exist. 
+ ErrKeyFileDoesNotExist = errors.New("key file does not exist") +) + +func readKeyFileBytes(filename string) ([]byte, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if os.IsNotExist(err) { + err = ErrKeyFileDoesNotExist + } else { + err = fmt.Errorf("unable to read key file %s: %s", filename, err) + } + + return nil, err + } + + return data, nil +} + +/* + Loading and Saving of Public and Private Keys in either PEM or JWK format. +*/ + +// LoadKeyFile opens the given filename and attempts to read a Private Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). +func LoadKeyFile(filename string) (PrivateKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PrivateKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPrivateKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key JWK: %s", err) + } + } else { + key, err = UnmarshalPrivateKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key PEM: %s", err) + } + } + + return key, nil +} + +// LoadPublicKeyFile opens the given filename and attempts to read a Public Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). +func LoadPublicKeyFile(filename string) (PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PublicKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPublicKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key JWK: %s", err) + } + } else { + key, err = UnmarshalPublicKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key PEM: %s", err) + } + } + + return key, nil +} + +// SaveKey saves the given key to a file using the provided filename. +// This process will overwrite any existing file at the provided location. +func SaveKey(filename string, key PrivateKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode private key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) + if err != nil { + return fmt.Errorf("unable to write private key file %s: %s", filename, err) + } + + return nil +} + +// SavePublicKey saves the given public key to the file. +func SavePublicKey(filename string, key PublicKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode public key JWK: %s", err) + } + } else { + // Encode in PEM format. 
+ pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode public key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write public key file %s: %s", filename, err) + } + + return nil +} + +// Public Key Set files + +type jwkSet struct { + Keys []json.RawMessage `json:"keys"` +} + +// LoadKeySetFile loads a key set +func LoadKeySetFile(filename string) ([]PublicKey, error) { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return loadJSONKeySetFile(filename) + } + + // Must be a PEM format file + return loadPEMKeySetFile(filename) +} + +func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { + if len(data) == 0 { + // This is okay, just return an empty slice. + return []json.RawMessage{}, nil + } + + keySet := jwkSet{} + + err := json.Unmarshal(data, &keySet) + if err != nil { + return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err) + } + + return keySet.Keys, nil +} + +func loadJSONKeySetFile(filename string) ([]PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return nil, err + } + + return UnmarshalPublicKeyJWKSet(contents) +} + +func loadPEMKeySetFile(filename string) ([]PublicKey, error) { + data, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return nil, err + } + + return UnmarshalPublicKeyPEMBundle(data) +} + +// AddKeySetFile adds a key to a key set +func AddKeySetFile(filename string, key PublicKey) error { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return addKeySetJSONFile(filename, key) + } + + // Must be a PEM format file + return addKeySetPEMFile(filename, key) +} + +func addKeySetJSONFile(filename string, key PublicKey) error { + encodedKey, err := json.Marshal(key) + if err != nil { + return fmt.Errorf("unable to encode trusted client key: %s", err) + } + + contents, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return err + } + + rawEntries, err := loadJSONKeySetRaw(contents) + if err != nil { + return err + } + + rawEntries = append(rawEntries, json.RawMessage(encodedKey)) + entriesWrapper := jwkSet{Keys: rawEntries} + + encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ") + if err != nil { + return fmt.Errorf("unable to encode trusted client keys: %s", err) + } + + err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err) + } + + return nil +} + +func addKeySetPEMFile(filename string, key PublicKey) error { + // Encode to PEM, open file for appending, write PEM. 
+	file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
+	if err != nil {
+		return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
+	}
+	defer file.Close()
+
+	pemBlock, err := key.PEMBlock()
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted key: %s", err)
+	}
+
+	_, err = file.Write(pem.EncodeToMemory(pemBlock))
+	if err != nil {
+		return fmt.Errorf("unable to write trusted keys file: %s", err)
+	}
+
+	return nil
+}
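A usage sketch for the key-file helpers above (the file names are hypothetical; the extension selects JWK versus PEM encoding):

package main

import (
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}

	// .json/.jwk means JWK encoding; anything else is written as PEM.
	if err := libtrust.SaveKey("id.pem", key); err != nil {
		panic(err)
	}
	if err := libtrust.SavePublicKey("id_pub.json", key.PublicKey()); err != nil {
		panic(err)
	}

	loaded, err := libtrust.LoadKeyFile("id.pem")
	if err != nil {
		panic(err)
	}
	fmt.Println(loaded.KeyID() == key.KeyID()) // true

	// Append the public key to a trusted-keys set (a JWK set here).
	if err := libtrust.AddKeySetFile("trusted.json", key.PublicKey()); err != nil {
		panic(err)
	}
}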
diff -Nru docker.io-0.9.1~dfsg1/libtrust/key_files_test.go docker.io-1.3.2~dfsg1/libtrust/key_files_test.go
--- docker.io-0.9.1~dfsg1/libtrust/key_files_test.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/key_files_test.go	2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,220 @@
+package libtrust
+
+import (
+	"errors"
+	"io/ioutil"
+	"os"
+	"testing"
+)
+
+func makeTempFile(t *testing.T, prefix string) (filename string) {
+	file, err := ioutil.TempFile("", prefix)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	filename = file.Name()
+	file.Close()
+
+	return
+}
+
+func TestKeyFiles(t *testing.T) {
+	key, err := GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	testKeyFiles(t, key)
+
+	key, err = GenerateRSA2048PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	testKeyFiles(t, key)
+}
+
+func testKeyFiles(t *testing.T, key PrivateKey) {
+	var err error
+
+	privateKeyFilename := makeTempFile(t, "private_key")
+	privateKeyFilenamePEM := privateKeyFilename + ".pem"
+	privateKeyFilenameJWK := privateKeyFilename + ".jwk"
+
+	publicKeyFilename := makeTempFile(t, "public_key")
+	publicKeyFilenamePEM := publicKeyFilename + ".pem"
+	publicKeyFilenameJWK := publicKeyFilename + ".jwk"
+
+	if err = SaveKey(privateKeyFilenamePEM, key); err != nil {
+		t.Fatal(err)
+	}
+
+	if err = SaveKey(privateKeyFilenameJWK, key); err != nil {
+		t.Fatal(err)
+	}
+
+	if err = SavePublicKey(publicKeyFilenamePEM, key.PublicKey()); err != nil {
+		t.Fatal(err)
+	}
+
+	if err = SavePublicKey(publicKeyFilenameJWK, key.PublicKey()); err != nil {
+		t.Fatal(err)
+	}
+
+	loadedPEMKey, err := LoadKeyFile(privateKeyFilenamePEM)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	loadedJWKKey, err := LoadKeyFile(privateKeyFilenameJWK)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	loadedPEMPublicKey, err := LoadPublicKeyFile(publicKeyFilenamePEM)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	loadedJWKPublicKey, err := LoadPublicKeyFile(publicKeyFilenameJWK)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if key.KeyID() != loadedPEMKey.KeyID() {
+		t.Fatal(errors.New("key IDs do not match"))
+	}
+
+	if key.KeyID() != loadedJWKKey.KeyID() {
+		t.Fatal(errors.New("key IDs do not match"))
+	}
+
+	if key.KeyID() != loadedPEMPublicKey.KeyID() {
+		t.Fatal(errors.New("key IDs do not match"))
+	}
+
+	if key.KeyID() != loadedJWKPublicKey.KeyID() {
+		t.Fatal(errors.New("key IDs do not match"))
+	}
+
+	os.Remove(privateKeyFilename)
+	os.Remove(privateKeyFilenamePEM)
+	os.Remove(privateKeyFilenameJWK)
+	os.Remove(publicKeyFilename)
+	os.Remove(publicKeyFilenamePEM)
+	os.Remove(publicKeyFilenameJWK)
+}
+
+func TestTrustedHostKeysFile(t *testing.T) {
+	trustedHostKeysFilename := makeTempFile(t, "trusted_host_keys")
+	trustedHostKeysFilenamePEM := trustedHostKeysFilename + ".pem"
+	trustedHostKeysFilenameJWK := trustedHostKeysFilename + ".json"
+
+	testTrustedHostKeysFile(t, trustedHostKeysFilenamePEM)
+	testTrustedHostKeysFile(t, trustedHostKeysFilenameJWK)
+
+	os.Remove(trustedHostKeysFilename)
+	os.Remove(trustedHostKeysFilenamePEM)
+	os.Remove(trustedHostKeysFilenameJWK)
+}
+
+func testTrustedHostKeysFile(t *testing.T, trustedHostKeysFilename string) {
+	hostAddress1 := "docker.example.com:2376"
+	hostKey1, err := GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	hostKey1.AddExtendedField("hosts", []string{hostAddress1})
+	err = AddKeySetFile(trustedHostKeysFilename, hostKey1.PublicKey())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	trustedHostKeysMapping, err := LoadKeySetFile(trustedHostKeysFilename)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, hostKey := range trustedHostKeysMapping {
+		t.Logf("Host Patterns: %v\n", hostKey.GetExtendedField("hosts"))
+		t.Logf("Host Key: %s\n\n", hostKey)
+	}
+
+	hostAddress2 := "192.168.59.103:2376"
+	hostKey2, err := GenerateRSA2048PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	hostKey2.AddExtendedField("hosts", hostAddress2)
+	err = AddKeySetFile(trustedHostKeysFilename, hostKey2.PublicKey())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	trustedHostKeysMapping, err = LoadKeySetFile(trustedHostKeysFilename)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, hostKey := range trustedHostKeysMapping {
+		t.Logf("Host Patterns: %v\n", hostKey.GetExtendedField("hosts"))
+		t.Logf("Host Key: %s\n\n", hostKey)
+	}
+
+}
+
+func TestTrustedClientKeysFile(t *testing.T) {
+	trustedClientKeysFilename := makeTempFile(t, "trusted_client_keys")
+	trustedClientKeysFilenamePEM := trustedClientKeysFilename + ".pem"
+	trustedClientKeysFilenameJWK := trustedClientKeysFilename + ".json"
+
+	testTrustedClientKeysFile(t, trustedClientKeysFilenamePEM)
+	testTrustedClientKeysFile(t, trustedClientKeysFilenameJWK)
+
+	os.Remove(trustedClientKeysFilename)
+	os.Remove(trustedClientKeysFilenamePEM)
+	os.Remove(trustedClientKeysFilenameJWK)
+}
+
+func testTrustedClientKeysFile(t *testing.T, trustedClientKeysFilename string) {
+	clientKey1, err := GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = AddKeySetFile(trustedClientKeysFilename, clientKey1.PublicKey())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	trustedClientKeys, err := LoadKeySetFile(trustedClientKeysFilename)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, clientKey := range trustedClientKeys {
+		t.Logf("Client Key: %s\n", clientKey)
+	}
+
+	clientKey2, err := GenerateRSA2048PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = AddKeySetFile(trustedClientKeysFilename, clientKey2.PublicKey())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	trustedClientKeys, err = LoadKeySetFile(trustedClientKeysFilename)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, clientKey := range trustedClientKeys {
+		t.Logf("Client Key: %s\n", clientKey)
+	}
+}
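key.go, which follows, defines the PublicKey and PrivateKey interfaces used throughout. A small sketch of moving between those interfaces and the standard library's PEM machinery:

package main

import (
	"encoding/pem"
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateRSA2048PrivateKey()
	if err != nil {
		panic(err)
	}

	// PrivateKey -> PEM block -> bytes.
	block, err := key.PEMBlock()
	if err != nil {
		panic(err)
	}
	pemBytes := pem.EncodeToMemory(block)

	// Bytes -> PrivateKey again.
	key2, err := libtrust.UnmarshalPrivateKeyPEM(pemBytes)
	if err != nil {
		panic(err)
	}
	fmt.Println(key2.KeyID() == key.KeyID()) // true
}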
diff -Nru docker.io-0.9.1~dfsg1/libtrust/key.go docker.io-1.3.2~dfsg1/libtrust/key.go
--- docker.io-0.9.1~dfsg1/libtrust/key.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/key.go	2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,253 @@
+package libtrust
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// PublicKey is a generic interface for a Public Key.
+type PublicKey interface {
+	// KeyType returns the key type for this key. For elliptic curve keys,
+	// this value should be "EC". For RSA keys, this value should be "RSA".
+	KeyType() string
+	// KeyID returns a distinct identifier which is unique to this Public Key.
+	// The format generated by this library is a base32 encoding of a 240 bit
+	// hash of the public key data divided into 12 groups like so:
+	// ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+	KeyID() string
+	// Verify verifies the signature of the data in the io.Reader using this
+	// Public Key. The alg parameter should identify the digital signature
+	// algorithm which was used to produce the signature and should be
+	// supported by this public key. Returns a nil error if the signature
+	// is valid.
+	Verify(data io.Reader, alg string, signature []byte) error
+	// CryptoPublicKey returns the internal object which can be used as a
+	// crypto.PublicKey for use with other standard library operations. The type
+	// is either *rsa.PublicKey or *ecdsa.PublicKey
+	CryptoPublicKey() crypto.PublicKey
+	// These public keys can be serialized to the standard JSON encoding for
+	// JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
+	// Algorithms.
+	MarshalJSON() ([]byte, error)
+	// These keys can also be serialized to the standard PEM encoding.
+	PEMBlock() (*pem.Block, error)
+	// The string representation of a key is its key type and ID.
+	String() string
+	AddExtendedField(string, interface{})
+	GetExtendedField(string) interface{}
+}
+
+// PrivateKey is a generic interface for a Private Key.
+type PrivateKey interface {
+	// A PrivateKey contains all fields and methods of a PublicKey of the
+	// same type. The MarshalJSON method also outputs the private key as a
+	// JSON Web Key, and the PEMBlock method outputs the private key as a
+	// PEM block.
+	PublicKey
+	// PublicKey returns the PublicKey associated with this PrivateKey.
+	PublicKey() PublicKey
+	// Sign signs the data read from the io.Reader using a signature algorithm
+	// supported by the private key. If the specified hashing algorithm is
+	// supported by this key, that hash function is used to generate the
+	// signature otherwise the default hashing algorithm for this key is
+	// used. Returns the signature and identifier of the algorithm used.
+	Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
+	// CryptoPrivateKey returns the internal object which can be used as a
+	// crypto.PrivateKey for use with other standard library operations. The
+	// type is either *rsa.PrivateKey or *ecdsa.PrivateKey
+	CryptoPrivateKey() crypto.PrivateKey
+}
+
+// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
+// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
+	switch cryptoPublicKey := cryptoPublicKey.(type) {
+	case *ecdsa.PublicKey:
+		return fromECPublicKey(cryptoPublicKey)
+	case *rsa.PublicKey:
+		return fromRSAPublicKey(cryptoPublicKey), nil
+	default:
+		return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
+	}
+}
+
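A sketch of crossing between standard-library key types and these interfaces in both directions (ECDSA chosen arbitrarily):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	// A key created directly with the standard library...
	ecKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// ...can be wrapped as libtrust types in both directions.
	priv, err := libtrust.FromCryptoPrivateKey(ecKey)
	if err != nil {
		panic(err)
	}
	pub, err := libtrust.FromCryptoPublicKey(&ecKey.PublicKey)
	if err != nil {
		panic(err)
	}
	fmt.Println(priv.KeyID() == pub.KeyID()) // true
}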
+// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
+// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
+	switch cryptoPrivateKey := cryptoPrivateKey.(type) {
+	case *ecdsa.PrivateKey:
+		return fromECPrivateKey(cryptoPrivateKey)
+	case *rsa.PrivateKey:
+		return fromRSAPrivateKey(cryptoPrivateKey), nil
+	default:
+		return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
+	}
+}
+
+// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
+// PublicKey or an error if there is a problem with the encoding.
+func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) {
+	pemBlock, _ := pem.Decode(data)
+	if pemBlock == nil {
+		return nil, errors.New("unable to find PEM encoded data")
+	} else if pemBlock.Type != "PUBLIC KEY" {
+		return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
+	}
+
+	return pubKeyFromPEMBlock(pemBlock)
+}
+
+// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of
+// PEM blocks appended one after the other and returns a slice of PublicKey
+// objects that it finds.
+func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) {
+	pubKeys := []PublicKey{}
+
+	for {
+		var pemBlock *pem.Block
+		pemBlock, data = pem.Decode(data)
+		if pemBlock == nil {
+			break
+		} else if pemBlock.Type != "PUBLIC KEY" {
+			return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
+		}
+
+		pubKey, err := pubKeyFromPEMBlock(pemBlock)
+		if err != nil {
+			return nil, err
+		}
+
+		pubKeys = append(pubKeys, pubKey)
+	}
+
+	return pubKeys, nil
+}
+
+// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust
+// PrivateKey or an error if there is a problem with the encoding.
+func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) {
+	pemBlock, _ := pem.Decode(data)
+	if pemBlock == nil {
+		return nil, errors.New("unable to find PEM encoded data")
+	}
+
+	var key PrivateKey
+
+	switch {
+	case pemBlock.Type == "RSA PRIVATE KEY":
+		rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes)
+		if err != nil {
+			return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err)
+		}
+		key = fromRSAPrivateKey(rsaPrivateKey)
+	case pemBlock.Type == "EC PRIVATE KEY":
+		ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes)
+		if err != nil {
+			return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err)
+		}
+		key, err = fromECPrivateKey(ecPrivateKey)
+		if err != nil {
+			return nil, err
+		}
+	default:
+		return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type)
+	}
+
+	addPEMHeadersToKey(pemBlock, key.PublicKey())
+
+	return key, nil
+}
+
+// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic
+// Public Key to be used with libtrust.
+func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) {
+	jwk := make(map[string]interface{})
+
+	err := json.Unmarshal(data, &jwk)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"decoding JWK Public Key JSON data: %s\n", err,
+		)
+	}
+
+	// Get the Key Type value.
+	kty, err := stringFromMap(jwk, "kty")
+	if err != nil {
+		return nil, fmt.Errorf("JWK Public Key type: %s", err)
+	}
+
+	switch {
+	case kty == "EC":
+		// Call out to unmarshal EC public key.
+		return ecPublicKeyFromMap(jwk)
+	case kty == "RSA":
+		// Call out to unmarshal RSA public key.
+ return rsaPublicKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Public Key type not supported: %q\n", kty, + ) + } +} + +// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set +// and returns a slice of Public Key objects. +func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { + rawKeys, err := loadJSONKeySetRaw(data) + if err != nil { + return nil, err + } + + pubKeys := make([]PublicKey, 0, len(rawKeys)) + + for _, rawKey := range rawKeys { + pubKey, err := UnmarshalPublicKeyJWK(rawKey) + if err != nil { + return nil, err + } + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic +// Private Key to be used with libtrust. +func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Private Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Private Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC private key. + return ecPrivateKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA private key. + return rsaPrivateKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Private Key type not supported: %q\n", kty, + ) + } +} diff -Nru docker.io-0.9.1~dfsg1/libtrust/LICENSE docker.io-1.3.2~dfsg1/libtrust/LICENSE --- docker.io-0.9.1~dfsg1/libtrust/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libtrust/LICENSE 2014-09-30 21:18:50.000000000 +0000 @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2014 Docker, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff -Nru docker.io-0.9.1~dfsg1/libtrust/MAINTAINERS docker.io-1.3.2~dfsg1/libtrust/MAINTAINERS
--- docker.io-0.9.1~dfsg1/libtrust/MAINTAINERS	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/MAINTAINERS	2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,3 @@
+Solomon Hykes
+Josh Hawn (github: jlhawn)
+Derek McGowan (github: dmcgowan)
diff -Nru docker.io-0.9.1~dfsg1/libtrust/README.md docker.io-1.3.2~dfsg1/libtrust/README.md
--- docker.io-0.9.1~dfsg1/libtrust/README.md	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/README.md	2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,18 @@
+# libtrust
+
+Libtrust is a library for managing authentication and authorization using public key cryptography.
+
+Authentication is handled using the identity attached to the public key.
+Libtrust provides multiple methods to prove possession of the private key associated with an identity.
+ - TLS x509 certificates
+ - Signature verification
+ - Key Challenge
+
+Authorization and access control are managed through a distributed trust graph.
+Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
+
+## Copyright and license
+
+Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
+Docs released under Creative Commons.
+
diff -Nru docker.io-0.9.1~dfsg1/libtrust/rsa_key.go docker.io-1.3.2~dfsg1/libtrust/rsa_key.go
--- docker.io-0.9.1~dfsg1/libtrust/rsa_key.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/rsa_key.go	2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,436 @@
+package libtrust
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"math/big"
+)
+
+/*
+ * RSA DSA PUBLIC KEY
+ */
+
+// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
+type rsaPublicKey struct {
+	*rsa.PublicKey
+	extended map[string]interface{}
+}
+
+func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
+	return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
+}
+
+// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
+func (k *rsaPublicKey) KeyType() string {
+	return "RSA"
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *rsaPublicKey) KeyID() string {
+	// Generate and return a 'libtrust' fingerprint of the RSA public key.
+	// For an RSA key this should be:
+	//   SHA256("RSA"+bytes(N)+bytes(E))
+	// Then truncated to 240 bits and encoded into 12 base32 groups like so:
+	//   ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+	hasher := crypto.SHA256.New()
+	hasher.Write([]byte(k.KeyType()))
+	hasher.Write(k.N.Bytes())
+	hasher.Write(serializeRSAPublicExponentParam(k.E))
+	return keyIDEncode(hasher.Sum(nil)[:30])
+}
+
+func (k *rsaPublicKey) String() string {
+	return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this Public Key.
+// The alg parameter should be the name of the JWA digital signature algorithm
+// which was used to produce the signature and should be supported by this
+// public key. Returns a nil error if the signature is valid.
+func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+	// Verify the signature of the given data; return a non-nil error if invalid.
+	sigAlg, err := rsaSignatureAlgorithmByName(alg)
+	if err != nil {
+		return fmt.Errorf("unable to verify Signature: %s", err)
+	}
+
+	hasher := sigAlg.HashID().New()
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return fmt.Errorf("error reading data to verify: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
+	if err != nil {
+		return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
+	}
+
+	return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations. The type
+// is *rsa.PublicKey.
+func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
+	return k.PublicKey
+}
+
+func (k *rsaPublicKey) toMap() map[string]interface{} {
+	jwk := make(map[string]interface{})
+	for k, v := range k.extended {
+		jwk[k] = v
+	}
+	jwk["kty"] = k.KeyType()
+	jwk["kid"] = k.KeyID()
+	jwk["n"] = joseBase64UrlEncode(k.N.Bytes())
+	jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E))
+
+	return jwk
+}
+
+// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
+// RSA keys.
+func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Public Key to DER-encoded PKIX format.
+func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) {
+	derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
+	if err != nil {
+		return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err)
+	}
+	k.extended["keyID"] = k.KeyID() // For display purposes.
+	return createPemBlock("PUBLIC KEY", derBytes, k.extended)
+}
+
+func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) {
+	k.extended[field] = value
+}
+
+func (k *rsaPublicKey) GetExtendedField(field string) interface{} {
+	v, ok := k.extended[field]
+	if !ok {
+		return nil
+	}
+	return v
+}
+
+func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) {
+	// JWK key type (kty) has already been determined to be "RSA".
+	// Need to extract 'n', 'e', and 'kid' and check for
+	// consistency.
+
+	// Get the modulus parameter N.
+	nB64Url, err := stringFromMap(jwk, "n")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
+	}
+
+	n, err := parseRSAModulusParam(nB64Url)
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
+	}
+
+	// Get the public exponent E.
+	eB64Url, err := stringFromMap(jwk, "e")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
+	}
+
+	e, err := parseRSAPublicExponentParam(eB64Url)
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
+	}
+
+	key := &rsaPublicKey{
+		PublicKey: &rsa.PublicKey{N: n, E: e},
+	}
+
+	// Key ID is optional, but if it exists, it should match the key.
+	_, ok := jwk["kid"]
+	if ok {
+		kid, err := stringFromMap(jwk, "kid")
+		if err != nil {
+			return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err)
+		}
+		if kid != key.KeyID() {
+			return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid)
+		}
+	}
+
+	if _, ok := jwk["d"]; ok {
+		return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent")
+	}
+
+	key.extended = jwk
+
+	return key, nil
+}
+
+/*
+ * RSA DSA PRIVATE KEY
+ */
+
+// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms.
+type rsaPrivateKey struct {
+	rsaPublicKey
+	*rsa.PrivateKey
+}
+
+func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey {
+	return &rsaPrivateKey{
+		*fromRSAPublicKey(&cryptoPrivateKey.PublicKey),
+		cryptoPrivateKey,
+	}
+}
+
+// PublicKey returns the Public Key data associated with this Private Key.
+func (k *rsaPrivateKey) PublicKey() PublicKey {
+	return &k.rsaPublicKey
+}
+
+func (k *rsaPrivateKey) String() string {
+	return fmt.Sprintf("RSA Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the RSA private key. If the specified hashing algorithm is supported by
+// this key, that hash function is used to generate the signature; otherwise,
+// the default hashing algorithm for this key is used. Returns the signature
+// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
+// "RS512".
+func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+	// Generate a signature of the data using the internal alg.
+	sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
+	hasher := sigAlg.HashID().New()
+
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
+	if err != nil {
+		return nil, "", fmt.Errorf("error producing signature: %s", err)
+	}
+
+	alg = sigAlg.HeaderParam()
+
+	return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is *rsa.PrivateKey.
+func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+	return k.PrivateKey
+}
+
+func (k *rsaPrivateKey) toMap() map[string]interface{} {
+	k.Precompute() // Make sure the precomputed values are stored.
+	jwk := k.rsaPublicKey.toMap()
+
+	jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
+	jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
+	jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
+	jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
+	jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
+	jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())
+
+	otherPrimes := k.Primes[2:]
+
+	if len(otherPrimes) > 0 {
+		otherPrimesInfo := make([]interface{}, len(otherPrimes))
+		for i, r := range otherPrimes {
+			otherPrimeInfo := make(map[string]string, 3)
+			otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
+			crtVal := k.Precomputed.CRTValues[i]
+			otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
+			otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
+			otherPrimesInfo[i] = otherPrimeInfo
+		}
+		jwk["oth"] = otherPrimesInfo
+	}
+
+	return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// RSA keys.
+func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded PKCS#1 format.
+func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
+	derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
+	k.extended["keyID"] = k.KeyID() // For display purposes.
+	return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
+}
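+
+// For reference, the JWK produced by toMap/MarshalJSON above has roughly this
+// shape (base64url values elided; "oth" appears only for multi-prime keys):
+//
+//	{
+//	  "kty": "RSA",
+//	  "kid": "ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP",
+//	  "n": "...", "e": "...",
+//	  "d": "...", "p": "...", "q": "...",
+//	  "dp": "...", "dq": "...", "qi": "..."
+//	}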
+
+func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
+	// The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
+	// only the private key exponent 'd' is REQUIRED, the others are just for
+	// signature/decryption optimizations and SHOULD be included when the JWK
+	// is produced. We MAY choose to accept a JWK which only includes 'd', but
+	// here we require the extra fields as well. Only the 'oth' field is
+	// optional (for multi-prime keys).
+	privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
+	}
+	firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+	}
+	secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+	}
+	firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+	}
+	secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+	}
+	crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+	}
+
+	var oth interface{}
+	if _, ok := jwk["oth"]; ok {
+		oth = jwk["oth"]
+		delete(jwk, "oth")
+	}
+
+	// JWK key type (kty) has already been determined to be "RSA".
+	// Need to extract the public key information, then extract the private
+	// key values.
+	publicKey, err := rsaPublicKeyFromMap(jwk)
+	if err != nil {
+		return nil, err
+	}
+
+	privateKey := &rsa.PrivateKey{
+		PublicKey: *publicKey.PublicKey,
+		D:         privateExponent,
+		Primes:    []*big.Int{firstPrimeFactor, secondPrimeFactor},
+		Precomputed: rsa.PrecomputedValues{
+			Dp:   firstFactorCRT,
+			Dq:   secondFactorCRT,
+			Qinv: crtCoeff,
+		},
+	}
+
+	if oth != nil {
+		// Should be an array of more JSON objects.
+		otherPrimesInfo, ok := oth.([]interface{})
+		if !ok {
+			return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
+		}
+		numOtherPrimeFactors := len(otherPrimesInfo)
+		if numOtherPrimeFactors == 0 {
+			return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be absent or non-empty")
+		}
+		otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
+		productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
+		crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
+
+		for i, val := range otherPrimesInfo {
+			otherPrimeInfo, ok := val.(map[string]interface{})
+			if !ok {
+				return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
+			}
+
+			otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeInfo, "r")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+			}
+			otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeInfo, "d")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+			}
+			otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeInfo, "t")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+			}
+
+			// Assign through the slice index; assignments to a local copy
+			// of the rsa.CRTValue struct would silently be discarded.
+			crtValues[i].Exp = otherFactorCRT
+			crtValues[i].Coeff = otherCrtCoeff
+			crtValues[i].R = productOfPrimes
+			otherPrimeFactors[i] = otherPrimeFactor
+			productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
+		}
+
+		privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
+		privateKey.Precomputed.CRTValues = crtValues
+	}
+
+	key := &rsaPrivateKey{
+		rsaPublicKey: *publicKey,
+		PrivateKey:   privateKey,
+	}
+
+	return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */ + +func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { + k = new(rsaPrivateKey) + k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + + k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. +func GenerateRSA2048PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(2048) + if err != nil { + return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. +func GenerateRSA3072PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(3072) + if err != nil { + return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. +func GenerateRSA4096PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(4096) + if err != nil { + return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) + } + + return k, nil +} diff -Nru docker.io-0.9.1~dfsg1/libtrust/rsa_key_test.go docker.io-1.3.2~dfsg1/libtrust/rsa_key_test.go --- docker.io-0.9.1~dfsg1/libtrust/rsa_key_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libtrust/rsa_key_test.go 2014-09-30 21:18:50.000000000 +0000 @@ -0,0 +1,157 @@ +package libtrust + +import ( + "bytes" + "encoding/json" + "log" + "testing" +) + +var rsaKeys []PrivateKey + +func init() { + var err error + rsaKeys, err = generateRSATestKeys() + if err != nil { + log.Fatal(err) + } +} + +func generateRSATestKeys() (keys []PrivateKey, err error) { + log.Println("Generating RSA 2048-bit Test Key") + rsa2048Key, err := GenerateRSA2048PrivateKey() + if err != nil { + return + } + + log.Println("Generating RSA 3072-bit Test Key") + rsa3072Key, err := GenerateRSA3072PrivateKey() + if err != nil { + return + } + + log.Println("Generating RSA 4096-bit Test Key") + rsa4096Key, err := GenerateRSA4096PrivateKey() + if err != nil { + return + } + + log.Println("Done generating RSA Test Keys!") + keys = []PrivateKey{rsa2048Key, rsa3072Key, rsa4096Key} + + return +} + +func TestRSAKeys(t *testing.T) { + for _, rsaKey := range rsaKeys { + if rsaKey.KeyType() != "RSA" { + t.Fatalf("key type must be %q, instead got %q", "RSA", rsaKey.KeyType()) + } + } +} + +func TestRSASignVerify(t *testing.T) { + message := "Hello, World!" + data := bytes.NewReader([]byte(message)) + + sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512} + + for i, rsaKey := range rsaKeys { + sigAlg := sigAlgs[i] + + t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, rsaKey.KeyID()) + + data.Seek(0, 0) // Reset the byte reader + + // Sign + sig, alg, err := rsaKey.Sign(data, sigAlg.HashID()) + if err != nil { + t.Fatal(err) + } + + data.Seek(0, 0) // Reset the byte reader + + // Verify + err = rsaKey.Verify(data, alg, sig) + if err != nil { + t.Fatal(err) + } + } +} + +func TestMarshalUnmarshalRSAKeys(t *testing.T) { + data := bytes.NewReader([]byte("This is a test. 
I repeat: this is only a test."))
+	sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512}
+
+	for i, rsaKey := range rsaKeys {
+		sigAlg := sigAlgs[i]
+		privateJWKJSON, err := json.MarshalIndent(rsaKey, "", " ")
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		publicJWKJSON, err := json.MarshalIndent(rsaKey.PublicKey(), "", " ")
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		t.Logf("JWK Private Key: %s", string(privateJWKJSON))
+		t.Logf("JWK Public Key: %s", string(publicJWKJSON))
+
+		privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		// Ensure we can sign/verify a message with the unmarshalled keys.
+		data.Seek(0, 0) // Reset the byte reader
+		signature, alg, err := privKey2.Sign(data, sigAlg.HashID())
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		data.Seek(0, 0) // Reset the byte reader
+		err = pubKey2.Verify(data, alg, signature)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		// It's a good idea to validate the Private Key to make sure our
+		// (un)marshal process didn't corrupt the extra parameters.
+		k := privKey2.(*rsaPrivateKey)
+		err = k.PrivateKey.Validate()
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+func TestFromCryptoRSAKeys(t *testing.T) {
+	for _, rsaKey := range rsaKeys {
+		cryptoPrivateKey := rsaKey.CryptoPrivateKey()
+		cryptoPublicKey := rsaKey.CryptoPublicKey()
+
+		pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if pubKey.KeyID() != rsaKey.KeyID() {
+			t.Fatal("public key key ID mismatch")
+		}
+
+		privKey, err := FromCryptoPrivateKey(cryptoPrivateKey)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if privKey.KeyID() != rsaKey.KeyID() {
+			t.Fatal("private key key ID mismatch")
+		}
+	}
+}
diff -Nru docker.io-0.9.1~dfsg1/libtrust/testutil/certificates.go docker.io-1.3.2~dfsg1/libtrust/testutil/certificates.go
--- docker.io-0.9.1~dfsg1/libtrust/testutil/certificates.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/testutil/certificates.go	2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,94 @@
+package testutil
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"math/big"
+	"time"
+)
+
+// GenerateTrustCA generates a new certificate authority for testing.
+func GenerateTrustCA(pub crypto.PublicKey, priv crypto.PrivateKey) (*x509.Certificate, error) {
+	cert := &x509.Certificate{
+		SerialNumber: big.NewInt(0),
+		Subject: pkix.Name{
+			CommonName: "CA Root",
+		},
+		NotBefore:             time.Now().Add(-time.Second),
+		NotAfter:              time.Now().Add(time.Hour),
+		IsCA:                  true,
+		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
+		BasicConstraintsValid: true,
+	}
+
+	certDER, err := x509.CreateCertificate(rand.Reader, cert, cert, pub, priv)
+	if err != nil {
+		return nil, err
+	}
+
+	cert, err = x509.ParseCertificate(certDER)
+	if err != nil {
+		return nil, err
+	}
+
+	return cert, nil
+}
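+
+// Illustrative sketch (error handling elided): these helpers compose into a
+// test chain, mirroring generateTrustChain in trustgraph/statement_test.go:
+//
+//	caKey, _ := libtrust.GenerateECP256PrivateKey()
+//	ca, _ := GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey())
+//	leafKey, _ := libtrust.GenerateECP256PrivateKey()
+//	leaf, _ := GenerateTrustCert(leafKey.CryptoPublicKey(), caKey.CryptoPrivateKey(), ca)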
+
+// GenerateIntermediate generates an intermediate certificate for testing using
+// the parent certificate (likely a CA) and the provided keys.
+func GenerateIntermediate(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) {
+	cert := &x509.Certificate{
+		SerialNumber: big.NewInt(0),
+		Subject: pkix.Name{
+			CommonName: "Intermediate",
+		},
+		NotBefore:             time.Now().Add(-time.Second),
+		NotAfter:              time.Now().Add(time.Hour),
+		IsCA:                  true,
+		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
+		BasicConstraintsValid: true,
+	}
+
+	certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey)
+	if err != nil {
+		return nil, err
+	}
+
+	cert, err = x509.ParseCertificate(certDER)
+	if err != nil {
+		return nil, err
+	}
+
+	return cert, nil
+}
+
+// GenerateTrustCert generates a new trust certificate for testing. Unlike the
+// intermediate certificates, this certificate should be used only for signing,
+// not for issuing further certificates.
+func GenerateTrustCert(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) {
+	cert := &x509.Certificate{
+		SerialNumber: big.NewInt(0),
+		Subject: pkix.Name{
+			CommonName: "Trust Cert",
+		},
+		NotBefore:             time.Now().Add(-time.Second),
+		NotAfter:              time.Now().Add(time.Hour),
+		IsCA:                  true,
+		KeyUsage:              x509.KeyUsageDigitalSignature,
+		BasicConstraintsValid: true,
+	}
+
+	certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey)
+	if err != nil {
+		return nil, err
+	}
+
+	cert, err = x509.ParseCertificate(certDER)
+	if err != nil {
+		return nil, err
+	}
+
+	return cert, nil
+}
diff -Nru docker.io-0.9.1~dfsg1/libtrust/tlsdemo/client.go docker.io-1.3.2~dfsg1/libtrust/tlsdemo/client.go
--- docker.io-0.9.1~dfsg1/libtrust/tlsdemo/client.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/tlsdemo/client.go	2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,89 @@
+package main
+
+import (
+	"crypto/tls"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+
+	"github.com/docker/libtrust"
+)
+
+var (
+	serverAddress        = "localhost:8888"
+	privateKeyFilename   = "client_data/private_key.pem"
+	trustedHostsFilename = "client_data/trusted_hosts.pem"
+)
+
+func main() {
+	// Load Client Key.
+	clientKey, err := libtrust.LoadKeyFile(privateKeyFilename)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Generate Client Certificate.
+	selfSignedClientCert, err := libtrust.GenerateSelfSignedClientCert(clientKey)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Load trusted host keys.
+	hostKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Ensure the host we want to connect to is trusted!
+	host, _, err := net.SplitHostPort(serverAddress)
+	if err != nil {
+		log.Fatal(err)
+	}
+	serverKeys, err := libtrust.FilterByHosts(hostKeys, host, false)
+	if err != nil {
+		log.Fatalf("%q is not a known and trusted host", host)
+	}
+
+	// Generate a CA pool with the trusted host's key.
+	caPool, err := libtrust.GenerateCACertPool(clientKey, serverKeys)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Create HTTP Client.
+ client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + Certificates: []tls.Certificate{ + tls.Certificate{ + Certificate: [][]byte{selfSignedClientCert.Raw}, + PrivateKey: clientKey.CryptoPrivateKey(), + Leaf: selfSignedClientCert, + }, + }, + RootCAs: caPool, + }, + }, + } + + var makeRequest = func(url string) { + resp, err := client.Get(url) + if err != nil { + log.Fatal(err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + log.Fatal(err) + } + + log.Println(resp.Status) + log.Println(string(body)) + } + + // Make the request to the trusted server! + makeRequest(fmt.Sprintf("https://%s", serverAddress)) +} diff -Nru docker.io-0.9.1~dfsg1/libtrust/tlsdemo/gencert.go docker.io-1.3.2~dfsg1/libtrust/tlsdemo/gencert.go --- docker.io-0.9.1~dfsg1/libtrust/tlsdemo/gencert.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libtrust/tlsdemo/gencert.go 2014-09-30 21:18:50.000000000 +0000 @@ -0,0 +1,62 @@ +package main + +import ( + "encoding/pem" + "fmt" + "log" + "net" + + "github.com/docker/libtrust" +) + +var ( + serverAddress = "localhost:8888" + clientPrivateKeyFilename = "client_data/private_key.pem" + trustedHostsFilename = "client_data/trusted_hosts.pem" +) + +func main() { + key, err := libtrust.LoadKeyFile(clientPrivateKeyFilename) + if err != nil { + log.Fatal(err) + } + + keyPEMBlock, err := key.PEMBlock() + if err != nil { + log.Fatal(err) + } + + encodedPrivKey := pem.EncodeToMemory(keyPEMBlock) + fmt.Printf("Client Key:\n\n%s\n", string(encodedPrivKey)) + + cert, err := libtrust.GenerateSelfSignedClientCert(key) + if err != nil { + log.Fatal(err) + } + + encodedCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) + fmt.Printf("Client Cert:\n\n%s\n", string(encodedCert)) + + trustedServerKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename) + if err != nil { + log.Fatal(err) + } + + hostname, _, err := net.SplitHostPort(serverAddress) + if err != nil { + log.Fatal(err) + } + + trustedServerKeys, err = libtrust.FilterByHosts(trustedServerKeys, hostname, false) + if err != nil { + log.Fatal(err) + } + + caCert, err := libtrust.GenerateCACert(key, trustedServerKeys[0]) + if err != nil { + log.Fatal(err) + } + + encodedCert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: caCert.Raw}) + fmt.Printf("CA Cert:\n\n%s\n", string(encodedCert)) +} diff -Nru docker.io-0.9.1~dfsg1/libtrust/tlsdemo/genkeys.go docker.io-1.3.2~dfsg1/libtrust/tlsdemo/genkeys.go --- docker.io-0.9.1~dfsg1/libtrust/tlsdemo/genkeys.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libtrust/tlsdemo/genkeys.go 2014-09-30 21:18:50.000000000 +0000 @@ -0,0 +1,61 @@ +package main + +import ( + "log" + + "github.com/docker/libtrust" +) + +func main() { + // Generate client key. + clientKey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + log.Fatal(err) + } + + // Add a comment for the client key. + clientKey.AddExtendedField("comment", "TLS Demo Client") + + // Save the client key, public and private versions. + err = libtrust.SaveKey("client_data/private_key.pem", clientKey) + if err != nil { + log.Fatal(err) + } + + err = libtrust.SavePublicKey("client_data/public_key.pem", clientKey.PublicKey()) + if err != nil { + log.Fatal(err) + } + + // Generate server key. + serverKey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + log.Fatal(err) + } + + // Set the list of addresses to use for the server. 
+	serverKey.AddExtendedField("hosts", []string{"localhost", "docker.example.com"})
+
+	// Save the server key, public and private versions.
+	err = libtrust.SaveKey("server_data/private_key.pem", serverKey)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	err = libtrust.SavePublicKey("server_data/public_key.pem", serverKey.PublicKey())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Generate Authorized Keys file for server.
+	err = libtrust.AddKeySetFile("server_data/trusted_clients.pem", clientKey.PublicKey())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Generate Known Host Keys file for client.
+	err = libtrust.AddKeySetFile("client_data/trusted_hosts.pem", serverKey.PublicKey())
+	if err != nil {
+		log.Fatal(err)
+	}
+}
diff -Nru docker.io-0.9.1~dfsg1/libtrust/tlsdemo/README.md docker.io-1.3.2~dfsg1/libtrust/tlsdemo/README.md
--- docker.io-0.9.1~dfsg1/libtrust/tlsdemo/README.md	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/tlsdemo/README.md	2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,50 @@
+## Libtrust TLS Config Demo
+
+This program generates key pairs and trust files for a TLS client and server.
+
+To generate the keys, run:
+
+```
+$ go run genkeys.go
+```
+
+The generated files are:
+
+```
+$ ls -l client_data/ server_data/
+client_data/:
+total 24
+-rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json
+-rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json
+-rw-r--r-- 1 jlhawn staff 275 Aug 8 16:21 trusted_hosts.json
+
+server_data/:
+total 24
+-rw-r--r-- 1 jlhawn staff 348 Aug 8 16:21 trusted_clients.json
+-rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json
+-rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json
+```
+
+The private key and public key for the client and server are stored in `private_key.json` and `public_key.json`, respectively, and in their respective directories. They are represented as JSON Web Keys: JSON objects which represent either an ECDSA or RSA private key. The host keys trusted by the client are stored in `trusted_hosts.json` and contain a mapping of an internet address, `<host>:<port>`, to a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted server. The client keys trusted by the server are stored in `trusted_clients.json` and contain an array of JSON objects which contain a comment field which can be used to describe the key and a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted client.
+
+To start the server, run:
+
+```
+$ go run server.go
+```
+
+This starts an HTTPS server which listens on `localhost:8888`. The server configures itself with a certificate which is valid for both `localhost` and `127.0.0.1` and uses the key from `server_data/private_key.json`. It accepts connections from clients which present a certificate for a key that it is configured to trust from the `trusted_clients.json` file and returns a simple 'hello' message.
+
+To make a request using the client, run:
+
+```
+$ go run client.go
+```
+
+This command creates an HTTPS client which makes a GET request to `https://localhost:8888`. The client configures itself with a certificate using the key from `client_data/private_key.json`. It only connects to a server which presents a certificate signed by the key specified for the `localhost:8888` address from `client_data/trusted_hosts.json` and made to be used for the `localhost` hostname. If the connection succeeds, it prints the response from the server.
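+
+As an aside, the following sketch shows roughly how `client.go` decides whether a server is trusted, using the `LoadKeySetFile` and `FilterByHosts` helpers and the files produced by `genkeys.go` (the path and address below are the demo defaults):
+
+```
+package main
+
+import (
+	"log"
+	"net"
+
+	"github.com/docker/libtrust"
+)
+
+func main() {
+	// Load the set of trusted host keys written by genkeys.go.
+	hostKeys, err := libtrust.LoadKeySetFile("client_data/trusted_hosts.pem")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Keep only the keys registered for this host.
+	host, _, err := net.SplitHostPort("localhost:8888")
+	if err != nil {
+		log.Fatal(err)
+	}
+	serverKeys, err := libtrust.FilterByHosts(hostKeys, host, false)
+	if err != nil {
+		log.Fatalf("%q is not a known and trusted host", host)
+	}
+	log.Printf("found %d trusted key(s) for %s", len(serverKeys), host)
+}
+```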
+
+The file `gencert.go` can be used to generate a PEM-encoded version of the client key and certificate. If you save them to `key.pem` and `cert.pem` respectively, you can use them with `curl` to test out the server (if it is still running).
+
+```
+curl --cert cert.pem --key key.pem -k https://localhost:8888
+```
diff -Nru docker.io-0.9.1~dfsg1/libtrust/tlsdemo/server.go docker.io-1.3.2~dfsg1/libtrust/tlsdemo/server.go
--- docker.io-0.9.1~dfsg1/libtrust/tlsdemo/server.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/tlsdemo/server.go	2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,80 @@
+package main
+
+import (
+	"crypto/tls"
+	"fmt"
+	"html"
+	"log"
+	"net"
+	"net/http"
+
+	"github.com/docker/libtrust"
+)
+
+var (
+	serverAddress             = "localhost:8888"
+	privateKeyFilename        = "server_data/private_key.pem"
+	authorizedClientsFilename = "server_data/trusted_clients.pem"
+)
+
+func requestHandler(w http.ResponseWriter, r *http.Request) {
+	clientCert := r.TLS.PeerCertificates[0]
+	keyID := clientCert.Subject.CommonName
+	log.Printf("Request from keyID: %s\n", keyID)
+	fmt.Fprintf(w, "Hello, client! I'm a server! And you are %T: %s.\n", clientCert.PublicKey, html.EscapeString(keyID))
+}
+
+func main() {
+	// Load server key.
+	serverKey, err := libtrust.LoadKeyFile(privateKeyFilename)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Generate server certificate.
+	selfSignedServerCert, err := libtrust.GenerateSelfSignedServerCert(
+		serverKey, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")},
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Load authorized client keys.
+	authorizedClients, err := libtrust.LoadKeySetFile(authorizedClientsFilename)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Create CA pool using trusted client keys.
+	caPool, err := libtrust.GenerateCACertPool(serverKey, authorizedClients)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Create TLS config, requiring client certificates.
+	tlsConfig := &tls.Config{
+		Certificates: []tls.Certificate{
+			tls.Certificate{
+				Certificate: [][]byte{selfSignedServerCert.Raw},
+				PrivateKey:  serverKey.CryptoPrivateKey(),
+				Leaf:        selfSignedServerCert,
+			},
+		},
+		ClientAuth: tls.RequireAndVerifyClientCert,
+		ClientCAs:  caPool,
+	}
+
+	// Create HTTP server with simple request handler.
+	server := &http.Server{
+		Addr:    serverAddress,
+		Handler: http.HandlerFunc(requestHandler),
+	}
+
+	// Listen and serve HTTPS using the libtrust TLS config.
+	listener, err := net.Listen("tcp", server.Addr)
+	if err != nil {
+		log.Fatal(err)
+	}
+	tlsListener := tls.NewListener(listener, tlsConfig)
+	server.Serve(tlsListener)
+}
diff -Nru docker.io-0.9.1~dfsg1/libtrust/trustgraph/graph.go docker.io-1.3.2~dfsg1/libtrust/trustgraph/graph.go
--- docker.io-0.9.1~dfsg1/libtrust/trustgraph/graph.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/trustgraph/graph.go	2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,50 @@
+package trustgraph
+
+import "github.com/docker/libtrust"
+
+// TrustGraph represents a graph of authorization mapping
+// public keys to nodes and grants between nodes.
+type TrustGraph interface {
+	// Verify checks that the given public key is allowed to perform
+	// the given action on the given node according to the trust
+	// graph.
+	Verify(libtrust.PublicKey, string, uint16) (bool, error)
+
+	// GetGrants returns an array of all grant chains which are used to
+	// allow the requested permission.
+	GetGrants(libtrust.PublicKey, string, uint16) ([][]*Grant, error)
+}
+
+// Grant represents a transfer of permission from one part of the
+// trust graph to another. This is the only way to delegate
+// permission between two different sub trees in the graph.
+type Grant struct {
+	// Subject is the namespace being granted
+	Subject string
+
+	// Permission is a bit map of permissions
+	Permission uint16
+
+	// Grantee represents the node being granted
+	// a permission scope. The grantee can be
+	// either a namespace item or a key ID; namespace
+	// items always start with a '/'.
+	Grantee string
+
+	// statement represents the statement used to create
+	// this object.
+	statement *Statement
+}
+
+// Permissions
+// Read node     0x01 (can read node, no sub nodes)
+// Write node    0x02 (can write to node object, cannot create subnodes)
+// Read subtree  0x04 (delegates read to each sub node)
+// Write subtree 0x08 (delegates write to each sub node, including create on the subject)
+//
+// Permission shortcuts
+// ReadItem    = 0x01
+// WriteItem   = 0x03
+// ReadAccess  = 0x07
+// WriteAccess = 0x0F
+// Delegate    = 0x0F
diff -Nru docker.io-0.9.1~dfsg1/libtrust/trustgraph/memory_graph.go docker.io-1.3.2~dfsg1/libtrust/trustgraph/memory_graph.go
--- docker.io-0.9.1~dfsg1/libtrust/trustgraph/memory_graph.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/trustgraph/memory_graph.go	2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,133 @@
+package trustgraph
+
+import (
+	"strings"
+
+	"github.com/docker/libtrust"
+)
+
+type grantNode struct {
+	grants   []*Grant
+	children map[string]*grantNode
+}
+
+type memoryGraph struct {
+	roots map[string]*grantNode
+}
+
+func newGrantNode() *grantNode {
+	return &grantNode{
+		grants:   []*Grant{},
+		children: map[string]*grantNode{},
+	}
+}
+
+// NewMemoryGraph returns a new in-memory trust graph created from
+// a static list of grants. This graph is immutable after creation
+// and any alterations should create a new instance.
+func NewMemoryGraph(grants []*Grant) TrustGraph { + roots := map[string]*grantNode{} + for _, grant := range grants { + parts := strings.Split(grant.Grantee, "/") + nodes := roots + var node *grantNode + var nodeOk bool + for _, part := range parts { + node, nodeOk = nodes[part] + if !nodeOk { + node = newGrantNode() + nodes[part] = node + } + if part != "" { + node.grants = append(node.grants, grant) + } + nodes = node.children + } + } + return &memoryGraph{roots} +} + +func (g *memoryGraph) getGrants(name string) []*Grant { + nameParts := strings.Split(name, "/") + nodes := g.roots + var node *grantNode + var nodeOk bool + for _, part := range nameParts { + node, nodeOk = nodes[part] + if !nodeOk { + return nil + } + nodes = node.children + } + return node.grants +} + +func isSubName(name, sub string) bool { + if strings.HasPrefix(name, sub) { + if len(name) == len(sub) || name[len(sub)] == '/' { + return true + } + } + return false +} + +type walkFunc func(*Grant, []*Grant) bool + +func foundWalkFunc(*Grant, []*Grant) bool { + return true +} + +func (g *memoryGraph) walkGrants(start, target string, permission uint16, f walkFunc, chain []*Grant, visited map[*Grant]bool, collect bool) bool { + if visited == nil { + visited = map[*Grant]bool{} + } + grants := g.getGrants(start) + subGrants := make([]*Grant, 0, len(grants)) + for _, grant := range grants { + if visited[grant] { + continue + } + visited[grant] = true + if grant.Permission&permission == permission { + if isSubName(target, grant.Subject) { + if f(grant, chain) { + return true + } + } else { + subGrants = append(subGrants, grant) + } + } + } + for _, grant := range subGrants { + var chainCopy []*Grant + if collect { + chainCopy = make([]*Grant, len(chain)+1) + copy(chainCopy, chain) + chainCopy[len(chainCopy)-1] = grant + } else { + chainCopy = nil + } + + if g.walkGrants(grant.Subject, target, permission, f, chainCopy, visited, collect) { + return true + } + } + return false +} + +func (g *memoryGraph) Verify(key libtrust.PublicKey, node string, permission uint16) (bool, error) { + return g.walkGrants(key.KeyID(), node, permission, foundWalkFunc, nil, nil, false), nil +} + +func (g *memoryGraph) GetGrants(key libtrust.PublicKey, node string, permission uint16) ([][]*Grant, error) { + grants := [][]*Grant{} + collect := func(grant *Grant, chain []*Grant) bool { + grantChain := make([]*Grant, len(chain)+1) + copy(grantChain, chain) + grantChain[len(grantChain)-1] = grant + grants = append(grants, grantChain) + return false + } + g.walkGrants(key.KeyID(), node, permission, collect, nil, nil, true) + return grants, nil +} diff -Nru docker.io-0.9.1~dfsg1/libtrust/trustgraph/memory_graph_test.go docker.io-1.3.2~dfsg1/libtrust/trustgraph/memory_graph_test.go --- docker.io-0.9.1~dfsg1/libtrust/trustgraph/memory_graph_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libtrust/trustgraph/memory_graph_test.go 2014-09-30 21:18:50.000000000 +0000 @@ -0,0 +1,174 @@ +package trustgraph + +import ( + "fmt" + "testing" + + "github.com/docker/libtrust" +) + +func createTestKeysAndGrants(count int) ([]*Grant, []libtrust.PrivateKey) { + grants := make([]*Grant, count) + keys := make([]libtrust.PrivateKey, count) + for i := 0; i < count; i++ { + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + panic(err) + } + grant := &Grant{ + Subject: fmt.Sprintf("/user-%d", i+1), + Permission: 0x0f, + Grantee: pk.KeyID(), + } + keys[i] = pk + grants[i] = grant + } + return grants, keys +} + +func testVerified(t 
*testing.T, g TrustGraph, k libtrust.PublicKey, keyName, target string, permission uint16) { + if ok, err := g.Verify(k, target, permission); err != nil { + t.Fatalf("Unexpected error during verification: %s", err) + } else if !ok { + t.Errorf("key failed verification\n\tKey: %s(%s)\n\tNamespace: %s", keyName, k.KeyID(), target) + } +} + +func testNotVerified(t *testing.T, g TrustGraph, k libtrust.PublicKey, keyName, target string, permission uint16) { + if ok, err := g.Verify(k, target, permission); err != nil { + t.Fatalf("Unexpected error during verification: %s", err) + } else if ok { + t.Errorf("key should have failed verification\n\tKey: %s(%s)\n\tNamespace: %s", keyName, k.KeyID(), target) + } +} + +func TestVerify(t *testing.T) { + grants, keys := createTestKeysAndGrants(4) + extraGrants := make([]*Grant, 3) + extraGrants[0] = &Grant{ + Subject: "/user-3", + Permission: 0x0f, + Grantee: "/user-2", + } + extraGrants[1] = &Grant{ + Subject: "/user-3/sub-project", + Permission: 0x0f, + Grantee: "/user-4", + } + extraGrants[2] = &Grant{ + Subject: "/user-4", + Permission: 0x07, + Grantee: "/user-1", + } + grants = append(grants, extraGrants...) + + g := NewMemoryGraph(grants) + + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1/some-project/sub-value", 0x0f) + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-4", 0x07) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2/", 0x0f) + testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3/sub-value", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/sub-value", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/", 0x0f) + testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project", 0x0f) + testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project/app", 0x0f) + testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-4", 0x0f) + + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-2", 0x0f) + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-3/sub-value", 0x0f) + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-4", 0x0f) + testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-1/", 0x0f) + testNotVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-2", 0x0f) + testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-4", 0x0f) + testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3", 0x0f) +} + +func TestCircularWalk(t *testing.T) { + grants, keys := createTestKeysAndGrants(3) + user1Grant := &Grant{ + Subject: "/user-2", + Permission: 0x0f, + Grantee: "/user-1", + } + user2Grant := &Grant{ + Subject: "/user-1", + Permission: 0x0f, + Grantee: "/user-2", + } + grants = append(grants, user1Grant, user2Grant) + + g := NewMemoryGraph(grants) + + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-2", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-1", 0x0f) + testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3", 0x0f) + + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-3", 0x0f) + testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) +} + +func assertGrantSame(t *testing.T, actual, 
expected *Grant) {
+	if actual != expected {
+		t.Fatalf("Unexpected grant retrieved\n\tExpected: %v\n\tActual: %v", expected, actual)
+	}
+}
+
+func TestGetGrants(t *testing.T) {
+	grants, keys := createTestKeysAndGrants(5)
+	extraGrants := make([]*Grant, 4)
+	extraGrants[0] = &Grant{
+		Subject:    "/user-3/friend-project",
+		Permission: 0x0f,
+		Grantee:    "/user-2/friends",
+	}
+	extraGrants[1] = &Grant{
+		Subject:    "/user-3/sub-project",
+		Permission: 0x0f,
+		Grantee:    "/user-4",
+	}
+	extraGrants[2] = &Grant{
+		Subject:    "/user-2/friends",
+		Permission: 0x0f,
+		Grantee:    "/user-5/fun-project",
+	}
+	extraGrants[3] = &Grant{
+		Subject:    "/user-5/fun-project",
+		Permission: 0x0f,
+		Grantee:    "/user-1",
+	}
+	grants = append(grants, extraGrants...)
+
+	g := NewMemoryGraph(grants)
+
+	grantChains, err := g.GetGrants(keys[3], "/user-3/sub-project/specific-app", 0x0f)
+	if err != nil {
+		t.Fatalf("Error getting grants: %s", err)
+	}
+	if len(grantChains) != 1 {
+		t.Fatalf("Unexpected number of grant chains returned, expected %d, received %d", 1, len(grantChains))
+	}
+	if len(grantChains[0]) != 2 {
+		t.Fatalf("Unexpected number of grants retrieved\n\tExpected: %d\n\tActual: %d", 2, len(grantChains[0]))
+	}
+	assertGrantSame(t, grantChains[0][0], grants[3])
+	assertGrantSame(t, grantChains[0][1], extraGrants[1])
+
+	grantChains, err = g.GetGrants(keys[0], "/user-3/friend-project/fun-app", 0x0f)
+	if err != nil {
+		t.Fatalf("Error getting grants: %s", err)
+	}
+	if len(grantChains) != 1 {
+		t.Fatalf("Unexpected number of grant chains returned, expected %d, received %d", 1, len(grantChains))
+	}
+	if len(grantChains[0]) != 4 {
+		t.Fatalf("Unexpected number of grants retrieved\n\tExpected: %d\n\tActual: %d", 4, len(grantChains[0]))
+	}
+	assertGrantSame(t, grantChains[0][0], grants[0])
+	assertGrantSame(t, grantChains[0][1], extraGrants[3])
+	assertGrantSame(t, grantChains[0][2], extraGrants[2])
+	assertGrantSame(t, grantChains[0][3], extraGrants[0])
+}
diff -Nru docker.io-0.9.1~dfsg1/libtrust/trustgraph/statement.go docker.io-1.3.2~dfsg1/libtrust/trustgraph/statement.go
--- docker.io-0.9.1~dfsg1/libtrust/trustgraph/statement.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/trustgraph/statement.go	2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,227 @@
+package trustgraph
+
+import (
+	"crypto/x509"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/docker/libtrust"
+)
+
+type jsonGrant struct {
+	Subject    string `json:"subject"`
+	Permission uint16 `json:"permission"`
+	Grantee    string `json:"grantee"`
+}
+
+type jsonRevocation struct {
+	Subject    string `json:"subject"`
+	Revocation uint16 `json:"revocation"`
+	Grantee    string `json:"grantee"`
+}
+
+type jsonStatement struct {
+	Revocations []*jsonRevocation `json:"revocations"`
+	Grants      []*jsonGrant      `json:"grants"`
+	Expiration  time.Time         `json:"expiration"`
+	IssuedAt    time.Time         `json:"issuedAt"`
+}
+
+func (g *jsonGrant) Grant(statement *Statement) *Grant {
+	return &Grant{
+		Subject:    g.Subject,
+		Permission: g.Permission,
+		Grantee:    g.Grantee,
+		statement:  statement,
+	}
+}
+
+// Statement represents a set of grants made from a verifiable
+// authority. A statement has an expiration associated with it
+// set by the authority.
+type Statement struct {
+	jsonStatement
+
+	signature *libtrust.JSONSignature
+}
+
+// IsExpired returns whether the statement has expired
+func (s *Statement) IsExpired() bool {
+	return s.Expiration.Before(time.Now().Add(-10 * time.Second))
+}
+
+// Bytes returns an indented json representation of the statement
+// in a byte array. This value can be written to a file or stream
+// without alteration.
+func (s *Statement) Bytes() ([]byte, error) {
+	return s.signature.PrettySignature("signatures")
+}
+
+// LoadStatement loads and verifies a statement from an input stream.
+func LoadStatement(r io.Reader, authority *x509.CertPool) (*Statement, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+	js, err := libtrust.ParsePrettySignature(b, "signatures")
+	if err != nil {
+		return nil, err
+	}
+	payload, err := js.Payload()
+	if err != nil {
+		return nil, err
+	}
+	var statement Statement
+	err = json.Unmarshal(payload, &statement.jsonStatement)
+	if err != nil {
+		return nil, err
+	}
+
+	if authority == nil {
+		_, err = js.Verify()
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		_, err = js.VerifyChains(authority)
+		if err != nil {
+			return nil, err
+		}
+	}
+	statement.signature = js
+
+	return &statement, nil
+}
+
+// CreateStatement creates and signs a statement from a stream of grants
+// and revocations in a JSON array.
+func CreateStatement(grants, revocations io.Reader, expiration time.Duration, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) {
+	var statement Statement
+	err := json.NewDecoder(grants).Decode(&statement.jsonStatement.Grants)
+	if err != nil {
+		return nil, err
+	}
+	err = json.NewDecoder(revocations).Decode(&statement.jsonStatement.Revocations)
+	if err != nil {
+		return nil, err
+	}
+	statement.jsonStatement.Expiration = time.Now().UTC().Add(expiration)
+	statement.jsonStatement.IssuedAt = time.Now().UTC()
+
+	b, err := json.MarshalIndent(&statement.jsonStatement, "", " ")
+	if err != nil {
+		return nil, err
+	}
+
+	statement.signature, err = libtrust.NewJSONSignature(b)
+	if err != nil {
+		return nil, err
+	}
+	err = statement.signature.SignWithChain(key, chain)
+	if err != nil {
+		return nil, err
+	}
+
+	return &statement, nil
+}
+
+type statementList []*Statement
+
+func (s statementList) Len() int {
+	return len(s)
+}
+
+func (s statementList) Less(i, j int) bool {
+	return s[i].IssuedAt.Before(s[j].IssuedAt)
+}
+
+func (s statementList) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// CollapseStatements returns a single list of the valid statements as well as the
+// time when the next grant will expire.
+func CollapseStatements(statements []*Statement, useExpired bool) ([]*Grant, time.Time, error) {
+	sorted := make(statementList, 0, len(statements))
+	for _, statement := range statements {
+		if useExpired || !statement.IsExpired() {
+			sorted = append(sorted, statement)
+		}
+	}
+	sort.Sort(sorted)
+
+	var minExpired time.Time
+	var grantCount int
+	roots := map[string]*grantNode{}
+	for i, statement := range sorted {
+		if statement.Expiration.Before(minExpired) || i == 0 {
+			minExpired = statement.Expiration
+		}
+		for _, grant := range statement.Grants {
+			parts := strings.Split(grant.Grantee, "/")
+			nodes := roots
+			g := grant.Grant(statement)
+			grantCount++
+
+			for _, part := range parts {
+				node, nodeOk := nodes[part]
+				if !nodeOk {
+					node = newGrantNode()
+					nodes[part] = node
+				}
+				node.grants = append(node.grants, g)
+				nodes = node.children
+			}
+		}
+
+		for _, revocation := range statement.Revocations {
+			parts := strings.Split(revocation.Grantee, "/")
+			nodes := roots
+
+			var node *grantNode
+			var nodeOk bool
+			for _, part := range parts {
+				node, nodeOk = nodes[part]
+				if !nodeOk {
+					break
+				}
+				nodes = node.children
+			}
+			if node != nil {
+				for _, grant := range node.grants {
+					if isSubName(grant.Subject, revocation.Subject) {
+						grant.Permission = grant.Permission &^ revocation.Revocation
+					}
+				}
+			}
+		}
+	}
+
+	retGrants := make([]*Grant, 0, grantCount)
+	for _, rootNode := range roots {
+		retGrants = append(retGrants, rootNode.grants...)
+	}
+
+	return retGrants, minExpired, nil
+}
+
+// FilterStatements returns the distinct set of statements from which the
+// given grants were created.
+func FilterStatements(grants []*Grant) ([]*Statement, error) {
+	statements := map[*Statement]bool{}
+	for _, grant := range grants {
+		if grant.statement != nil {
+			statements[grant.statement] = true
+		}
+	}
+	retStatements := make([]*Statement, len(statements))
+	var i int
+	for statement := range statements {
+		retStatements[i] = statement
+		i++
+	}
+	return retStatements, nil
+}
diff -Nru docker.io-0.9.1~dfsg1/libtrust/trustgraph/statement_test.go docker.io-1.3.2~dfsg1/libtrust/trustgraph/statement_test.go
--- docker.io-0.9.1~dfsg1/libtrust/trustgraph/statement_test.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/libtrust/trustgraph/statement_test.go	2014-09-30 21:18:50.000000000 +0000
@@ -0,0 +1,417 @@
+package trustgraph
+
+import (
+	"bytes"
+	"crypto/x509"
+	"encoding/json"
+	"testing"
+	"time"
+
+	"github.com/docker/libtrust"
+	"github.com/docker/libtrust/testutil"
+)
+
+const testStatementExpiration = time.Hour * 5
+
+func generateStatement(grants []*Grant, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) {
+	var statement Statement
+
+	statement.Grants = make([]*jsonGrant, len(grants))
+	for i, grant := range grants {
+		statement.Grants[i] = &jsonGrant{
+			Subject:    grant.Subject,
+			Permission: grant.Permission,
+			Grantee:    grant.Grantee,
+		}
+	}
+	statement.IssuedAt = time.Now()
+	statement.Expiration = time.Now().Add(testStatementExpiration)
+	statement.Revocations = make([]*jsonRevocation, 0)
+
+	marshalled, err := json.MarshalIndent(statement.jsonStatement, "", " ")
+	if err != nil {
+		return nil, err
+	}
+
+	sig, err := libtrust.NewJSONSignature(marshalled)
+	if err != nil {
+		return nil, err
+	}
+	err = sig.SignWithChain(key, chain)
+	if err != nil {
+		return nil, err
+	}
+	statement.signature = sig
+
+	return &statement, nil
+}
+
+func generateTrustChain(t *testing.T, chainLen int) (libtrust.PrivateKey, *x509.CertPool, []*x509.Certificate) {
+	caKey, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("Error generating key: %s", err)
+	}
+	ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey())
+	if err != nil {
+		t.Fatalf("Error generating ca: %s", err)
+	}
+
+	parent := ca
+	parentKey := caKey
+	chain := make([]*x509.Certificate, chainLen)
+	for i := chainLen - 1; i > 0; i-- {
+		intermediateKey, err := libtrust.GenerateECP256PrivateKey()
+		if err != nil {
+			t.Fatalf("Error generating key: %s", err)
+		}
+		chain[i], err = testutil.GenerateIntermediate(intermediateKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)
+		if err != nil {
+			t.Fatalf("Error generating intermediate certificate: %s", err)
+		}
+		parent = chain[i]
+		parentKey = intermediateKey
+	}
+	trustKey, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("Error generating key: %s", err)
+	}
+	chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)
+	if err != nil {
+		t.Fatalf("Error generating trust cert: %s", err)
+	}
+
+	caPool := x509.NewCertPool()
+	caPool.AddCert(ca)
+
+	return trustKey, caPool, chain
+}
+
+func TestLoadStatement(t *testing.T) {
+	grantCount := 4
+	grants, _ := createTestKeysAndGrants(grantCount)
+
+	trustKey, caPool, chain := generateTrustChain(t, 6)
+
+	statement, err := generateStatement(grants, trustKey, chain)
+	if err != nil {
+		t.Fatalf("Error generating statement: %s", err)
+	}
+
+	statementBytes, err := statement.Bytes()
+	if err != nil {
+		t.Fatalf("Error getting statement bytes: %s", err)
+	}
+
+	s2, err := LoadStatement(bytes.NewReader(statementBytes), caPool)
+	if err != nil {
+		t.Fatalf("Error loading statement: %s", err)
+	}
+	if len(s2.Grants) != grantCount {
+		t.Fatalf("Unexpected grant length\n\tExpected: %d\n\tActual: %d", grantCount, len(s2.Grants))
+	}
+
+	pool := x509.NewCertPool()
+	_, err = LoadStatement(bytes.NewReader(statementBytes), pool)
+	if err == nil {
+		t.Fatalf("No error thrown verifying without an authority")
+	} else if _, ok := err.(x509.UnknownAuthorityError); !ok {
+		t.Fatalf("Unexpected error verifying without authority: %s", err)
+	}
+
+	s2, err = LoadStatement(bytes.NewReader(statementBytes), nil)
+	if err != nil {
+		t.Fatalf("Error loading statement: %s", err)
+	}
+	if len(s2.Grants) != grantCount {
+		t.Fatalf("Unexpected grant length\n\tExpected: %d\n\tActual: %d", grantCount, len(s2.Grants))
+	}
+
+	badData := make([]byte, len(statementBytes))
+	copy(badData, statementBytes)
+	badData[0] = '['
+	_, err = LoadStatement(bytes.NewReader(badData), nil)
+	if err == nil {
+		t.Fatalf("No error thrown parsing bad json")
+	}
+
+	alteredData := make([]byte, len(statementBytes))
+	copy(alteredData, statementBytes)
+	alteredData[30] = '0'
+	_, err = LoadStatement(bytes.NewReader(alteredData), nil)
+	if err == nil {
+		t.Fatalf("No error thrown from bad data")
+	}
+}
+
+func TestCollapseGrants(t *testing.T) {
+	grantCount := 8
+	grants, keys := createTestKeysAndGrants(grantCount)
+	linkGrants := make([]*Grant, 4)
+	linkGrants[0] = &Grant{
+		Subject:    "/user-3",
+		Permission: 0x0f,
+		Grantee:    "/user-2",
+	}
+	linkGrants[1] = &Grant{
+		Subject:    "/user-3/sub-project",
+		Permission: 0x0f,
+		Grantee:    "/user-4",
+	}
+	linkGrants[2] = &Grant{
+		Subject:    "/user-6",
+		Permission: 0x0f,
+		Grantee:    "/user-7",
+	}
+	linkGrants[3] = &Grant{
+		Subject:    "/user-6/sub-project/specific-app",
+		Permission: 0x0f,
+		Grantee:    "/user-5",
+	}
+	trustKey, pool, chain := generateTrustChain(t, 3)
+
+	statements := make([]*Statement, 3)
+	var err error
+	statements[0], err = generateStatement(grants[0:4], trustKey, chain)
+	if err != nil {
+		t.Fatalf("Error generating statement: %s", err)
+	}
+	statements[1], err = generateStatement(grants[4:], trustKey, chain)
+	if err != nil {
+		t.Fatalf("Error generating statement: %s", err)
+	}
+	statements[2], err = generateStatement(linkGrants, trustKey, chain)
+	if err != nil {
+		t.Fatalf("Error generating statement: %s", err)
+	}
+
+	statementsCopy := make([]*Statement, len(statements))
+	for i, statement := range statements {
+		b, err := statement.Bytes()
+		if err != nil {
+			t.Fatalf("Error getting statement bytes: %s", err)
+		}
+		verifiedStatement, err := LoadStatement(bytes.NewReader(b), pool)
+		if err != nil {
+			t.Fatalf("Error loading statement: %s", err)
+		}
+		// Force sort by reversing order
+		statementsCopy[len(statementsCopy)-i-1] = verifiedStatement
+	}
+	statements = statementsCopy
+
+	collapsedGrants, expiration, err := CollapseStatements(statements, false)
+	if err != nil {
+		t.Fatalf("Error collapsing grants: %s", err)
+	}
+	if len(collapsedGrants) != 12 {
+		t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants))
+	}
+	if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) {
+		t.Fatalf("Unexpected expiration time: %s", expiration.String())
+	}
+	g := NewMemoryGraph(collapsedGrants)
+
+	testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f)
+	testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f)
+	testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3", 0x0f)
+	testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-4", 0x0f)
+	testVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-5", 0x0f)
+	testVerified(t, g, keys[5].PublicKey(), "user-key-6", "/user-6", 0x0f)
+	testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-7", 0x0f)
+	testVerified(t, g, keys[7].PublicKey(), "user-key-8", "/user-8", 0x0f)
+	testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f)
+	testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/sub-project/specific-app", 0x0f)
+	testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project", 0x0f)
+	testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6", 0x0f)
+	testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6/sub-project/specific-app", 0x0f)
+	testVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-6/sub-project/specific-app", 0x0f)
+
+	testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3", 0x0f)
+	testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-6/sub-project", 0x0f)
+	testNotVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-6/sub-project", 0x0f)
+
+	// Add revocation grant
+	statements = append(statements, &Statement{
+		jsonStatement{
+			IssuedAt:   time.Now(),
+			Expiration: time.Now().Add(testStatementExpiration),
+			Grants:     []*jsonGrant{},
+			Revocations: []*jsonRevocation{
+				&jsonRevocation{
+					Subject:    "/user-1",
+					Revocation: 0x0f,
+					Grantee:    keys[0].KeyID(),
+				},
+				&jsonRevocation{
+					Subject:    "/user-2",
+					Revocation: 0x08,
+					Grantee:    keys[1].KeyID(),
+				},
+				&jsonRevocation{
+					Subject:    "/user-6",
+					Revocation: 0x0f,
+					Grantee:    "/user-7",
+				},
+				&jsonRevocation{
+					Subject:    "/user-9",
+					Revocation: 0x0f,
+					Grantee:    "/user-10",
+				},
+			},
+		},
+		nil,
+	})
+
+	collapsedGrants, expiration, err = CollapseStatements(statements, false)
+	if err != nil {
+		t.Fatalf("Error collapsing grants: %s", err)
+	}
+	if len(collapsedGrants) != 12 {
+		t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants))
+	}
+	if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) {
expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) { + t.Fatalf("Unexpected expiration time: %s", expiration.String()) + } + g = NewMemoryGraph(collapsedGrants) + + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) + testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) + testNotVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6/sub-project/specific-app", 0x0f) + + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x07) +} + +func TestFilterStatements(t *testing.T) { + grantCount := 8 + grants, keys := createTestKeysAndGrants(grantCount) + linkGrants := make([]*Grant, 3) + linkGrants[0] = &Grant{ + Subject: "/user-3", + Permission: 0x0f, + Grantee: "/user-2", + } + linkGrants[1] = &Grant{ + Subject: "/user-5", + Permission: 0x0f, + Grantee: "/user-4", + } + linkGrants[2] = &Grant{ + Subject: "/user-7", + Permission: 0x0f, + Grantee: "/user-6", + } + + trustKey, _, chain := generateTrustChain(t, 3) + + statements := make([]*Statement, 5) + var err error + statements[0], err = generateStatement(grants[0:2], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[1], err = generateStatement(grants[2:4], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[2], err = generateStatement(grants[4:6], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[3], err = generateStatement(grants[6:], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[4], err = generateStatement(linkGrants, trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + collapsed, _, err := CollapseStatements(statements, false) + if err != nil { + t.Fatalf("Error collapsing grants: %s", err) + } + + // Filter 1, all 5 statements + filter1, err := FilterStatements(collapsed) + if err != nil { + t.Fatalf("Error filtering statements: %s", err) + } + if len(filter1) != 5 { + t.Fatalf("Wrong number of statements, expected %d, received %d", 5, len(filter1)) + } + + // Filter 2, one statement + filter2, err := FilterStatements([]*Grant{collapsed[0]}) + if err != nil { + t.Fatalf("Error filtering statements: %s", err) + } + if len(filter2) != 1 { + t.Fatalf("Wrong number of statements, expected %d, received %d", 1, len(filter2)) + } + + // Filter 3, 2 statements, from graph lookup + g := NewMemoryGraph(collapsed) + lookupGrants, err := g.GetGrants(keys[1], "/user-3", 0x0f) + if err != nil { + t.Fatalf("Error looking up grants: %s", err) + } + if len(lookupGrants) != 1 { + t.Fatalf("Wrong numberof grant chains returned from lookup, expected %d, received %d", 1, len(lookupGrants)) + } + if len(lookupGrants[0]) != 2 { + t.Fatalf("Wrong number of grants looked up, expected %d, received %d", 2, len(lookupGrants)) + } + filter3, err := FilterStatements(lookupGrants[0]) + if err != nil { + t.Fatalf("Error filtering statements: %s", err) + } + if len(filter3) != 2 { + t.Fatalf("Wrong number of statements, expected %d, received %d", 2, len(filter3)) + } + +} + +func TestCreateStatement(t *testing.T) { + grantJSON := bytes.NewReader([]byte(`[ + { + "subject": "/user-2", + "permission": 15, + "grantee": "/user-1" + }, + { + "subject": "/user-7", + "permission": 1, + "grantee": "/user-9" + }, + { + "subject": "/user-3", + "permission": 15, + "grantee": "/user-2" + } +]`)) + revocationJSON := bytes.NewReader([]byte(`[ + { + 
"subject": "user-8", + "revocation": 12, + "grantee": "user-9" + } +]`)) + + trustKey, pool, chain := generateTrustChain(t, 3) + + statement, err := CreateStatement(grantJSON, revocationJSON, testStatementExpiration, trustKey, chain) + if err != nil { + t.Fatalf("Error creating statement: %s", err) + } + + b, err := statement.Bytes() + if err != nil { + t.Fatalf("Error retrieving bytes: %s", err) + } + + verified, err := LoadStatement(bytes.NewReader(b), pool) + if err != nil { + t.Fatalf("Error loading statement: %s", err) + } + + if len(verified.Grants) != 3 { + t.Errorf("Unexpected number of grants, expected %d, received %d", 3, len(verified.Grants)) + } + + if len(verified.Revocations) != 1 { + t.Errorf("Unexpected number of revocations, expected %d, received %d", 1, len(verified.Revocations)) + } +} diff -Nru docker.io-0.9.1~dfsg1/libtrust/util.go docker.io-1.3.2~dfsg1/libtrust/util.go --- docker.io-0.9.1~dfsg1/libtrust/util.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/libtrust/util.go 2014-09-30 21:18:50.000000000 +0000 @@ -0,0 +1,209 @@ +package libtrust + +import ( + "bytes" + "crypto/elliptic" + "crypto/x509" + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/pem" + "errors" + "fmt" + "math/big" + "strings" +) + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters ommitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. 
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func keyIDEncode(b []byte) string { + s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") + var buf bytes.Buffer + var i int + for i = 0; i < len(s)/4-1; i++ { + start := i * 4 + end := start + 4 + buf.WriteString(s[start:end] + ":") + } + buf.WriteString(s[i*4:]) + return buf.String() +} + +func stringFromMap(m map[string]interface{}, key string) (string, error) { + val, ok := m[key] + if !ok { + return "", fmt.Errorf("%q value not specified", key) + } + + str, ok := val.(string) + if !ok { + return "", fmt.Errorf("%q value must be a string", key) + } + delete(m, key) + + return str, nil +} + +func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { + curveByteLen := (curve.Params().BitSize + 7) >> 3 + + cBytes, err := joseBase64UrlDecode(cB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + cByteLength := len(cBytes) + if cByteLength != curveByteLen { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) + } + return new(big.Int).SetBytes(cBytes), nil +} + +func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { + dBytes, err := joseBase64UrlDecode(dB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := curve.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + dByteLength := len(dBytes) + + if dByteLength != octetLength { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) + } + + return new(big.Int).SetBytes(dBytes), nil +} + +func parseRSAModulusParam(nB64Url string) (*big.Int, error) { + nBytes, err := joseBase64UrlDecode(nB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(nBytes), nil +} + +func serializeRSAPublicExponentParam(e int) []byte { + // We MUST use the minimum number of octets to represent E. + // E is supposed to be 65537 for performance and security reasons + // and is what golang's rsa package generates, but it might be + // different if imported from some other generator. 
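+ // For example, the common exponent e = 65537 is 0x00010001 as four
+ // big-endian octets, so the minimal encoding below keeps only the
+ // trailing three octets {0x01, 0x00, 0x01}.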
+ buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, uint32(e)) + var i int + for i = 0; i < len(buf); i++ { + if buf[i] != 0 { + break + } + } + return buf[i:] +} + +func parseRSAPublicExponentParam(eB64Url string) (int, error) { + eBytes, err := joseBase64UrlDecode(eB64Url) + if err != nil { + return 0, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + // Only the minimum number of bytes were used to represent E, but + // binary.BigEndian.Uint32 expects at least 4 bytes, so we need + // to add zero padding if necessary. + byteLen := len(eBytes) + buf := make([]byte, 4-byteLen, 4) + eBytes = append(buf, eBytes...) + + return int(binary.BigEndian.Uint32(eBytes)), nil +} + +func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) { + b64Url, err := stringFromMap(m, key) + if err != nil { + return nil, err + } + + paramBytes, err := joseBase64UrlDecode(b64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(paramBytes), nil +} + +func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) { + pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}} + for k, v := range headers { + switch val := v.(type) { + case string: + pemBlock.Headers[k] = val + case []string: + if k == "hosts" { + pemBlock.Headers[k] = strings.Join(val, ",") + } else { + // Non-encodable type: return an error rather than silently dropping the header + return nil, fmt.Errorf("unable to encode PEM header %q: non-encodable list value", k) + } + default: + // Non-encodable type + return nil, fmt.Errorf("unable to encode PEM header %q: non-encodable type %T", k, v) + } + } + + return pemBlock, nil +} + +func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) { + cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err) + } + + pubKey, err := FromCryptoPublicKey(cryptoPublicKey) + if err != nil { + return nil, err + } + + addPEMHeadersToKey(pemBlock, pubKey) + + return pubKey, nil +} + +func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) { + for key, value := range pemBlock.Headers { + var safeVal interface{} + if key == "hosts" { + safeVal = strings.Split(value, ",") + } else { + safeVal = value + } + pubKey.AddExtendedField(key, safeVal) + } +} diff -Nru docker.io-0.9.1~dfsg1/LICENSE docker.io-1.3.2~dfsg1/LICENSE --- docker.io-0.9.1~dfsg1/LICENSE 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/LICENSE 2014-11-24 17:38:01.000000000 +0000 @@ -176,18 +176,7 @@ END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] + Copyright 2014 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
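The jose-style base64url helpers added in libtrust/util.go above are unexported; the following standalone sketch (package name and sample inputs are illustrative assumptions, not part of the diff) shows the same padding rules round-tripping a few byte strings:

package main

import (
	"encoding/base64"
	"errors"
	"fmt"
	"strings"
)

// joseEncode drops trailing '=' padding, mirroring joseBase64UrlEncode above.
func joseEncode(b []byte) string {
	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}

// joseDecode restores the padding before decoding, mirroring joseBase64UrlDecode.
func joseDecode(s string) ([]byte, error) {
	switch len(s) % 4 {
	case 0: // already a multiple of 4, nothing to add
	case 2:
		s += "=="
	case 3:
		s += "="
	default:
		return nil, errors.New("illegal base64url string")
	}
	return base64.URLEncoding.DecodeString(s)
}

func main() {
	// {0x01} -> "AQ", {0x01, 0x00} -> "AQA", {0x01, 0x00, 0x01} -> "AQAB"
	for _, in := range [][]byte{{0x01}, {0x01, 0x00}, {0x01, 0x00, 0x01}} {
		enc := joseEncode(in)
		dec, err := joseDecode(enc)
		fmt.Printf("%q -> %v (err: %v)\n", enc, dec, err)
	}
}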
diff -Nru docker.io-0.9.1~dfsg1/links/links.go docker.io-1.3.2~dfsg1/links/links.go --- docker.io-0.9.1~dfsg1/links/links.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/links/links.go 2014-11-24 17:38:01.000000000 +0000 @@ -2,8 +2,8 @@ import ( "fmt" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/nat" + "github.com/docker/docker/engine" + "github.com/docker/docker/nat" "path" "strings" ) @@ -49,7 +49,7 @@ func (l *Link) ToEnv() []string { env := []string{} - alias := strings.ToUpper(l.Alias()) + alias := strings.Replace(strings.ToUpper(l.Alias()), "-", "_", -1) if p := l.getDefaultPort(); p != nil { env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port())) diff -Nru docker.io-0.9.1~dfsg1/links/links_test.go docker.io-1.3.2~dfsg1/links/links_test.go --- docker.io-0.9.1~dfsg1/links/links_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/links/links_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,11 +1,41 @@ package links import ( - "github.com/dotcloud/docker/nat" + "github.com/docker/docker/nat" "strings" "testing" ) +func TestLinkNaming(t *testing.T) { + ports := make(nat.PortSet) + ports[nat.Port("6379/tcp")] = struct{}{} + + link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports, nil) + if err != nil { + t.Fatal(err) + } + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + + value, ok := env["DOCKER_1_PORT"] + + if !ok { + t.Fatalf("DOCKER_1_PORT not found in env") + } + + if value != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_1_PORT"]) + } +} + func TestLinkNew(t *testing.T) { ports := make(nat.PortSet) ports[nat.Port("6379/tcp")] = struct{}{} diff -Nru docker.io-0.9.1~dfsg1/.mailmap docker.io-1.3.2~dfsg1/.mailmap --- docker.io-0.9.1~dfsg1/.mailmap 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/.mailmap 2014-11-24 17:38:01.000000000 +0000 @@ -1,4 +1,9 @@ -# Generate AUTHORS: git log --format='%aN <%aE>' | sort -uf +# Generate AUTHORS: hack/generate-authors.sh + +# Tip for finding duplicates (besides scanning the output of AUTHORS for name +# duplicates that aren't also email duplicates): scan the output of: +# git log --format='%aE - %aN' | sort -uf + @@ -6,14 +11,16 @@ + - -Thatcher Peskens dhrp -Thatcher Peskens dhrp +Thatcher Peskens +Thatcher Peskens +Thatcher Peskens dhrp Jérôme Petazzoni jpetazzo Jérôme Petazzoni -Joffrey F - +Joffrey F +Joffrey F +Joffrey F Tim Terhorst Andy Smith @@ -23,7 +30,6 @@ -Thatcher Peskens Walter Stanish @@ -46,15 +52,48 @@ Daniel Mizyrycki root Jean-Baptiste Dalido - - + + + - - -Sven Dowideit ¨Sven <¨SvenDowideit@home.org.au¨> + + +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit <¨SvenDowideit@home.org.au¨> unclejack + +Alexandr Morozov + +O.S. Tezer + +Roberto G.
Hashioka + + + + + +Sridhar Ratnakumar +Sridhar Ratnakumar +Liang-Chi Hsieh +Aleksa Sarai +Will Weaver +Timothy Hobbs +Nathan LeClaire +Nathan LeClaire + + + + +Matthew Heon + + + +Francisco Carriedo + diff -Nru docker.io-0.9.1~dfsg1/MAINTAINERS docker.io-1.3.2~dfsg1/MAINTAINERS --- docker.io-0.9.1~dfsg1/MAINTAINERS 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -1,7 +1,9 @@ -Solomon Hykes (@shykes) -Guillaume Charmes (@creack) +Solomon Hykes (@shykes) Victor Vieux (@vieux) Michael Crosby (@crosbymichael) +.mailmap: Tianon Gravi (@tianon) .travis.yml: Tianon Gravi (@tianon) +AUTHORS: Tianon Gravi (@tianon) Dockerfile: Tianon Gravi (@tianon) Makefile: Tianon Gravi (@tianon) +.dockerignore: Tianon Gravi (@tianon) diff -Nru docker.io-0.9.1~dfsg1/Makefile docker.io-1.3.2~dfsg1/Makefile --- docker.io-0.9.1~dfsg1/Makefile 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/Makefile 2014-11-24 17:38:01.000000000 +0000 @@ -1,9 +1,20 @@ -.PHONY: all binary build cross default docs docs-build docs-shell shell test test-integration +.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli validate -GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD) -DOCKER_IMAGE := docker:$(GIT_BRANCH) -DOCKER_DOCS_IMAGE := docker-docs:$(GIT_BRANCH) -DOCKER_RUN_DOCKER := docker run --rm -i -t --privileged -e TESTFLAGS -v "$(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles" "$(DOCKER_IMAGE)" +# to allow `make BINDDIR=. shell` or `make BINDDIR= test` +# (default to no bind mount if DOCKER_HOST is set) +BINDDIR := $(if $(DOCKER_HOST),,bundles) +# to allow `make DOCSPORT=9000 docs` +DOCSPORT := 8000 + +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) +GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) +DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) +DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH)) +DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)") + +DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TIMEOUT -e BUILDFLAGS -e TESTFLAGS -e TESTDIRS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)" +# to allow `make DOCSDIR=docs docs-shell` +DOCKER_RUN_DOCS := docker run --rm -it $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) -e AWS_S3_BUCKET default: binary @@ -17,17 +28,29 @@ $(DOCKER_RUN_DOCKER) hack/make.sh binary cross docs: docs-build - docker run --rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)" + $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" mkdocs serve docs-shell: docs-build - docker run --rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)" bash + $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash + +docs-release: docs-build + $(DOCKER_RUN_DOCS) -e BUILD_ROOT "$(DOCKER_DOCS_IMAGE)" ./release.sh test: build - $(DOCKER_RUN_DOCKER) hack/make.sh test test-integration + $(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli + +test-unit: build + $(DOCKER_RUN_DOCKER) hack/make.sh test-unit test-integration: build $(DOCKER_RUN_DOCKER) hack/make.sh test-integration +test-integration-cli: build + $(DOCKER_RUN_DOCKER) hack/make.sh binary test-integration-cli + +validate: build + $(DOCKER_RUN_DOCKER) hack/make.sh validate-gofmt validate-dco + shell: build $(DOCKER_RUN_DOCKER) bash @@ -35,6 +58,10 @@ docker build -t 
"$(DOCKER_IMAGE)" . docs-build: + cp ./VERSION docs/VERSION + echo "$(GIT_BRANCH)" > docs/GIT_BRANCH + echo "$(AWS_S3_BUCKET)" > docs/AWS_S3_BUCKET + echo "$(GITCOMMIT)" > docs/GITCOMMIT docker build -t "$(DOCKER_DOCS_IMAGE)" docs bundles: diff -Nru docker.io-0.9.1~dfsg1/nat/nat.go docker.io-1.3.2~dfsg1/nat/nat.go --- docker.io-0.9.1~dfsg1/nat/nat.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/nat/nat.go 2014-11-24 17:38:01.000000000 +0000 @@ -5,14 +5,16 @@ import ( "fmt" - "github.com/dotcloud/docker/utils" + "net" "strconv" "strings" + + "github.com/docker/docker/pkg/parsers" ) const ( PortSpecTemplate = "ip:hostPort:containerPort" - PortSpecTemplateFormat = "ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort" + PortSpecTemplateFormat = "ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort" ) type PortBinding struct { @@ -59,17 +61,34 @@ return i } -// Splits a port in the format of port/proto +// Splits a port in the format of proto/port func SplitProtoPort(rawPort string) (string, string) { + var port string + var proto string + parts := strings.Split(rawPort, "/") - l := len(parts) - if l == 0 { - return "", "" + + if len(parts) == 0 || parts[0] == "" { // we have "" or ""/ + port = "" + proto = "" + } else { // we have # or #/ or #/... + port = parts[0] + if len(parts) > 1 && parts[1] != "" { + proto = parts[1] // we have #/... + } else { + proto = "tcp" // we have # or #/ + } } - if l == 1 { - return "tcp", rawPort + return proto, port +} + +func validateProto(proto string) bool { + for _, availableProto := range []string{"tcp", "udp"} { + if availableProto == proto { + return true + } } - return parts[0], parts[1] + return false } // We will receive port specs in the format of ip:public:private/proto and these need to be @@ -93,7 +112,7 @@ rawPort = fmt.Sprintf(":%s", rawPort) } - parts, err := utils.PartParser(PortSpecTemplate, rawPort) + parts, err := parsers.PartParser(PortSpecTemplate, rawPort) if err != nil { return nil, nil, err } @@ -104,6 +123,9 @@ hostPort = parts["hostPort"] ) + if rawIp != "" && net.ParseIP(rawIp) == nil { + return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIp) + } if containerPort == "" { return nil, nil, fmt.Errorf("No port specified: %s", rawPort) } @@ -114,6 +136,10 @@ return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort) } + if !validateProto(proto) { + return nil, nil, fmt.Errorf("Invalid proto: %s", proto) + } + port := NewPort(proto, containerPort) if _, exists := exposedPorts[port]; !exists { exposedPorts[port] = struct{}{} diff -Nru docker.io-0.9.1~dfsg1/nat/nat_test.go docker.io-1.3.2~dfsg1/nat/nat_test.go --- docker.io-0.9.1~dfsg1/nat/nat_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/nat/nat_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,201 @@ +package nat + +import ( + "testing" +) + +func TestParsePort(t *testing.T) { + var ( + p int + err error + ) + + p, err = ParsePort("1234") + + if err != nil || p != 1234 { + t.Fatal("Parsing '1234' did not succeed") + } + + // FIXME currently this is a valid port. I don't think it should be. + // I'm leaving this test commented out until we make a decision. 
+ // - erikh + + /* + p, err = ParsePort("0123") + + if err != nil { + t.Fatal("Successfully parsed port '0123' to '123'") + } + */ + + p, err = ParsePort("asdf") + + if err == nil || p != 0 { + t.Fatal("Parsing port 'asdf' succeeded") + } + + p, err = ParsePort("1asdf") + + if err == nil || p != 0 { + t.Fatal("Parsing port '1asdf' succeeded") + } +} + +func TestPort(t *testing.T) { + p := NewPort("tcp", "1234") + + if string(p) != "1234/tcp" { + t.Fatal("tcp, 1234 did not result in the string 1234/tcp") + } + + if p.Proto() != "tcp" { + t.Fatal("protocol was not tcp") + } + + if p.Port() != "1234" { + t.Fatal("port string value was not 1234") + } + + if p.Int() != 1234 { + t.Fatal("port int value was not 1234") + } +} + +func TestSplitProtoPort(t *testing.T) { + var ( + proto string + port string + ) + + proto, port = SplitProtoPort("1234/tcp") + + if proto != "tcp" || port != "1234" { + t.Fatal("Could not split 1234/tcp properly") + } + + proto, port = SplitProtoPort("") + + if proto != "" || port != "" { + t.Fatal("parsing an empty string yielded surprising results") + } + + proto, port = SplitProtoPort("1234") + + if proto != "tcp" || port != "1234" { + t.Fatal("tcp is not the default protocol for portspec '1234'") + } + + proto, port = SplitProtoPort("1234/") + + if proto != "tcp" || port != "1234" { + t.Fatal("parsing '1234/' yielded:" + port + "/" + proto) + } + + proto, port = SplitProtoPort("/tcp") + + if proto != "" || port != "" { + t.Fatal("parsing '/tcp' yielded:" + port + "/" + proto) + } +} + +func TestParsePortSpecs(t *testing.T) { + var ( + portMap map[Port]struct{} + bindingMap map[Port][]PortBinding + err error + ) + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234/tcp", "2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err.Error()) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIp != "" { + t.Fatalf("HostIp should not be set for %s", portspec) + } + + if bindings[0].HostPort != "" { + t.Fatalf("HostPort should not be set for %s", portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234:1234/tcp", "2345:2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err.Error()) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIp != "" { + t.Fatalf("HostIp should not be set for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234:1234/tcp", "0.0.0.0:2345:2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err.Error()) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, 
bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIp != "0.0.0.0" { + t.Fatalf("HostIp is not 0.0.0.0 for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + _, _, err = ParsePortSpecs([]string{"localhost:1234:1234/tcp"}) + + if err == nil { + t.Fatal("Received no error while trying to parse a hostname instead of ip") + } +} diff -Nru docker.io-0.9.1~dfsg1/networkdriver/ipallocator/allocator.go docker.io-1.3.2~dfsg1/networkdriver/ipallocator/allocator.go --- docker.io-0.9.1~dfsg1/networkdriver/ipallocator/allocator.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/networkdriver/ipallocator/allocator.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,159 +0,0 @@ -package ipallocator - -import ( - "encoding/binary" - "errors" - "github.com/dotcloud/docker/networkdriver" - "github.com/dotcloud/docker/pkg/collections" - "net" - "sync" -) - -type networkSet map[string]*collections.OrderedIntSet - -var ( - ErrNoAvailableIPs = errors.New("no available ip addresses on network") - ErrIPAlreadyAllocated = errors.New("ip already allocated") -) - -var ( - lock = sync.Mutex{} - allocatedIPs = networkSet{} - availableIPS = networkSet{} -) - -// RequestIP requests an available ip from the given network. It -// will return the next available ip if the ip provided is nil. If the -// ip provided is not nil it will validate that the provided ip is available -// for use or return an error -func RequestIP(address *net.IPNet, ip *net.IP) (*net.IP, error) { - lock.Lock() - defer lock.Unlock() - - checkAddress(address) - - if ip == nil { - next, err := getNextIp(address) - if err != nil { - return nil, err - } - return next, nil - } - - if err := registerIP(address, ip); err != nil { - return nil, err - } - return ip, nil -} - -// ReleaseIP adds the provided ip back into the pool of -// available ips to be returned for use. -func ReleaseIP(address *net.IPNet, ip *net.IP) error { - lock.Lock() - defer lock.Unlock() - - checkAddress(address) - - var ( - existing = allocatedIPs[address.String()] - available = availableIPS[address.String()] - pos = getPosition(address, ip) - ) - - existing.Remove(int(pos)) - available.Push(int(pos)) - - return nil -} - -// convert the ip into the position in the subnet. Only -// positions are saved in the set -func getPosition(address *net.IPNet, ip *net.IP) int32 { - var ( - first, _ = networkdriver.NetworkRange(address) - base = ipToInt(&first) - i = ipToInt(ip) - ) - return i - base -} - -// return an available ip if one is currently available. If not, -// return the next available ip for the network -func getNextIp(address *net.IPNet) (*net.IP, error) { - var ( - ownIP = ipToInt(&address.IP) - available = availableIPS[address.String()] - allocated = allocatedIPs[address.String()] - first, _ = networkdriver.NetworkRange(address) - base = ipToInt(&first) - size = int(networkdriver.NetworkSize(address.Mask)) - max = int32(size - 2) // size -1 for the broadcast address, -1 for the gateway address - pos = int32(available.Pop()) - ) - - // We pop and push the position not the ip - if pos != 0 { - ip := intToIP(int32(base + pos)) - allocated.Push(int(pos)) - - return ip, nil - } - - var ( - firstNetIP = address.IP.To4().Mask(address.Mask) - firstAsInt = ipToInt(&firstNetIP) + 1 - ) - - pos = int32(allocated.PullBack()) - for i := int32(0); i < max; i++ { - pos = pos%max + 1 - next := int32(base + pos) - - if next == ownIP || next == firstAsInt { - continue - } - - if !allocated.Exists(int(pos)) { - ip := intToIP(next) - allocated.Push(int(pos)) - return ip, nil - } - } - return nil, ErrNoAvailableIPs -} - -func registerIP(address *net.IPNet, ip *net.IP) error { - var ( - existing = allocatedIPs[address.String()] - available = availableIPS[address.String()] - pos = getPosition(address, ip) - ) - - if existing.Exists(int(pos)) { - return ErrIPAlreadyAllocated - } - available.Remove(int(pos)) - - return nil -} - -// Converts a 4 bytes IP into a 32 bit integer -func ipToInt(ip *net.IP) int32 { - return int32(binary.BigEndian.Uint32(ip.To4())) -} - -// Converts 32 bit integer into a 4 bytes IP address -func intToIP(n int32) *net.IP { - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, uint32(n)) - ip := net.IP(b) - return &ip -} - -func checkAddress(address *net.IPNet) { - key := address.String() - if _, exists := allocatedIPs[key]; !exists { - allocatedIPs[key] = collections.NewOrderedIntSet() - availableIPS[key] = collections.NewOrderedIntSet() - } -} diff -Nru docker.io-0.9.1~dfsg1/networkdriver/ipallocator/allocator_test.go docker.io-1.3.2~dfsg1/networkdriver/ipallocator/allocator_test.go --- docker.io-0.9.1~dfsg1/networkdriver/ipallocator/allocator_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/networkdriver/ipallocator/allocator_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,241 +0,0 @@ -package ipallocator - -import ( - "fmt" - "net" - "testing" -) - -func reset() { - allocatedIPs = networkSet{} - availableIPS = networkSet{} -} - -func TestRequestNewIps(t *testing.T) { - defer reset() - network := &net.IPNet{ - IP: []byte{192, 168, 0, 1}, - Mask: []byte{255, 255, 255, 0}, - } - - for i := 2; i < 10; i++ { - ip, err := RequestIP(network, nil) - if err != nil { - t.Fatal(err) - } - - if expected := fmt.Sprintf("192.168.0.%d", i); ip.String() != expected { - t.Fatalf("Expected ip %s got %s", expected, ip.String()) - } - } -} - -func TestReleaseIp(t *testing.T) { - defer reset() - network := &net.IPNet{ - IP: []byte{192, 168, 0, 1}, - Mask: []byte{255, 255, 255, 0}, - } - - ip, err := RequestIP(network, nil) - if err != nil { - t.Fatal(err) - } - - if err := ReleaseIP(network, ip); err != nil { - t.Fatal(err) - } -} - -func TestGetReleasedIp(t *testing.T) { - defer reset() - network := &net.IPNet{ - IP: []byte{192, 168, 0, 1}, - Mask: []byte{255, 255, 255, 0}, - } - - ip, err := RequestIP(network, nil) - if err != nil { - t.Fatal(err) - } - - value := ip.String() - if err := ReleaseIP(network, ip); err != nil { - t.Fatal(err) - } - - ip, err = RequestIP(network, nil) - if err != nil { - t.Fatal(err) - } - - if ip.String() != value { - t.Fatalf("Expected to receive same ip %s got %s", value, ip.String()) - } -} - -func TestRequestSpecificIp(t *testing.T) { - defer reset() - network := &net.IPNet{ - IP: []byte{192, 168, 0, 1}, - Mask: []byte{255, 255, 255, 0}, - } - - ip := net.ParseIP("192.168.1.5") - - if _, err := RequestIP(network, &ip); err != nil { - t.Fatal(err) - } -} - -func TestConversion(t *testing.T) { - ip := net.ParseIP("127.0.0.1") - i := ipToInt(&ip) - if i == 0 { - t.Fatal("converted to zero") - } - conv := intToIP(i) - if !ip.Equal(*conv) { - t.Error(conv.String()) - } -} - -func TestIPAllocator(t *testing.T) { - expectedIPs := []net.IP{ - 0: net.IPv4(127, 0, 0, 2), - 1: net.IPv4(127, 0, 0, 3), - 2: net.IPv4(127, 0, 0, 4), - 3: net.IPv4(127, 0, 0, 5), - 4: net.IPv4(127, 0, 0, 6), - } - - gwIP, n, _ := net.ParseCIDR("127.0.0.1/29") - network := &net.IPNet{IP: gwIP, Mask: n.Mask} - // Pool after initialisation (f = free, u = used) - // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) - // ↑ - - // Check that we get 5 IPs, from 127.0.0.2–127.0.0.6, in that - // order. - for i := 0; i < 5; i++ { - ip, err := RequestIP(network, nil) - if err != nil { - t.Fatal(err) - } - - assertIPEquals(t, &expectedIPs[i], ip) - } - // Before loop begin - // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) - // ↑ - - // After i = 0 - // 2(u) - 3(f) - 4(f) - 5(f) - 6(f) - // ↑ - - // After i = 1 - // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) - // ↑ - - // After i = 2 - // 2(u) - 3(u) - 4(u) - 5(f) - 6(f) - // ↑ - - // After i = 3 - // 2(u) - 3(u) - 4(u) - 5(u) - 6(f) - // ↑ - - // After i = 4 - // 2(u) - 3(u) - 4(u) - 5(u) - 6(u) - // ↑ - - // Check that there are no more IPs - ip, err := RequestIP(network, nil) - if err == nil { - t.Fatalf("There shouldn't be any IP addresses at this point, got %s\n", ip) - } - - // Release some IPs in non-sequential order - if err := ReleaseIP(network, &expectedIPs[3]); err != nil { - t.Fatal(err) - } - // 2(u) - 3(u) - 4(u) - 5(f) - 6(u) - // ↑ - - if err := ReleaseIP(network, &expectedIPs[2]); err != nil { - t.Fatal(err) - } - // 2(u) - 3(u) - 4(f) - 5(f) - 6(u) - // ↑ - - if err := ReleaseIP(network, &expectedIPs[4]); err != nil { - t.Fatal(err) - } - // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) - // ↑ - - // Make sure that IPs are reused in sequential order, starting - // with the first released IP - newIPs := make([]*net.IP, 3) - for i := 0; i < 3; i++ { - ip, err := RequestIP(network, nil) - if err != nil { - t.Fatal(err) - } - - newIPs[i] = ip - } - // Before loop begin - // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) - // ↑ - - // After i = 0 - // 2(u) - 3(u) - 4(f) - 5(u) - 6(f) - // ↑ - - // After i = 1 - // 2(u) - 3(u) - 4(f) - 5(u) - 6(u) - // ↑ - - // After i = 2 - // 2(u) - 3(u) - 4(u) - 5(u) - 6(u) - // ↑ - - // Reordered these because the new set will always return the - // lowest ips first and not in the order that they were released - assertIPEquals(t, &expectedIPs[2], newIPs[0]) - assertIPEquals(t, &expectedIPs[3], newIPs[1]) - assertIPEquals(t, &expectedIPs[4], newIPs[2]) - - _, err = RequestIP(network, nil) - if err == nil { - t.Fatal("There shouldn't be any IP addresses at this point") - } -} - -func TestAllocateFirstIP(t *testing.T) { - defer reset() - network := &net.IPNet{ - IP: []byte{192, 168, 0, 0}, - Mask: []byte{255, 255, 255, 0}, - } - - firstIP := network.IP.To4().Mask(network.Mask) - first := ipToInt(&firstIP) + 1 - - ip, err := RequestIP(network, nil) - if err != nil { - t.Fatal(err) - } - allocated := ipToInt(ip) - - if allocated == first { - t.Fatalf("allocated ip should not equal first ip: %d == %d", first, allocated) - } -} - -func assertIPEquals(t *testing.T, ip1, ip2 *net.IP) { - if !ip1.Equal(*ip2) { - t.Fatalf("Expected IP %s, got %s", ip1, ip2) - } -} diff -Nru docker.io-0.9.1~dfsg1/networkdriver/lxc/driver.go docker.io-1.3.2~dfsg1/networkdriver/lxc/driver.go --- docker.io-0.9.1~dfsg1/networkdriver/lxc/driver.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/networkdriver/lxc/driver.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,482 +0,0 @@ -package lxc - -import ( - "fmt" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/networkdriver" - "github.com/dotcloud/docker/networkdriver/ipallocator" - "github.com/dotcloud/docker/networkdriver/portallocator" - "github.com/dotcloud/docker/networkdriver/portmapper" - "github.com/dotcloud/docker/pkg/iptables" - "github.com/dotcloud/docker/pkg/netlink" - "github.com/dotcloud/docker/utils" - "io/ioutil" - "log" - "net" - "strings" - "syscall" - "unsafe" -) - -const ( - DefaultNetworkBridge = "docker0" - siocBRADDBR = 0x89a0 -) - -// Network interface represents the networking stack of a container -type networkInterface struct { - IP net.IP - PortMappings []net.Addr // these are mappings to the host interfaces -} - -var ( - addrs = []string{ - // Here we don't follow the convention of using the 1st IP of the range for the gateway. - // This is to use the same gateway IPs as the /24 ranges, which predate the /16 ranges. - // In theory this shouldn't matter - in practice there's bound to be a few scripts relying - // on the internal addressing or other stupid things like that. - // They shouldn't, but hey, let's not break them unless we really have to. - "172.17.42.1/16", // Don't use 172.16.0.0/16, it conflicts with EC2 DNS 172.16.0.23 - "10.0.42.1/16", // Don't even try using the entire /8, that's too intrusive - "10.1.42.1/16", - "10.42.42.1/16", - "172.16.42.1/24", - "172.16.43.1/24", - "172.16.44.1/24", - "10.0.42.1/24", - "10.0.43.1/24", - "192.168.42.1/24", - "192.168.43.1/24", - "192.168.44.1/24", - } - - bridgeIface string - bridgeNetwork *net.IPNet - - defaultBindingIP = net.ParseIP("0.0.0.0") - currentInterfaces = make(map[string]*networkInterface) -) - -func InitDriver(job *engine.Job) engine.Status { - var ( - network *net.IPNet - enableIPTables = job.GetenvBool("EnableIptables") - icc = job.GetenvBool("InterContainerCommunication") - ipForward = job.GetenvBool("EnableIpForward") - bridgeIP = job.Getenv("BridgeIP") - ) - - if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" { - defaultBindingIP = net.ParseIP(defaultIP) - } - - bridgeIface = job.Getenv("BridgeIface") - if bridgeIface == "" { - bridgeIface = DefaultNetworkBridge - } - - addr, err := networkdriver.GetIfaceAddr(bridgeIface) - if err != nil { - // If the iface is not found, try to create it - job.Logf("creating new bridge for %s", bridgeIface) - if err := createBridge(bridgeIP); err != nil { - job.Error(err) - return engine.StatusErr - } - - job.Logf("getting iface addr") - addr, err = networkdriver.GetIfaceAddr(bridgeIface) - if err != nil { - job.Error(err) - return engine.StatusErr - } - network = addr.(*net.IPNet) - } else { - network = addr.(*net.IPNet) - } - - // Configure iptables for link support - if enableIPTables { - if err := setupIPTables(addr, icc); err != nil { - job.Error(err) - return engine.StatusErr - } - } - - if ipForward { - // Enable IPv4 forwarding - if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil { - job.Logf("WARNING: unable to enable IPv4
forwarding: %s\n", err) - } - } - - // We can always try removing the iptables - if err := iptables.RemoveExistingChain("DOCKER"); err != nil { - job.Error(err) - return engine.StatusErr - } - - if enableIPTables { - chain, err := iptables.NewChain("DOCKER", bridgeIface) - if err != nil { - job.Error(err) - return engine.StatusErr - } - portmapper.SetIptablesChain(chain) - } - - bridgeNetwork = network - - // https://github.com/dotcloud/docker/issues/2768 - job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeNetwork.IP) - - for name, f := range map[string]engine.Handler{ - "allocate_interface": Allocate, - "release_interface": Release, - "allocate_port": AllocatePort, - "link": LinkContainers, - } { - if err := job.Eng.Register(name, f); err != nil { - job.Error(err) - return engine.StatusErr - } - } - return engine.StatusOK -} - -func setupIPTables(addr net.Addr, icc bool) error { - // Enable NAT - natArgs := []string{"POSTROUTING", "-t", "nat", "-s", addr.String(), "!", "-d", addr.String(), "-j", "MASQUERADE"} - - if !iptables.Exists(natArgs...) { - if output, err := iptables.Raw(append([]string{"-I"}, natArgs...)...); err != nil { - return fmt.Errorf("Unable to enable network bridge NAT: %s", err) - } else if len(output) != 0 { - return fmt.Errorf("Error iptables postrouting: %s", output) - } - } - - var ( - args = []string{"FORWARD", "-i", bridgeIface, "-o", bridgeIface, "-j"} - acceptArgs = append(args, "ACCEPT") - dropArgs = append(args, "DROP") - ) - - if !icc { - iptables.Raw(append([]string{"-D"}, acceptArgs...)...) - - if !iptables.Exists(dropArgs...) { - utils.Debugf("Disable inter-container communication") - if output, err := iptables.Raw(append([]string{"-I"}, dropArgs...)...); err != nil { - return fmt.Errorf("Unable to prevent intercontainer communication: %s", err) - } else if len(output) != 0 { - return fmt.Errorf("Error disabling intercontainer communication: %s", output) - } - } - } else { - iptables.Raw(append([]string{"-D"}, dropArgs...)...) - - if !iptables.Exists(acceptArgs...) { - utils.Debugf("Enable inter-container communication") - if output, err := iptables.Raw(append([]string{"-I"}, acceptArgs...)...); err != nil { - return fmt.Errorf("Unable to allow intercontainer communication: %s", err) - } else if len(output) != 0 { - return fmt.Errorf("Error enabling intercontainer communication: %s", output) - } - } - } - - // Accept all non-intercontainer outgoing packets - outgoingArgs := []string{"FORWARD", "-i", bridgeIface, "!", "-o", bridgeIface, "-j", "ACCEPT"} - if !iptables.Exists(outgoingArgs...) { - if output, err := iptables.Raw(append([]string{"-I"}, outgoingArgs...)...); err != nil { - return fmt.Errorf("Unable to allow outgoing packets: %s", err) - } else if len(output) != 0 { - return fmt.Errorf("Error iptables allow outgoing: %s", output) - } - } - - // Accept incoming packets for existing connections - existingArgs := []string{"FORWARD", "-o", bridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"} - - if !iptables.Exists(existingArgs...) { - if output, err := iptables.Raw(append([]string{"-I"}, existingArgs...)...); err != nil { - return fmt.Errorf("Unable to allow incoming packets: %s", err) - } else if len(output) != 0 { - return fmt.Errorf("Error iptables allow incoming: %s", output) - } - } - return nil -} - -// CreateBridgeIface creates a network bridge interface on the host system with the name `ifaceName`, -// and attempts to configure it with an address which doesn't conflict with any other interface on the host. 
-// If it can't find an address which doesn't conflict, it will return an error. -func createBridge(bridgeIP string) error { - nameservers := []string{} - resolvConf, _ := utils.GetResolvConf() - // we don't check for an error here, because we don't really care - // if we can't read /etc/resolv.conf. So instead we skip the append - // if resolvConf is nil. It either doesn't exist, or we can't read it - // for some reason. - if resolvConf != nil { - nameservers = append(nameservers, utils.GetNameserversAsCIDR(resolvConf)...) - } - - var ifaceAddr string - if len(bridgeIP) != 0 { - _, _, err := net.ParseCIDR(bridgeIP) - if err != nil { - return err - } - ifaceAddr = bridgeIP - } else { - for _, addr := range addrs { - _, dockerNetwork, err := net.ParseCIDR(addr) - if err != nil { - return err - } - if err := networkdriver.CheckNameserverOverlaps(nameservers, dockerNetwork); err == nil { - if err := networkdriver.CheckRouteOverlaps(dockerNetwork); err == nil { - ifaceAddr = addr - break - } else { - utils.Debugf("%s %s", addr, err) - } - } - } - } - - if ifaceAddr == "" { - return fmt.Errorf("Could not find a free IP address range for interface '%s'. Please configure its address manually and run 'docker -b %s'", bridgeIface, bridgeIface) - } - utils.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr) - - if err := createBridgeIface(bridgeIface); err != nil { - return err - } - - iface, err := net.InterfaceByName(bridgeIface) - if err != nil { - return err - } - - ipAddr, ipNet, err := net.ParseCIDR(ifaceAddr) - if err != nil { - return err - } - - if err := netlink.NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil { - return fmt.Errorf("Unable to add private network: %s", err) - } - if err := netlink.NetworkLinkUp(iface); err != nil { - return fmt.Errorf("Unable to start network bridge: %s", err) - } - return nil -} - -// Create the actual bridge device. This is more backward-compatible than -// netlink.NetworkLinkAdd and works on RHEL 6.
-func createBridgeIface(name string) error { - s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_IP) - if err != nil { - utils.Debugf("Bridge socket creation failed; IPv6 probably not enabled: %v", err) - s, err = syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_IP) - if err != nil { - return fmt.Errorf("Error creating bridge socket: %s", err) - } - } - defer syscall.Close(s) - - nameBytePtr, err := syscall.BytePtrFromString(name) - if err != nil { - return fmt.Errorf("Error converting bridge name %s to byte array: %s", name, err) - } - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), siocBRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { - return fmt.Errorf("Error creating bridge: %s", err) - } - return nil -} - -// Allocate a network interface -func Allocate(job *engine.Job) engine.Status { - var ( - ip *net.IP - err error - id = job.Args[0] - requestedIP = net.ParseIP(job.Getenv("RequestedIP")) - ) - - if requestedIP != nil { - ip, err = ipallocator.RequestIP(bridgeNetwork, &requestedIP) - } else { - ip, err = ipallocator.RequestIP(bridgeNetwork, nil) - } - if err != nil { - job.Error(err) - return engine.StatusErr - } - - out := engine.Env{} - out.Set("IP", ip.String()) - out.Set("Mask", bridgeNetwork.Mask.String()) - out.Set("Gateway", bridgeNetwork.IP.String()) - out.Set("Bridge", bridgeIface) - - size, _ := bridgeNetwork.Mask.Size() - out.SetInt("IPPrefixLen", size) - - currentInterfaces[id] = &networkInterface{ - IP: *ip, - } - - out.WriteTo(job.Stdout) - - return engine.StatusOK -} - -// release an interface for a selected ip -func Release(job *engine.Job) engine.Status { - var ( - id = job.Args[0] - containerInterface = currentInterfaces[id] - ip net.IP - port int - proto string - ) - - if containerInterface == nil { - return job.Errorf("No network information to release for %s", id) - } - - for _, nat := range containerInterface.PortMappings { - if err := portmapper.Unmap(nat); err != nil { - log.Printf("Unable to unmap port %s: %s", nat, err) - } - - // this is host mappings - switch a := nat.(type) { - case *net.TCPAddr: - proto = "tcp" - ip = a.IP - port = a.Port - case *net.UDPAddr: - proto = "udp" - ip = a.IP - port = a.Port - } - - if err := portallocator.ReleasePort(ip, proto, port); err != nil { - log.Printf("Unable to release port %s", nat) - } - } - - if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil { - log.Printf("Unable to release ip %s\n", err) - } - return engine.StatusOK -} - -// Allocate an external port and map it to the interface -func AllocatePort(job *engine.Job) engine.Status { - var ( - err error - - ip = defaultBindingIP - id = job.Args[0] - hostIP = job.Getenv("HostIP") - hostPort = job.GetenvInt("HostPort") - containerPort = job.GetenvInt("ContainerPort") - proto = job.Getenv("Proto") - network = currentInterfaces[id] - ) - - if hostIP != "" { - ip = net.ParseIP(hostIP) - } - - // host ip, proto, and host port - hostPort, err = portallocator.RequestPort(ip, proto, hostPort) - if err != nil { - job.Error(err) - return engine.StatusErr - } - - var ( - container net.Addr - host net.Addr - ) - - if proto == "tcp" { - host = &net.TCPAddr{IP: ip, Port: hostPort} - container = &net.TCPAddr{IP: network.IP, Port: containerPort} - } else { - host = &net.UDPAddr{IP: ip, Port: hostPort} - container = &net.UDPAddr{IP: network.IP, Port: containerPort} - } - - if err := portmapper.Map(container, ip, hostPort); err != nil { - portallocator.ReleasePort(ip,
proto, hostPort) - - job.Error(err) - return engine.StatusErr - } - network.PortMappings = append(network.PortMappings, host) - - out := engine.Env{} - out.Set("HostIP", ip.String()) - out.SetInt("HostPort", hostPort) - - if _, err := out.WriteTo(job.Stdout); err != nil { - job.Error(err) - return engine.StatusErr - } - return engine.StatusOK -} - -func LinkContainers(job *engine.Job) engine.Status { - var ( - action = job.Args[0] - childIP = job.Getenv("ChildIP") - parentIP = job.Getenv("ParentIP") - ignoreErrors = job.GetenvBool("IgnoreErrors") - ports = job.GetenvList("Ports") - ) - split := func(p string) (string, string) { - parts := strings.Split(p, "/") - return parts[0], parts[1] - } - - for _, p := range ports { - port, proto := split(p) - if output, err := iptables.Raw(action, "FORWARD", - "-i", bridgeIface, "-o", bridgeIface, - "-p", proto, - "-s", parentIP, - "--dport", port, - "-d", childIP, - "-j", "ACCEPT"); !ignoreErrors && err != nil { - job.Error(err) - return engine.StatusErr - } else if len(output) != 0 { - job.Errorf("Error toggling iptables forward: %s", output) - return engine.StatusErr - } - - if output, err := iptables.Raw(action, "FORWARD", - "-i", bridgeIface, "-o", bridgeIface, - "-p", proto, - "-s", childIP, - "--sport", port, - "-d", parentIP, - "-j", "ACCEPT"); !ignoreErrors && err != nil { - job.Error(err) - return engine.StatusErr - } else if len(output) != 0 { - job.Errorf("Error toggling iptables forward: %s", output) - return engine.StatusErr - } - } - return engine.StatusOK -} diff -Nru docker.io-0.9.1~dfsg1/networkdriver/network.go docker.io-1.3.2~dfsg1/networkdriver/network.go --- docker.io-0.9.1~dfsg1/networkdriver/network.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/networkdriver/network.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -package networkdriver - -import ( - "errors" -) - -var ( - ErrNetworkOverlapsWithNameservers = errors.New("requested network overlaps with nameserver") - ErrNetworkOverlaps = errors.New("requested network overlaps with existing network") -) diff -Nru docker.io-0.9.1~dfsg1/networkdriver/network_test.go docker.io-1.3.2~dfsg1/networkdriver/network_test.go --- docker.io-0.9.1~dfsg1/networkdriver/network_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/networkdriver/network_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,190 +0,0 @@ -package networkdriver - -import ( - "github.com/dotcloud/docker/pkg/netlink" - "net" - "testing" -) - -func TestNonOverlappingNameservers(t *testing.T) { - network := &net.IPNet{ - IP: []byte{192, 168, 0, 1}, - Mask: []byte{255, 255, 255, 0}, - } - nameservers := []string{ - "127.0.0.1/32", - } - - if err := CheckNameserverOverlaps(nameservers, network); err != nil { - t.Fatal(err) - } -} - -func TestOverlappingNameservers(t *testing.T) { - network := &net.IPNet{ - IP: []byte{192, 168, 0, 1}, - Mask: []byte{255, 255, 255, 0}, - } - nameservers := []string{ - "192.168.0.1/32", - } - - if err := CheckNameserverOverlaps(nameservers, network); err == nil { - t.Fatalf("Expected error %s got %s", ErrNetworkOverlapsWithNameservers, err) - } -} - -func TestCheckRouteOverlaps(t *testing.T) { - orig := networkGetRoutesFct - defer func() { - networkGetRoutesFct = orig - }() - networkGetRoutesFct = func() ([]netlink.Route, error) { - routesData := []string{"10.0.2.0/32", "10.0.3.0/24", "10.0.42.0/24", "172.16.42.0/24", "192.168.142.0/24"} - - routes := []netlink.Route{} - for _, addr := range routesData { - _, netX, _ := net.ParseCIDR(addr) - routes =
append(routes, netlink.Route{IPNet: netX}) - } - return routes, nil - } - - _, netX, _ := net.ParseCIDR("172.16.0.1/24") - if err := CheckRouteOverlaps(netX); err != nil { - t.Fatal(err) - } - - _, netX, _ = net.ParseCIDR("10.0.2.0/24") - if err := CheckRouteOverlaps(netX); err == nil { - t.Fatalf("10.0.2.0/24 and 10.0.2.0 should overlap but it doesn't") - } -} - -func TestCheckNameserverOverlaps(t *testing.T) { - nameservers := []string{"10.0.2.3/32", "192.168.102.1/32"} - - _, netX, _ := net.ParseCIDR("10.0.2.3/32") - - if err := CheckNameserverOverlaps(nameservers, netX); err == nil { - t.Fatalf("%s should overlap 10.0.2.3/32 but doesn't", netX) - } - - _, netX, _ = net.ParseCIDR("192.168.102.2/32") - - if err := CheckNameserverOverlaps(nameservers, netX); err != nil { - t.Fatalf("%s should not overlap %v but it does", netX, nameservers) - } -} - -func AssertOverlap(CIDRx string, CIDRy string, t *testing.T) { - _, netX, _ := net.ParseCIDR(CIDRx) - _, netY, _ := net.ParseCIDR(CIDRy) - if !NetworkOverlaps(netX, netY) { - t.Errorf("%v and %v should overlap", netX, netY) - } -} - -func AssertNoOverlap(CIDRx string, CIDRy string, t *testing.T) { - _, netX, _ := net.ParseCIDR(CIDRx) - _, netY, _ := net.ParseCIDR(CIDRy) - if NetworkOverlaps(netX, netY) { - t.Errorf("%v and %v should not overlap", netX, netY) - } -} - -func TestNetworkOverlaps(t *testing.T) { - //netY starts at same IP and ends within netX - AssertOverlap("172.16.0.1/24", "172.16.0.1/25", t) - //netY starts within netX and ends at same IP - AssertOverlap("172.16.0.1/24", "172.16.0.128/25", t) - //netY starts and ends within netX - AssertOverlap("172.16.0.1/24", "172.16.0.64/25", t) - //netY starts at same IP and ends outside of netX - AssertOverlap("172.16.0.1/24", "172.16.0.1/23", t) - //netY starts before and ends at same IP of netX - AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t) - //netY starts before and ends outside of netX - AssertOverlap("172.16.1.1/24", "172.16.0.1/22", t) - //netY starts and ends before netX - AssertNoOverlap("172.16.1.1/25", "172.16.0.1/24", t) - //netX starts and ends before netY - AssertNoOverlap("172.16.1.1/25", "172.16.2.1/24", t) -} - -func TestNetworkRange(t *testing.T) { - // Simple class C test - _, network, _ := net.ParseCIDR("192.168.0.1/24") - first, last := NetworkRange(network) - if !first.Equal(net.ParseIP("192.168.0.0")) { - t.Error(first.String()) - } - if !last.Equal(net.ParseIP("192.168.0.255")) { - t.Error(last.String()) - } - if size := NetworkSize(network.Mask); size != 256 { - t.Error(size) - } - - // Class A test - _, network, _ = net.ParseCIDR("10.0.0.1/8") - first, last = NetworkRange(network) - if !first.Equal(net.ParseIP("10.0.0.0")) { - t.Error(first.String()) - } - if !last.Equal(net.ParseIP("10.255.255.255")) { - t.Error(last.String()) - } - if size := NetworkSize(network.Mask); size != 16777216 { - t.Error(size) - } - - // Class A, random IP address - _, network, _ = net.ParseCIDR("10.1.2.3/8") - first, last = NetworkRange(network) - if !first.Equal(net.ParseIP("10.0.0.0")) { - t.Error(first.String()) - } - if !last.Equal(net.ParseIP("10.255.255.255")) { - t.Error(last.String()) - } - - // 32bit mask - _, network, _ = net.ParseCIDR("10.1.2.3/32") - first, last = NetworkRange(network) - if !first.Equal(net.ParseIP("10.1.2.3")) { - t.Error(first.String()) - } - if !last.Equal(net.ParseIP("10.1.2.3")) { - t.Error(last.String()) - } - if size := NetworkSize(network.Mask); size != 1 { - t.Error(size) - } - - // 31bit mask - _, network, _ = net.ParseCIDR("10.1.2.3/31") - 
first, last = NetworkRange(network) - if !first.Equal(net.ParseIP("10.1.2.2")) { - t.Error(first.String()) - } - if !last.Equal(net.ParseIP("10.1.2.3")) { - t.Error(last.String()) - } - if size := NetworkSize(network.Mask); size != 2 { - t.Error(size) - } - - // 26bit mask - _, network, _ = net.ParseCIDR("10.1.2.3/26") - first, last = NetworkRange(network) - if !first.Equal(net.ParseIP("10.1.2.0")) { - t.Error(first.String()) - } - if !last.Equal(net.ParseIP("10.1.2.63")) { - t.Error(last.String()) - } - if size := NetworkSize(network.Mask); size != 64 { - t.Error(size) - } -} diff -Nru docker.io-0.9.1~dfsg1/networkdriver/portallocator/portallocator.go docker.io-1.3.2~dfsg1/networkdriver/portallocator/portallocator.go --- docker.io-0.9.1~dfsg1/networkdriver/portallocator/portallocator.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/networkdriver/portallocator/portallocator.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,165 +0,0 @@ -package portallocator - -import ( - "errors" - "github.com/dotcloud/docker/pkg/collections" - "net" - "sync" -) - -const ( - BeginPortRange = 49153 - EndPortRange = 65535 -) - -type ( - portMappings map[string]*collections.OrderedIntSet - ipMapping map[string]portMappings -) - -var ( - ErrPortAlreadyAllocated = errors.New("port has already been allocated") - ErrPortExceedsRange = errors.New("port exceeds upper range") - ErrUnknownProtocol = errors.New("unknown protocol") -) - -var ( - currentDynamicPort = map[string]int{ - "tcp": BeginPortRange - 1, - "udp": BeginPortRange - 1, - } - defaultIP = net.ParseIP("0.0.0.0") - defaultAllocatedPorts = portMappings{} - otherAllocatedPorts = ipMapping{} - lock = sync.Mutex{} -) - -func init() { - defaultAllocatedPorts["tcp"] = collections.NewOrderedIntSet() - defaultAllocatedPorts["udp"] = collections.NewOrderedIntSet() -} - -// RequestPort returns an available port if the port is 0 -// If the provided port is not 0 then it will be checked if -// it is available for allocation -func RequestPort(ip net.IP, proto string, port int) (int, error) { - lock.Lock() - defer lock.Unlock() - - if err := validateProtocol(proto); err != nil { - return 0, err - } - - // If the user requested a specific port to be allocated - if port > 0 { - if err := registerSetPort(ip, proto, port); err != nil { - return 0, err - } - return port, nil - } - return registerDynamicPort(ip, proto) -} - -// ReleasePort will return the provided port back into the -// pool for reuse -func ReleasePort(ip net.IP, proto string, port int) error { - lock.Lock() - defer lock.Unlock() - - if err := validateProtocol(proto); err != nil { - return err - } - - allocated := defaultAllocatedPorts[proto] - allocated.Remove(port) - - if !equalsDefault(ip) { - registerIP(ip) - - // Remove the port for the specific ip address - allocated = otherAllocatedPorts[ip.String()][proto] - allocated.Remove(port) - } - return nil -} - -func ReleaseAll() error { - lock.Lock() - defer lock.Unlock() - - currentDynamicPort["tcp"] = BeginPortRange - 1 - currentDynamicPort["udp"] = BeginPortRange - 1 - - defaultAllocatedPorts = portMappings{} - defaultAllocatedPorts["tcp"] = collections.NewOrderedIntSet() - defaultAllocatedPorts["udp"] = collections.NewOrderedIntSet() - - otherAllocatedPorts = ipMapping{} - - return nil -} - -func registerDynamicPort(ip net.IP, proto string) (int, error) { - allocated := defaultAllocatedPorts[proto] - - port := nextPort(proto) - if port > EndPortRange { - return 0, ErrPortExceedsRange - } - - if !equalsDefault(ip) { - registerIP(ip) - - 
ipAllocated := otherAllocatedPorts[ip.String()][proto] - ipAllocated.Push(port) - } else { - allocated.Push(port) - } - return port, nil -} - -func registerSetPort(ip net.IP, proto string, port int) error { - allocated := defaultAllocatedPorts[proto] - if allocated.Exists(port) { - return ErrPortAlreadyAllocated - } - - if !equalsDefault(ip) { - registerIP(ip) - - ipAllocated := otherAllocatedPorts[ip.String()][proto] - if ipAllocated.Exists(port) { - return ErrPortAlreadyAllocated - } - ipAllocated.Push(port) - } else { - allocated.Push(port) - } - return nil -} - -func equalsDefault(ip net.IP) bool { - return ip == nil || ip.Equal(defaultIP) -} - -func nextPort(proto string) int { - c := currentDynamicPort[proto] + 1 - currentDynamicPort[proto] = c - return c -} - -func registerIP(ip net.IP) { - if _, exists := otherAllocatedPorts[ip.String()]; !exists { - otherAllocatedPorts[ip.String()] = portMappings{ - "tcp": collections.NewOrderedIntSet(), - "udp": collections.NewOrderedIntSet(), - } - } -} - -func validateProtocol(proto string) error { - if _, exists := defaultAllocatedPorts[proto]; !exists { - return ErrUnknownProtocol - } - return nil -} diff -Nru docker.io-0.9.1~dfsg1/networkdriver/portallocator/portallocator_test.go docker.io-1.3.2~dfsg1/networkdriver/portallocator/portallocator_test.go --- docker.io-0.9.1~dfsg1/networkdriver/portallocator/portallocator_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/networkdriver/portallocator/portallocator_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,184 +0,0 @@ -package portallocator - -import ( - "net" - "testing" -) - -func reset() { - ReleaseAll() -} - -func TestRequestNewPort(t *testing.T) { - defer reset() - - port, err := RequestPort(defaultIP, "tcp", 0) - if err != nil { - t.Fatal(err) - } - - if expected := BeginPortRange; port != expected { - t.Fatalf("Expected port %d got %d", expected, port) - } -} - -func TestRequestSpecificPort(t *testing.T) { - defer reset() - - port, err := RequestPort(defaultIP, "tcp", 5000) - if err != nil { - t.Fatal(err) - } - if port != 5000 { - t.Fatalf("Expected port 5000 got %d", port) - } -} - -func TestReleasePort(t *testing.T) { - defer reset() - - port, err := RequestPort(defaultIP, "tcp", 5000) - if err != nil { - t.Fatal(err) - } - if port != 5000 { - t.Fatalf("Expected port 5000 got %d", port) - } - - if err := ReleasePort(defaultIP, "tcp", 5000); err != nil { - t.Fatal(err) - } -} - -func TestReuseReleasedPort(t *testing.T) { - defer reset() - - port, err := RequestPort(defaultIP, "tcp", 5000) - if err != nil { - t.Fatal(err) - } - if port != 5000 { - t.Fatalf("Expected port 5000 got %d", port) - } - - if err := ReleasePort(defaultIP, "tcp", 5000); err != nil { - t.Fatal(err) - } - - port, err = RequestPort(defaultIP, "tcp", 5000) - if err != nil { - t.Fatal(err) - } -} - -func TestReleaseUnreadledPort(t *testing.T) { - defer reset() - - port, err := RequestPort(defaultIP, "tcp", 5000) - if err != nil { - t.Fatal(err) - } - if port != 5000 { - t.Fatalf("Expected port 5000 got %d", port) - } - - port, err = RequestPort(defaultIP, "tcp", 5000) - if err != ErrPortAlreadyAllocated { - t.Fatalf("Expected error %s got %s", ErrPortAlreadyAllocated, err) - } -} - -func TestUnknowProtocol(t *testing.T) { - defer reset() - - if _, err := RequestPort(defaultIP, "tcpp", 0); err != ErrUnknownProtocol { - t.Fatalf("Expected error %s got %s", ErrUnknownProtocol, err) - } -} - -func TestAllocateAllPorts(t *testing.T) { - defer reset() - - for i := 0; i <= EndPortRange-BeginPortRange; 
i++ { - port, err := RequestPort(defaultIP, "tcp", 0) - if err != nil { - t.Fatal(err) - } - - if expected := BeginPortRange + i; port != expected { - t.Fatalf("Expected port %d got %d", expected, port) - } - } - - if _, err := RequestPort(defaultIP, "tcp", 0); err != ErrPortExceedsRange { - t.Fatalf("Expected error %s got %s", ErrPortExceedsRange, err) - } - - _, err := RequestPort(defaultIP, "udp", 0) - if err != nil { - t.Fatal(err) - } -} - -func BenchmarkAllocatePorts(b *testing.B) { - defer reset() - - for i := 0; i < b.N; i++ { - for i := 0; i <= EndPortRange-BeginPortRange; i++ { - port, err := RequestPort(defaultIP, "tcp", 0) - if err != nil { - b.Fatal(err) - } - - if expected := BeginPortRange + i; port != expected { - b.Fatalf("Expected port %d got %d", expected, port) - } - } - reset() - } -} - -func TestPortAllocation(t *testing.T) { - defer reset() - - ip := net.ParseIP("192.168.0.1") - ip2 := net.ParseIP("192.168.0.2") - if port, err := RequestPort(ip, "tcp", 80); err != nil { - t.Fatal(err) - } else if port != 80 { - t.Fatalf("Acquire(80) should return 80, not %d", port) - } - port, err := RequestPort(ip, "tcp", 0) - if err != nil { - t.Fatal(err) - } - if port <= 0 { - t.Fatalf("Acquire(0) should return a non-zero port") - } - - if _, err := RequestPort(ip, "tcp", port); err == nil { - t.Fatalf("Acquiring a port already in use should return an error") - } - - if newPort, err := RequestPort(ip, "tcp", 0); err != nil { - t.Fatal(err) - } else if newPort == port { - t.Fatalf("Acquire(0) allocated the same port twice: %d", port) - } - - if _, err := RequestPort(ip, "tcp", 80); err == nil { - t.Fatalf("Acquiring a port already in use should return an error") - } - if _, err := RequestPort(ip2, "tcp", 80); err != nil { - t.Fatalf("It should be possible to allocate the same port on a different interface") - } - if _, err := RequestPort(ip2, "tcp", 80); err == nil { - t.Fatalf("Acquiring a port already in use should return an error") - } - if err := ReleasePort(ip, "tcp", 80); err != nil { - t.Fatal(err) - } - if _, err := RequestPort(ip, "tcp", 80); err != nil { - t.Fatal(err) - } -} diff -Nru docker.io-0.9.1~dfsg1/networkdriver/portmapper/mapper.go docker.io-1.3.2~dfsg1/networkdriver/portmapper/mapper.go --- docker.io-0.9.1~dfsg1/networkdriver/portmapper/mapper.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/networkdriver/portmapper/mapper.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,131 +0,0 @@ -package portmapper - -import ( - "errors" - "fmt" - "github.com/dotcloud/docker/pkg/iptables" - "github.com/dotcloud/docker/pkg/proxy" - "net" - "sync" -) - -type mapping struct { - proto string - userlandProxy proxy.Proxy - host net.Addr - container net.Addr -} - -var ( - chain *iptables.Chain - lock sync.Mutex - - // udp:ip:port - currentMappings = make(map[string]*mapping) - newProxy = proxy.NewProxy -) - -var ( - ErrUnknownBackendAddressType = errors.New("unknown container address type not supported") - ErrPortMappedForIP = errors.New("port is already mapped to ip") - ErrPortNotMapped = errors.New("port is not mapped") -) - -func SetIptablesChain(c *iptables.Chain) { - chain = c -} - -func Map(container net.Addr, hostIP net.IP, hostPort int) error { - lock.Lock() - defer lock.Unlock() - - var m *mapping - switch container.(type) { - case *net.TCPAddr: - m = &mapping{ - proto: "tcp", - host: &net.TCPAddr{IP: hostIP, Port: hostPort}, - container: container, - } - case *net.UDPAddr: - m = &mapping{ - proto: "udp", - host: &net.UDPAddr{IP: hostIP, Port: hostPort}, - 
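The portmapper indexes mappings by one string key per host ip/port/protocol triple, which is how a duplicate Map call is rejected with ErrPortMappedForIP. A small sketch of that key scheme, mirroring the getKey helper that follows (addrKey is an illustrative name):

package main

import (
	"fmt"
	"net"
)

// addrKey derives the lookup key used to detect duplicate mappings:
// one entry per host ip/port/protocol triple.
func addrKey(a net.Addr) string {
	switch t := a.(type) {
	case *net.TCPAddr:
		return fmt.Sprintf("%s:%d/tcp", t.IP, t.Port)
	case *net.UDPAddr:
		return fmt.Sprintf("%s:%d/udp", t.IP, t.Port)
	}
	return ""
}

func main() {
	fmt.Println(addrKey(&net.TCPAddr{IP: net.ParseIP("192.168.1.5"), Port: 80}))
	// 192.168.1.5:80/tcp
}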
container: container, - } - default: - return ErrUnknownBackendAddressType - } - - key := getKey(m.host) - if _, exists := currentMappings[key]; exists { - return ErrPortMappedForIP - } - - containerIP, containerPort := getIPAndPort(m.container) - if err := forward(iptables.Add, m.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { - return err - } - - p, err := newProxy(m.host, m.container) - if err != nil { - // need to undo the iptables rules before we reutrn - forward(iptables.Delete, m.proto, hostIP, hostPort, containerIP.String(), containerPort) - return err - } - - m.userlandProxy = p - currentMappings[key] = m - - go p.Run() - - return nil -} - -func Unmap(host net.Addr) error { - lock.Lock() - defer lock.Unlock() - - key := getKey(host) - data, exists := currentMappings[key] - if !exists { - return ErrPortNotMapped - } - - data.userlandProxy.Close() - delete(currentMappings, key) - - containerIP, containerPort := getIPAndPort(data.container) - hostIP, hostPort := getIPAndPort(data.host) - if err := forward(iptables.Delete, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { - return err - } - return nil -} - -func getKey(a net.Addr) string { - switch t := a.(type) { - case *net.TCPAddr: - return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "tcp") - case *net.UDPAddr: - return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "udp") - } - return "" -} - -func getIPAndPort(a net.Addr) (net.IP, int) { - switch t := a.(type) { - case *net.TCPAddr: - return t.IP, t.Port - case *net.UDPAddr: - return t.IP, t.Port - } - return nil, 0 -} - -func forward(action iptables.Action, proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error { - if chain == nil { - return nil - } - return chain.Forward(action, sourceIP, sourcePort, proto, containerIP, containerPort) -} diff -Nru docker.io-0.9.1~dfsg1/networkdriver/portmapper/mapper_test.go docker.io-1.3.2~dfsg1/networkdriver/portmapper/mapper_test.go --- docker.io-0.9.1~dfsg1/networkdriver/portmapper/mapper_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/networkdriver/portmapper/mapper_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,107 +0,0 @@ -package portmapper - -import ( - "github.com/dotcloud/docker/pkg/iptables" - "github.com/dotcloud/docker/pkg/proxy" - "net" - "testing" -) - -func init() { - // override this func to mock out the proxy server - newProxy = proxy.NewStubProxy -} - -func reset() { - chain = nil - currentMappings = make(map[string]*mapping) -} - -func TestSetIptablesChain(t *testing.T) { - defer reset() - - c := &iptables.Chain{ - Name: "TEST", - Bridge: "192.168.1.1", - } - - if chain != nil { - t.Fatal("chain should be nil at init") - } - - SetIptablesChain(c) - if chain == nil { - t.Fatal("chain should not be nil after set") - } -} - -func TestMapPorts(t *testing.T) { - dstIp1 := net.ParseIP("192.168.0.1") - dstIp2 := net.ParseIP("192.168.0.2") - dstAddr1 := &net.TCPAddr{IP: dstIp1, Port: 80} - dstAddr2 := &net.TCPAddr{IP: dstIp2, Port: 80} - - srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")} - srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")} - - if err := Map(srcAddr1, dstIp1, 80); err != nil { - t.Fatalf("Failed to allocate port: %s", err) - } - - if Map(srcAddr1, dstIp1, 80) == nil { - t.Fatalf("Port is in use - mapping should have failed") - } - - if Map(srcAddr2, dstIp1, 80) == nil { - t.Fatalf("Port is in use - mapping should have failed") - } - - if err := Map(srcAddr2, 
dstIp2, 80); err != nil { - t.Fatalf("Failed to allocate port: %s", err) - } - - if Unmap(dstAddr1) != nil { - t.Fatalf("Failed to release port") - } - - if Unmap(dstAddr2) != nil { - t.Fatalf("Failed to release port") - } - - if Unmap(dstAddr2) == nil { - t.Fatalf("Port already released, but no error reported") - } -} - -func TestGetUDPKey(t *testing.T) { - addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53} - - key := getKey(addr) - - if expected := "192.168.1.5:53/udp"; key != expected { - t.Fatalf("expected key %s got %s", expected, key) - } -} - -func TestGetTCPKey(t *testing.T) { - addr := &net.TCPAddr{IP: net.ParseIP("192.168.1.5"), Port: 80} - - key := getKey(addr) - - if expected := "192.168.1.5:80/tcp"; key != expected { - t.Fatalf("expected key %s got %s", expected, key) - } -} - -func TestGetUDPIPAndPort(t *testing.T) { - addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53} - - ip, port := getIPAndPort(addr) - if expected := "192.168.1.5"; ip.String() != expected { - t.Fatalf("expected ip %s got %s", expected, ip) - } - - if ep := 53; port != ep { - t.Fatalf("expected port %d got %d", ep, port) - } -} diff -Nru docker.io-0.9.1~dfsg1/networkdriver/utils.go docker.io-1.3.2~dfsg1/networkdriver/utils.go --- docker.io-0.9.1~dfsg1/networkdriver/utils.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/networkdriver/utils.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,118 +0,0 @@ -package networkdriver - -import ( - "encoding/binary" - "errors" - "fmt" - "net" - - "github.com/dotcloud/docker/pkg/netlink" -) - -var ( - networkGetRoutesFct = netlink.NetworkGetRoutes - ErrNoDefaultRoute = errors.New("no default route") -) - -func CheckNameserverOverlaps(nameservers []string, toCheck *net.IPNet) error { - if len(nameservers) > 0 { - for _, ns := range nameservers { - _, nsNetwork, err := net.ParseCIDR(ns) - if err != nil { - return err - } - if NetworkOverlaps(toCheck, nsNetwork) { - return ErrNetworkOverlapsWithNameservers - } - } - } - return nil -} - -func CheckRouteOverlaps(toCheck *net.IPNet) error { - networks, err := networkGetRoutesFct() - if err != nil { - return err - } - - for _, network := range networks { - if network.IPNet != nil && NetworkOverlaps(toCheck, network.IPNet) { - return ErrNetworkOverlaps - } - } - return nil -} - -// Detects overlap between one IPNet and another -func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool { - if firstIP, _ := NetworkRange(netX); netY.Contains(firstIP) { - return true - } - if firstIP, _ := NetworkRange(netY); netX.Contains(firstIP) { - return true - } - return false -} - -// Calculates the first and last IP addresses in an IPNet -func NetworkRange(network *net.IPNet) (net.IP, net.IP) { - var ( - netIP = network.IP.To4() - firstIP = netIP.Mask(network.Mask) - lastIP = net.IPv4(0, 0, 0, 0).To4() - ) - - for i := 0; i < len(lastIP); i++ { - lastIP[i] = netIP[i] | ^network.Mask[i] - } - return firstIP, lastIP -} - -// Given a netmask, calculates the number of available hosts -func NetworkSize(mask net.IPMask) int32 { - m := net.IPv4Mask(0, 0, 0, 0) - for i := 0; i < net.IPv4len; i++ { - m[i] = ^mask[i] - } - return int32(binary.BigEndian.Uint32(m)) + 1 -} - -// Return the IPv4 address of a network interface -func GetIfaceAddr(name string) (net.Addr, error) { - iface, err := net.InterfaceByName(name) - if err != nil { - return nil, err - } - addrs, err := iface.Addrs() - if err != nil { - return nil, err - } - var addrs4 []net.Addr - for _, addr := range addrs { - ip := (addr.(*net.IPNet)).IP - if ip4 
:= ip.To4(); len(ip4) == net.IPv4len { - addrs4 = append(addrs4, addr) - } - } - switch { - case len(addrs4) == 0: - return nil, fmt.Errorf("Interface %v has no IP addresses", name) - case len(addrs4) > 1: - fmt.Printf("Interface %v has more than 1 IPv4 address. Defaulting to using %v\n", - name, (addrs4[0].(*net.IPNet)).IP) - } - return addrs4[0], nil -} - -func GetDefaultRouteIface() (*net.Interface, error) { - rs, err := networkGetRoutesFct() - if err != nil { - return nil, fmt.Errorf("unable to get routes: %v", err) - } - for _, r := range rs { - if r.Default { - return r.Iface, nil - } - } - return nil, ErrNoDefaultRoute -} diff -Nru docker.io-0.9.1~dfsg1/opts/envfile.go docker.io-1.3.2~dfsg1/opts/envfile.go --- docker.io-0.9.1~dfsg1/opts/envfile.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/opts/envfile.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,54 @@ +package opts + +import ( + "bufio" + "fmt" + "os" + "strings" +) + +/* +Read in a line delimited file with environment variables enumerated +*/ +func ParseEnvFile(filename string) ([]string, error) { + fh, err := os.Open(filename) + if err != nil { + return []string{}, err + } + defer fh.Close() + + lines := []string{} + scanner := bufio.NewScanner(fh) + for scanner.Scan() { + line := scanner.Text() + // line is not empty, and not starting with '#' + if len(line) > 0 && !strings.HasPrefix(line, "#") { + if strings.Contains(line, "=") { + data := strings.SplitN(line, "=", 2) + + // trim the front of a variable, but nothing else + variable := strings.TrimLeft(data[0], whiteSpaces) + if strings.ContainsAny(variable, whiteSpaces) { + return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} + } + + // pass the value through, no trimming + lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) + } else { + // if only a pass-through variable is given, clean it up. 
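A hedged usage sketch of ParseEnvFile as introduced above: lines containing '=' are passed through after the variable name is checked for whitespace, and bare names fall into the pass-through branch, which resolves them from the current environment. The temp-file setup is illustrative, and the import path assumes the 1.3.2 tree shown in this diff.

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/docker/docker/opts"
)

func main() {
	f, _ := ioutil.TempFile("", "envfile")
	defer os.Remove(f.Name())
	// "# comment" is skipped, FOO=bar passes through,
	// bare PATH is filled in from the current environment.
	f.WriteString("# comment\nFOO=bar\nPATH\n")
	f.Close()

	env, err := opts.ParseEnvFile(f.Name())
	if err != nil {
		panic(err)
	}
	fmt.Println(env) // [FOO=bar PATH=<current $PATH>]
}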
+ lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) + } + } + } + return lines, nil +} + +var whiteSpaces = " \t" + +type ErrBadEnvVariable struct { + msg string +} + +func (e ErrBadEnvVariable) Error() string { + return fmt.Sprintf("poorly formatted environment: %s", e.msg) +} diff -Nru docker.io-0.9.1~dfsg1/opts/ip.go docker.io-1.3.2~dfsg1/opts/ip.go --- docker.io-0.9.1~dfsg1/opts/ip.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/opts/ip.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,31 @@ +package opts + +import ( + "fmt" + "net" +) + +type IpOpt struct { + *net.IP +} + +func NewIpOpt(ref *net.IP, defaultVal string) *IpOpt { + o := &IpOpt{ + IP: ref, + } + o.Set(defaultVal) + return o +} + +func (o *IpOpt) Set(val string) error { + ip := net.ParseIP(val) + if ip == nil { + return fmt.Errorf("%s is not an ip address", val) + } + (*o.IP) = net.ParseIP(val) + return nil +} + +func (o *IpOpt) String() string { + return (*o.IP).String() +} diff -Nru docker.io-0.9.1~dfsg1/opts/opts.go docker.io-1.3.2~dfsg1/opts/opts.go --- docker.io-0.9.1~dfsg1/opts/opts.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/opts/opts.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,229 @@ +package opts + +import ( + "fmt" + "net" + "net/url" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/docker/api" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" +) + +var ( + alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) + domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) +) + +func ListVar(values *[]string, names []string, usage string) { + flag.Var(newListOptsRef(values, nil), names, usage) +} + +func HostListVar(values *[]string, names []string, usage string) { + flag.Var(newListOptsRef(values, api.ValidateHost), names, usage) +} + +func IPListVar(values *[]string, names []string, usage string) { + flag.Var(newListOptsRef(values, ValidateIPAddress), names, usage) +} + +func DnsSearchListVar(values *[]string, names []string, usage string) { + flag.Var(newListOptsRef(values, ValidateDnsSearch), names, usage) +} + +func IPVar(value *net.IP, names []string, defaultValue, usage string) { + flag.Var(NewIpOpt(value, defaultValue), names, usage) +} + +func MirrorListVar(values *[]string, names []string, usage string) { + flag.Var(newListOptsRef(values, ValidateMirror), names, usage) +} + +// ListOpts type +type ListOpts struct { + values *[]string + validator ValidatorFctType +} + +func NewListOpts(validator ValidatorFctType) ListOpts { + var values []string + return *newListOptsRef(&values, validator) +} + +func newListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { + return &ListOpts{ + values: values, + validator: validator, + } +} + +func (opts *ListOpts) String() string { + return fmt.Sprintf("%v", []string((*opts.values))) +} + +// Set validates if needed the input value and add it to the +// internal slice. +func (opts *ListOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + (*opts.values) = append((*opts.values), value) + return nil +} + +// Delete remove the given element from the slice. +func (opts *ListOpts) Delete(key string) { + for i, k := range *opts.values { + if k == key { + (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) 
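IpOpt above is a flag.Value implementation wrapping net.IP: Set parses and stores, String renders the current value; the docker tree registers it through its mflag fork via IPVar. The same pattern with the standard flag package, as a self-contained sketch (ipValue and the -bind flag are hypothetical):

package main

import (
	"flag"
	"fmt"
	"net"
)

// ipValue implements flag.Value the same way IpOpt does: Set parses and
// stores through the pointer, String renders the current value.
type ipValue struct{ ip *net.IP }

func (v ipValue) Set(s string) error {
	parsed := net.ParseIP(s)
	if parsed == nil {
		return fmt.Errorf("%s is not an ip address", s)
	}
	*v.ip = parsed
	return nil
}

func (v ipValue) String() string { return v.ip.String() }

func main() {
	bind := net.ParseIP("127.0.0.1") // default value
	flag.Var(ipValue{&bind}, "bind", "ip address to bind to")
	flag.Parse()
	fmt.Println(bind)
}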
+ return + } + } +} + +// GetMap returns the content of values in a map in order to avoid +// duplicates. +// FIXME: can we remove this? +func (opts *ListOpts) GetMap() map[string]struct{} { + ret := make(map[string]struct{}) + for _, k := range *opts.values { + ret[k] = struct{}{} + } + return ret +} + +// GetAll returns the values' slice. +// FIXME: Can we remove this? +func (opts *ListOpts) GetAll() []string { + return (*opts.values) +} + +// Get checks the existence of the given key. +func (opts *ListOpts) Get(key string) bool { + for _, k := range *opts.values { + if k == key { + return true + } + } + return false +} + +// Len returns the amount of element in the slice. +func (opts *ListOpts) Len() int { + return len((*opts.values)) +} + +// Validators +type ValidatorFctType func(val string) (string, error) + +func ValidateAttach(val string) (string, error) { + s := strings.ToLower(val) + for _, str := range []string{"stdin", "stdout", "stderr"} { + if s == str { + return s, nil + } + } + return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR.") +} + +func ValidateLink(val string) (string, error) { + if _, err := parsers.PartParser("name:alias", val); err != nil { + return val, err + } + return val, nil +} + +func ValidatePath(val string) (string, error) { + var containerPath string + + if strings.Count(val, ":") > 2 { + return val, fmt.Errorf("bad format for volumes: %s", val) + } + + splited := strings.SplitN(val, ":", 2) + if len(splited) == 1 { + containerPath = splited[0] + val = filepath.Clean(splited[0]) + } else { + containerPath = splited[1] + val = fmt.Sprintf("%s:%s", splited[0], filepath.Clean(splited[1])) + } + + if !filepath.IsAbs(containerPath) { + return val, fmt.Errorf("%s is not an absolute path", containerPath) + } + return val, nil +} + +func ValidateEnv(val string) (string, error) { + arr := strings.Split(val, "=") + if len(arr) > 1 { + return val, nil + } + return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil +} + +func ValidateIPAddress(val string) (string, error) { + var ip = net.ParseIP(strings.TrimSpace(val)) + if ip != nil { + return ip.String(), nil + } + return "", fmt.Errorf("%s is not an ip address", val) +} + +// Validates domain for resolvconf search configuration. +// A zero length domain is represented by . +func ValidateDnsSearch(val string) (string, error) { + if val = strings.Trim(val, " "); val == "." 
{ + return val, nil + } + return validateDomain(val) +} + +func validateDomain(val string) (string, error) { + if alphaRegexp.FindString(val) == "" { + return "", fmt.Errorf("%s is not a valid domain", val) + } + ns := domainRegexp.FindSubmatch([]byte(val)) + if len(ns) > 0 { + return string(ns[1]), nil + } + return "", fmt.Errorf("%s is not a valid domain", val) +} + +func ValidateExtraHost(val string) (string, error) { + arr := strings.Split(val, ":") + if len(arr) != 2 || len(arr[0]) == 0 { + return "", fmt.Errorf("bad format for add-host: %s", val) + } + if _, err := ValidateIPAddress(arr[1]); err != nil { + return "", fmt.Errorf("bad format for add-host: %s", val) + } + return val, nil +} + +// Validates an HTTP(S) registry mirror +func ValidateMirror(val string) (string, error) { + uri, err := url.Parse(val) + if err != nil { + return "", fmt.Errorf("%s is not a valid URI", val) + } + + if uri.Scheme != "http" && uri.Scheme != "https" { + return "", fmt.Errorf("Unsupported scheme %s", uri.Scheme) + } + + if uri.Path != "" || uri.RawQuery != "" || uri.Fragment != "" { + return "", fmt.Errorf("Unsupported path/query/fragment at end of the URI") + } + + return fmt.Sprintf("%s://%s/v1/", uri.Scheme, uri.Host), nil +} diff -Nru docker.io-0.9.1~dfsg1/opts/opts_test.go docker.io-1.3.2~dfsg1/opts/opts_test.go --- docker.io-0.9.1~dfsg1/opts/opts_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/opts/opts_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,90 @@ +package opts + +import ( + "testing" +) + +func TestValidateIPAddress(t *testing.T) { + if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err) + } + +} + +func TestListOpts(t *testing.T) { + o := NewListOpts(nil) + o.Set("foo") + o.String() +} + +func TestValidateDnsSearch(t *testing.T) { + valid := []string{ + `.`, + `a`, + `a.`, + `1.foo`, + `17.foo`, + `foo.bar`, + `foo.bar.baz`, + `foo.bar.`, + `foo.bar.baz`, + `foo1.bar2`, + `foo1.bar2.baz`, + `1foo.2bar.`, + `1foo.2bar.baz`, + `foo-1.bar-2`, + `foo-1.bar-2.baz`, + `foo-1.bar-2.`, + `foo-1.bar-2.baz`, + `1-foo.2-bar`, + `1-foo.2-bar.baz`, + `1-foo.2-bar.`, + `1-foo.2-bar.baz`, + } + + invalid := []string{ + ``, + ` `, + ` `, + `17`, + `17.`, + `.17`, + `17-.`, + `17-.foo`, + `.foo`, + `foo-.bar`, + `-foo.bar`, + `foo.bar-`, + `foo.bar-.baz`, + `foo.-bar`, + `foo.-bar.baz`, + } + + for _, domain := range valid { + if ret, err := ValidateDnsSearch(domain); err != nil || ret == "" { + t.Fatalf("ValidateDnsSearch(`"+domain+"`) got %s %s", ret, err) + } + } + + for _, domain := range invalid { + if ret, err := ValidateDnsSearch(domain); err == nil || ret != "" { + t.Fatalf("ValidateDnsSearch(`"+domain+"`) got %s %s", ret, err) + } + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/archive/archive.go docker.io-1.3.2~dfsg1/pkg/archive/archive.go --- docker.io-0.9.1~dfsg1/pkg/archive/archive.go 1970-01-01 
00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/archive/archive.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,753 @@ +package archive + +import ( + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/system" +) + +type ( + Archive io.ReadCloser + ArchiveReader io.Reader + Compression int + TarOptions struct { + Includes []string + Excludes []string + Compression Compression + NoLchown bool + } + + // Archiver allows the reuse of most utility functions of this package + // with a pluggable Untar function. + Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + } + + // breakoutError is used to differentiate errors related to breaking out + // When testing archive breakout in the unit tests, this error is expected + // in order for the test to pass. + breakoutError error +) + +var ( + ErrNotImplemented = errors.New("Function not implemented") + defaultArchiver = &Archiver{Untar} +) + +const ( + Uncompressed Compression = iota + Bzip2 + Gzip + Xz +) + +func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + } { + if len(source) < len(m) { + log.Debugf("Len too short") + continue + } + if bytes.Compare(m, source[:len(m)]) == 0 { + return compression + } + } + return Uncompressed +} + +func xzDecompress(archive io.Reader) (io.ReadCloser, error) { + args := []string{"xz", "-d", "-c", "-q"} + + return CmdStream(exec.Command(args[0], args[1:]...), archive) +} + +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil { + return nil, err + } + log.Debugf("[tar autodetect] n: %v", bs) + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + gzReader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return readBufWrapper, nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + xzReader, err := xzDecompress(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return readBufWrapper, nil + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, 
gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +func addTarFile(path, name string, tw *tar.Writer, twBuf *bufio.Writer) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + link := "" + if fi.Mode()&os.ModeSymlink != 0 { + if link, err = os.Readlink(path); err != nil { + return err + } + } + + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return err + } + + if fi.IsDir() && !strings.HasSuffix(name, "/") { + name = name + "/" + } + + hdr.Name = name + + stat, ok := fi.Sys().(*syscall.Stat_t) + if ok { + // Currently go does not fill in the major/minors + if stat.Mode&syscall.S_IFBLK == syscall.S_IFBLK || + stat.Mode&syscall.S_IFCHR == syscall.S_IFCHR { + hdr.Devmajor = int64(major(uint64(stat.Rdev))) + hdr.Devminor = int64(minor(uint64(stat.Rdev))) + } + + } + + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + + if err := tw.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg { + file, err := os.Open(path) + if err != nil { + return err + } + + twBuf.Reset(tw) + _, err = io.Copy(twBuf, file) + file.Close() + if err != nil { + return err + } + err = twBuf.Flush() + if err != nil { + return err + } + twBuf.Reset(nil) + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. 
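DetectCompression above recognizes archives purely by magic bytes (bzip2 0x42 0x5A 0x68, gzip 0x1F 0x8B 0x08, xz 0xFD 0x37 0x7A 0x58 0x5A 0x00). A quick check against a real gzip stream, assuming the 1.3.2 import path shown in this diff:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Produce a real gzip stream and confirm the sniffer recognizes
	// its 0x1F 0x8B 0x08 magic prefix.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("hello"))
	zw.Close()

	c := archive.DetectCompression(buf.Bytes())
	fmt.Println(c == archive.Gzip, c.Extension()) // true tar.gz
}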
+ // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file + file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= syscall.S_IFBLK + case tar.TypeChar: + mode |= syscall.S_IFCHR + case tar.TypeFifo: + mode |= syscall.S_IFIFO + } + + if err := syscall.Mknod(path, mode, int(mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + log.Debugf("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) + } + + if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown { + return err + } + + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + return err + } + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + + ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} + // syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and + if hdr.Typeflag != tar.TypeSymlink { + if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } else { + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +func escapeName(name string) string { + escaped := make([]byte, 0) + for i, c := range []byte(name) { + if i == 0 && c == '/' { + continue + } + // all printable chars except "-" which is 0x2d + if (0x20 <= c && c <= 0x7E) && c != 0x2d { + escaped = append(escaped, c) + } else { + escaped = append(escaped, fmt.Sprintf("\\%03o", c)...) 
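The hardlink and symlink cases above both reduce breakout prevention to one rule: resolve the link target against the extraction root and require the result to stay under it. A minimal sketch of that test (insideDest is an illustrative name and is simplified, not hardened like the real check):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// insideDest reports whether joining name onto dest stays inside dest,
// the same idea as the breakout checks above. filepath.Join calls Clean,
// so "../" sequences are resolved before the prefix test.
func insideDest(dest, name string) bool {
	return strings.HasPrefix(filepath.Join(dest, name), filepath.Clean(dest))
}

func main() {
	fmt.Println(insideDest("/tmp/extract", "a/b"))         // true
	fmt.Println(insideDest("/tmp/extract", "../victim/x")) // false
}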
+ } + } + return string(escaped) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.Includes` (if non-nil) or not in `options.Excludes`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + tw := tar.NewWriter(compressWriter) + + go func() { + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + if options.Includes == nil { + options.Includes = []string{"."} + } + + twBuf := pools.BufioWriter32KPool.Get(nil) + defer pools.BufioWriter32KPool.Put(twBuf) + + for _, include := range options.Includes { + filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error { + if err != nil { + log.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil { + return nil + } + + skip, err := fileutils.Matches(relFilePath, options.Excludes) + if err != nil { + log.Debugf("Error matching %s", relFilePath, err) + return err + } + + if skip { + if f.IsDir() { + return filepath.SkipDir + } + return nil + } + + if err := addTarFile(filePath, relFilePath, tw, twBuf); err != nil { + log.Debugf("Can't add file %s to tar: %s", srcPath, err) + } + return nil + }) + } + + // Make sure to check the error on Close. + if err := tw.Close(); err != nil { + log.Debugf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + log.Debugf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + log.Debugf("Can't close pipe writer: %s", err) + } + }() + + return pipeReader, nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `path`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(archive io.Reader, dest string, options *TarOptions) error { + dest = filepath.Clean(dest) + + if options == nil { + options = &TarOptions{} + } + + if archive == nil { + return fmt.Errorf("Empty archive") + } + + if options.Excludes == nil { + options.Excludes = []string{} + } + + decompressedArchive, err := DecompressStream(archive) + if err != nil { + return err + } + defer decompressedArchive.Close() + + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + + // Iterate through the files in the archive. 
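TarWithOptions returns the read end of an io.Pipe immediately and produces the archive from a goroutine, so tar generation and consumption overlap with fixed buffering and no intermediate file. The generic shape of that pattern, as a sketch:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
)

// streamingProducer shows the TarWithOptions shape: hand the caller a
// reader right away, generate the bytes concurrently, and close the
// write end (with the error, if any) when done.
func streamingProducer() io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		_, err := io.WriteString(pw, "streamed payload")
		pw.CloseWithError(err) // a nil error closes cleanly with EOF
	}()
	return pr
}

func main() {
	r := streamingProducer()
	defer r.Close()
	b, _ := ioutil.ReadAll(r)
	fmt.Println(string(b))
}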
+loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/" + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.Excludes { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + if !strings.HasSuffix(hdr.Name, "/") { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = os.MkdirAll(parentPath, 0777) + if err != nil { + return err + } + } + } + + // Prevent symlink breakout + path := filepath.Join(dest, hdr.Name) + if !strings.HasPrefix(path, dest) { + return breakoutError(fmt.Errorf("%q is outside of %q", path, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if fi.IsDir() && hdr.Name == "." { + continue + } + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} + if err := syscall.UtimesNano(path, ts); err != nil { + return err + } + } + + return nil +} + +func (archiver *Archiver) TarUntar(src, dst string) error { + log.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + return archiver.Untar(archive, dst, nil) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) +} + +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + if err := archiver.Untar(archive, dst, nil); err != nil { + return err + } + return nil +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. 
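A hedged usage sketch of the TarUntar convenience wrapper defined above, copying one directory tree into another in a single streamed step (the temp dirs are illustrative; the import path follows this diff):

package main

import (
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	src, _ := ioutil.TempDir("", "src")
	dst, _ := ioutil.TempDir("", "dst")
	defer os.RemoveAll(src)
	defer os.RemoveAll(dst)

	ioutil.WriteFile(filepath.Join(src, "hello.txt"), []byte("hi"), 0644)

	// Tar src and unpack it into dst, piped through memory.
	if err := archive.TarUntar(src, dst); err != nil {
		panic(err)
	}
}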
+func UntarPath(src, dst string) error { + return defaultArchiver.UntarPath(src, dst) +} + +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + // Create dst, copy src's content into it + log.Debugf("Creating dest directory: %s", dst) + if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) { + return err + } + log.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return defaultArchiver.CopyWithTar(src, dst) +} + +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + log.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + // Clean up the trailing / + if dst[len(dst)-1] == '/' { + dst = path.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) { + return err + } + + r, w := io.Pipe() + errC := promise.Go(func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Name = filepath.Base(dst) + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }) + defer func() { + if er := <-errC; err != nil { + err = er + } + }() + return archiver.Untar(r, filepath.Dir(dst), nil) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// If `dst` ends with a trailing slash '/', the final destination path +// will be `dst/base(src)`. +func CopyFileWithTar(src, dst string) (err error) { + return defaultArchiver.CopyFileWithTar(src, dst) +} + +// CmdStream executes a command, and returns its stdout as a stream. +// If the command fails to run or doesn't complete successfully, an error +// will be returned, including anything written on stderr. 
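A usage sketch for CmdStream as documented above, piping input through an external command and reading its stdout as a stream; the tr invocation is illustrative and assumes a Unix environment.

package main

import (
	"fmt"
	"io/ioutil"
	"os/exec"
	"strings"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// CmdStream starts the command itself; stdin is fed from the reader.
	out, err := archive.CmdStream(exec.Command("tr", "a-z", "A-Z"), strings.NewReader("hello\n"))
	if err != nil {
		panic(err)
	}
	b, err := ioutil.ReadAll(out)
	if err != nil {
		panic(err) // a failing command surfaces here, stderr text included
	}
	fmt.Print(string(b)) // HELLO
}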
+func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { + if input != nil { + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, err + } + // Write stdin if any + go func() { + io.Copy(stdin, input) + stdin.Close() + }() + } + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + stderr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + pipeR, pipeW := io.Pipe() + errChan := make(chan []byte) + // Collect stderr, we will use it in case of an error + go func() { + errText, e := ioutil.ReadAll(stderr) + if e != nil { + errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")") + } + errChan <- errText + }() + // Copy stdout to the returned pipe + go func() { + _, err := io.Copy(pipeW, stdout) + if err != nil { + pipeW.CloseWithError(err) + } + errText := <-errChan + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText)) + } else { + pipeW.Close() + } + }() + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, err + } + return pipeR, nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src Archive, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if err = f.Sync(); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{f, size}, nil +} + +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + if err != nil { + os.Remove(archive.File.Name()) + } + return n, err +} diff -Nru docker.io-0.9.1~dfsg1/pkg/archive/archive_test.go docker.io-1.3.2~dfsg1/pkg/archive/archive_test.go --- docker.io-0.9.1~dfsg1/pkg/archive/archive_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/archive/archive_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,448 @@ +package archive + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "testing" + "time" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +func TestCmdStreamLargeStderr(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") + out, err := CmdStream(cmd, nil) + if err != nil { + t.Fatalf("Failed to start command: %s", err) + } + errCh := make(chan error) + go func() { + _, err := io.Copy(ioutil.Discard, out) + errCh <- err + }() + select { + case err := <-errCh: + if err != nil { + t.Fatalf("Command should not have failed (err=%.100s...)", err) + } + case <-time.After(5 * time.Second): + t.Fatalf("Command did not complete in 5 seconds; probable deadlock") + } +} + +func TestCmdStreamBad(t *testing.T) { + badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") + out, err := CmdStream(badCmd, nil) + if err != nil { + t.Fatalf("Failed to start command: %s", err) + } + if output, err := ioutil.ReadAll(out); err == nil { + t.Fatalf("Command should have failed") + } else if 
err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { + t.Fatalf("Wrong error value (%s)", err) + } else if s := string(output); s != "hello\n" { + t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) + } +} + +func TestCmdStreamGood(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0") + out, err := CmdStream(cmd, nil) + if err != nil { + t.Fatal(err) + } + if output, err := ioutil.ReadAll(out); err != nil { + t.Fatalf("Command should not have failed (err=%s)", err) + } else if s := string(output); s != "hello\n" { + t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) + } +} + +func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { + archive, err := TarWithOptions(origin, options) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + buf := make([]byte, 10) + if _, err := archive.Read(buf); err != nil { + return nil, err + } + wrap := io.MultiReader(bytes.NewReader(buf), archive) + + detectedCompression := DetectCompression(buf) + compression := options.Compression + if detectedCompression.Extension() != compression.Extension() { + return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) + } + + tmp, err := ioutil.TempDir("", "docker-test-untar") + if err != nil { + return nil, err + } + defer os.RemoveAll(tmp) + if err := Untar(wrap, tmp, nil); err != nil { + return nil, err + } + if _, err := os.Stat(tmp); err != nil { + return nil, err + } + + return ChangesDirs(origin, tmp) +} + +func TestTarUntar(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { + t.Fatal(err) + } + + for _, c := range []Compression{ + Uncompressed, + Gzip, + } { + changes, err := tarUntar(t, origin, &TarOptions{ + Compression: c, + Excludes: []string{"3"}, + }) + + if err != nil { + t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) + } + + if len(changes) != 1 || changes[0].Path != "/3" { + t.Fatalf("Unexpected differences after tarUntar: %v", changes) + } + } +} + +func TestTarWithOptions(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + + cases := []struct { + opts *TarOptions + numChanges int + }{ + {&TarOptions{Includes: []string{"1"}}, 1}, + {&TarOptions{Excludes: []string{"2"}}, 1}, + } + for _, testCase := range cases { + changes, err := tarUntar(t, origin, testCase.opts) + if err != nil { + t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) + } + if len(changes) != testCase.numChanges { + t.Errorf("Expected %d changes, got %d for %+v:", + testCase.numChanges, len(changes), testCase.opts) + } + } +} + +// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz +// use PAX Global Extended 
Headers. +// Failing prevents the archives from being uncompressed during ADD +func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { + hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} + tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true) + if err != nil { + t.Fatal(err) + } +} + +// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. +// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. +func TestUntarUstarGnuConflict(t *testing.T) { + f, err := os.Open("testdata/broken.tar") + if err != nil { + t.Fatal(err) + } + found := false + tr := tar.NewReader(f) + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + t.Fatal(err) + } + if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { + found = true + break + } + } + if !found { + t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") + } +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} + +func BenchmarkTarUntar(b *testing.B) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := path.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + b.SetBytes(int64(n)) + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + for n := 0; n < b.N; n++ { + err := TarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + os.RemoveAll(target) + } +} + +func TestUntarInvalidFilenames(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestUntarInvalidHardlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarInvalidSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try writing to victim/newdir/newfile with a symlink in the path + { + // this header needs to be before the next one, or else there is an error + Name: "dir/loophole", + Typeflag: tar.TypeSymlink, + Linkname: "../../victim", + Mode: 0755, + }, + { + Name: "dir/loophole/newdir/newfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/archive/changes.go docker.io-1.3.2~dfsg1/pkg/archive/changes.go --- docker.io-0.9.1~dfsg1/pkg/archive/changes.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/archive/changes.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,411 @@ +package archive + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" +) + +type ChangeType int + +const ( + ChangeModify = iota + ChangeAdd + ChangeDelete +) + +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + var kind string + switch change.Kind { + case ChangeModify: + kind = "C" + case ChangeAdd: + kind = "A" + case ChangeDelete: + kind = "D" + } + return fmt.Sprintf("%s %s", kind, change.Path) +} + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + var changes []Change + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + path = filepath.Join("/", path) + + // Skip root + if path == "/" { + return nil + } + + // Skip AUFS metadata + if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched { + return err + } + + change := Change{ + Path: path, + } + + // Find out what kind of modification happened + file := filepath.Base(path) + // If there is a whiteout, then the file was removed + if strings.HasPrefix(file, ".wh.") { + originalFile := file[len(".wh."):] + change.Path = filepath.Join(filepath.Dir(path), originalFile) + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. 
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +type FileInfo struct { + parent *FileInfo + name string + stat syscall.Stat_t + children map[string]*FileInfo + capability []byte + added bool +} + +func (root *FileInfo) LookUp(path string) *FileInfo { + parent := root + if path == "/" { + return root + } + + pathElements := strings.Split(path, "/") + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + return "/" + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode&syscall.S_IFDIR == syscall.S_IFDIR +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions. + // Also, we only recurse on the old dir if the new info is a directory; + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild, _ := oldChildren[name] + if oldChild != nil { + // change? + oldStat := &oldChild.stat + newStat := &newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if oldStat.Mode != newStat.Mode || + oldStat.Uid != newStat.Uid || + oldStat.Gid != newStat.Gid || + oldStat.Rdev != newStat.Rdev || + // Don't look at size for dirs, it's not a good measure of change + (oldStat.Size != newStat.Size && oldStat.Mode&syscall.S_IFDIR != syscall.S_IFDIR) || + !sameFsTimeSpec(system.GetLastModification(oldStat), system.GetLastModification(newStat)) || + bytes.Compare(oldChild.capability, newChild.capability) != 0 { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. 
This is needed to properly save and restore filesystem permissions. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != "/" { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + root := &FileInfo{ + name: "/", + children: make(map[string]*FileInfo), + } + return root +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + relPath = filepath.Join("/", relPath) + + if relPath == "/" { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + } + + if err := syscall.Lstat(path, &info.stat); err != nil { + return err + } + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + if oldDir != "" { + oldRoot, err1 = collectFileInfo(oldDir) + } + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, err + } + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var size int64 + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, _ := os.Lstat(file) + if fileInfo != nil && !fileInfo.IsDir() { + size += fileInfo.Size() + } + } + } + return size +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change) (Archive, error) { + reader, writer := io.Pipe() + tw := tar.NewWriter(writer) + + go func() { + twBuf := pools.BufioWriter32KPool.Get(nil) + defer pools.BufioWriter32KPool.Put(twBuf) + // In general we log errors here but ignore them because + // during e.g. 
a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := tw.WriteHeader(hdr); err != nil { + log.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := addTarFile(path, change.Path[1:], tw, twBuf); err != nil { + log.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. + if err := tw.Close(); err != nil { + log.Debugf("Can't close layer: %s", err) + } + writer.Close() + }() + return reader, nil +} diff -Nru docker.io-0.9.1~dfsg1/pkg/archive/changes_test.go docker.io-1.3.2~dfsg1/pkg/archive/changes_test.go --- docker.io-0.9.1~dfsg1/pkg/archive/changes_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/archive/changes_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,301 @@ +package archive + +import ( + "io/ioutil" + "os" + "os/exec" + "path" + "sort" + "testing" + "time" +) + +func max(x, y int) int { + if x >= y { + return x + } + return y +} + +func copyDir(src, dst string) error { + cmd := exec.Command("cp", "-a", src, dst) + if err := cmd.Run(); err != nil { + return err + } + return nil +} + +// Helper to sort []Change by path +type byPath struct{ changes []Change } + +func (b byPath) Less(i, j int) bool { return b.changes[i].Path < b.changes[j].Path } +func (b byPath) Len() int { return len(b.changes) } +func (b byPath) Swap(i, j int) { b.changes[i], b.changes[j] = b.changes[j], b.changes[i] } + +type FileType uint32 + +const ( + Regular FileType = iota + Dir + Symlink +) + +type FileData struct { + filetype FileType + path string + contents string + permissions os.FileMode +} + +func createSampleDir(t *testing.T, root string) { + files := []FileData{ + {Regular, "file1", "file1\n", 0600}, + {Regular, "file2", "file2\n", 0666}, + {Regular, "file3", "file3\n", 0404}, + {Regular, "file4", "file4\n", 0600}, + {Regular, "file5", "file5\n", 0600}, + {Regular, "file6", "file6\n", 0600}, + {Regular, "file7", "file7\n", 0600}, + {Dir, "dir1", "", 0740}, + {Regular, "dir1/file1-1", "file1-1\n", 01444}, + {Regular, "dir1/file1-2", "file1-2\n", 0666}, + {Dir, "dir2", "", 0700}, + {Regular, "dir2/file2-1", "file2-1\n", 0666}, + {Regular, "dir2/file2-2", "file2-2\n", 0666}, + {Dir, "dir3", "", 0700}, + {Regular, "dir3/file3-1", "file3-1\n", 0666}, + {Regular, "dir3/file3-2", "file3-2\n", 0666}, + {Dir, "dir4", "", 0700}, + {Regular, "dir4/file3-1", "file4-1\n", 0666}, + {Regular, "dir4/file3-2", "file4-2\n", 0666}, + {Symlink, "symlink1", "target1", 0666}, + {Symlink, "symlink2", "target2", 0666}, + } + + now := time.Now() + for _, info := range files { + p := path.Join(root, info.path) + if info.filetype == Dir { + if err := os.MkdirAll(p, info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Regular { + if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Symlink { + if err := os.Symlink(info.contents, p); err != nil { + t.Fatal(err) + } + } + + if info.filetype != Symlink { + // Set a consistent ctime, 
atime for all files and dirs + if err := os.Chtimes(p, now, now); err != nil { + t.Fatal(err) + } + } + } +} + +// Create a directory, copy it, and make sure we report no changes between the two +func TestChangesDirsEmpty(t *testing.T) { + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + if len(changes) != 0 { + t.Fatalf("Reported changes for identical dirs: %v", changes) + } + os.RemoveAll(src) + os.RemoveAll(dst) +} + +func mutateSampleDir(t *testing.T, root string) { + // Remove a regular file + if err := os.RemoveAll(path.Join(root, "file1")); err != nil { + t.Fatal(err) + } + + // Remove a directory + if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { + t.Fatal(err) + } + + // Remove a symlink + if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { + t.Fatal(err) + } + + // Rewrite a file + if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { + t.Fatal(err) + } + + // Replace a file + if err := os.RemoveAll(path.Join(root, "file3")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { + t.Fatal(err) + } + + // Touch file + if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } + + // Replace file with dir + if err := os.RemoveAll(path.Join(root, "file5")); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { + t.Fatal(err) + } + + // Create new file + if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { + t.Fatal(err) + } + + // Create new dir + if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { + t.Fatal(err) + } + + // Create a new symlink + if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { + t.Fatal(err) + } + + // Change a symlink + if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + + // Replace dir with file + if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { + t.Fatal(err) + } + + // Touch dir + if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } +} + +func TestChangesDirsMutated(t *testing.T) { + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + defer os.RemoveAll(dst) + + mutateSampleDir(t, dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + sort.Sort(byPath{changes}) + + expectedChanges := []Change{ + {"/dir1", ChangeDelete}, + {"/dir2", ChangeModify}, + {"/dir3", ChangeModify}, + {"/dirnew", ChangeAdd}, + {"/file1", ChangeDelete}, + {"/file2", ChangeModify}, + {"/file3", ChangeModify}, + {"/file4", ChangeModify}, + {"/file5", ChangeModify}, + {"/filenew", ChangeAdd}, + {"/symlink1", ChangeDelete}, + {"/symlink2", ChangeModify}, + 
{"/symlinknew", ChangeAdd}, + } + + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) + } + } +} + +func TestApplyLayer(t *testing.T) { + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + defer os.RemoveAll(src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + mutateSampleDir(t, dst) + defer os.RemoveAll(dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + layer, err := ExportChanges(dst, changes) + if err != nil { + t.Fatal(err) + } + + layerCopy, err := NewTempArchive(layer, "") + if err != nil { + t.Fatal(err) + } + + if err := ApplyLayer(src, layerCopy); err != nil { + t.Fatal(err) + } + + changes2, err := ChangesDirs(src, dst) + if err != nil { + t.Fatal(err) + } + + if len(changes2) != 0 { + t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/archive/diff.go docker.io-1.3.2~dfsg1/pkg/archive/diff.go --- docker.io-0.9.1~dfsg1/pkg/archive/diff.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/archive/diff.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,164 @@ +package archive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + + "github.com/docker/docker/pkg/pools" +) + +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor +func mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. +func ApplyLayer(dest string, layer ArchiveReader) error { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask := syscall.Umask(0) + defer syscall.Umask(oldmask) + + layer, err := DecompressStream(layer) + if err != nil { + return err + } + + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + if !strings.HasSuffix(hdr.Name, "/") { + // Not the root directory, ensure that the parent directory exists. + // This happened in some tests where an image had a tarfile without any + // parent directories. 
+ parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = os.MkdirAll(parentPath, 0600) + if err != nil { + return err + } + } + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, ".wh..wh.") { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in it so that + // such hardlinks can be resolved. + if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { + return err + } + defer os.RemoveAll(aufsTempdir) + } + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true); err != nil { + return err + } + } + continue + } + + path := filepath.Join(dest, hdr.Name) + base := filepath.Base(path) + + // Prevent symlink breakout + if !strings.HasPrefix(path, dest) { + return breakoutError(fmt.Errorf("%q is outside of %q", path, dest)) + } + + if strings.HasPrefix(base, ".wh.") { + originalBase := base[len(".wh."):] + originalPath := filepath.Join(filepath.Dir(path), originalBase) + if err := os.RemoveAll(originalPath); err != nil { + return err + } + } else { + // If path exists we almost always just want to remove and replace it. + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := createTarFile(path, dest, srcHdr, srcData, true); err != nil { + return err + } + + // Directory mtimes must be handled at the end to prevent further + // file creation in them from modifying the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} + if err := syscall.UtimesNano(path, ts); err != nil { + return err + } + } + + return nil +} diff -Nru docker.io-0.9.1~dfsg1/pkg/archive/diff_test.go docker.io-1.3.2~dfsg1/pkg/archive/diff_test.go --- docker.io-0.9.1~dfsg1/pkg/archive/diff_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/archive/diff_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,191 @@ +package archive + +import ( + "testing" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +func TestApplyLayerInvalidFilenames(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: 
tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestApplyLayerInvalidHardlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestApplyLayerInvalidSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/archive/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/archive/MAINTAINERS --- docker.io-0.9.1~dfsg1/pkg/archive/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/archive/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,2 @@ +Cristian Staretu (@unclejack) +Tibor Vass (@tiborvass) diff -Nru docker.io-0.9.1~dfsg1/pkg/archive/README.md docker.io-1.3.2~dfsg1/pkg/archive/README.md --- docker.io-0.9.1~dfsg1/pkg/archive/README.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/archive/README.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. Binary files /tmp/xlUMNaLkBE/docker.io-0.9.1~dfsg1/pkg/archive/testdata/broken.tar and /tmp/0HY7LDwBQ9/docker.io-1.3.2~dfsg1/pkg/archive/testdata/broken.tar differ diff -Nru docker.io-0.9.1~dfsg1/pkg/archive/time_linux.go docker.io-1.3.2~dfsg1/pkg/archive/time_linux.go --- docker.io-0.9.1~dfsg1/pkg/archive/time_linux.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/archive/time_linux.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff -Nru docker.io-0.9.1~dfsg1/pkg/archive/time_unsupported.go docker.io-1.3.2~dfsg1/pkg/archive/time_unsupported.go --- docker.io-0.9.1~dfsg1/pkg/archive/time_unsupported.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/archive/time_unsupported.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,16 @@ +// +build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff -Nru docker.io-0.9.1~dfsg1/pkg/archive/utils_test.go docker.io-1.3.2~dfsg1/pkg/archive/utils_test.go --- docker.io-0.9.1~dfsg1/pkg/archive/utils_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/archive/utils_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,166 @@ +package archive + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +var testUntarFns = map[string]func(string, io.Reader) error{ + "untar": func(dest string, r io.Reader) error { + return Untar(r, dest, nil) + }, + "applylayer": func(dest string, r io.Reader) error { + return ApplyLayer(dest, ArchiveReader(r)) + }, +} + +// testBreakout is a helper function that, within the provided `tmpdir` directory, +// creates a `victim` folder with a generated `hello` file in it. +// `untar` extracts to a directory named `dest`, the tar file created from `headers`. +// +// Here are the tested scenarios: +// - removed `victim` folder (write) +// - removed files from `victim` folder (write) +// - new files in `victim` folder (write) +// - modified files in `victim` folder (write) +// - file in `dest` with same content as `victim/hello` (read) +// +// When using testBreakout make sure you cover one of the scenarios listed above. 
+func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { + tmpdir, err := ioutil.TempDir("", tmpdir) + if err != nil { + return err + } + defer os.RemoveAll(tmpdir) + + dest := filepath.Join(tmpdir, "dest") + if err := os.Mkdir(dest, 0755); err != nil { + return err + } + + victim := filepath.Join(tmpdir, "victim") + if err := os.Mkdir(victim, 0755); err != nil { + return err + } + hello := filepath.Join(victim, "hello") + helloData, err := time.Now().MarshalText() + if err != nil { + return err + } + if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { + return err + } + helloStat, err := os.Stat(hello) + if err != nil { + return err + } + + reader, writer := io.Pipe() + go func() { + t := tar.NewWriter(writer) + for _, hdr := range headers { + t.WriteHeader(hdr) + } + t.Close() + }() + + untar := testUntarFns[untarFn] + if untar == nil { + return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) + } + if err := untar(dest, reader); err != nil { + if _, ok := err.(breakoutError); !ok { + // If untar returns an error unrelated to an archive breakout, + // then consider this an unexpected error and abort. + return err + } + // Here, untar detected the breakout. + // Let's move on to verifying that indeed there was no breakout. + fmt.Printf("breakoutError: %v\n", err) + } + + // Check victim folder + f, err := os.Open(victim) + if err != nil { + // codepath taken if victim folder was removed + return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) + } + defer f.Close() + + // Check contents of victim folder + // + // We are only interested in getting 2 files from the victim folder, because if all is well + // we expect only one result, the `hello` file. If there is a second result, it cannot + // hold the same name `hello` and we assume that a new file got created in the victim folder. + // That is enough to detect an archive breakout. + names, err := f.Readdirnames(2) + if err != nil { + // codepath taken if victim is not a folder + return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) + } + for _, name := range names { + if name != "hello" { + // codepath taken if new file was created in victim folder + return fmt.Errorf("archive breakout: new file %q", name) + } + } + + // Check victim/hello + f, err = os.Open(hello) + if err != nil { + // codepath taken if read permissions were removed + return fmt.Errorf("archive breakout: could not open %q: %v", hello, err) + } + defer f.Close() + b, err := ioutil.ReadAll(f) + if err != nil { + return err + } + fi, err := f.Stat() + if err != nil { + return err + } + if helloStat.IsDir() != fi.IsDir() || + // TODO: cannot check for fi.ModTime() change + helloStat.Mode() != fi.Mode() || + helloStat.Size() != fi.Size() || + !bytes.Equal(helloData, b) { + // codepath taken if hello has been modified + return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v.", hello, helloData, b, helloStat, fi) + } + + // Check that nothing in dest/ has the same content as victim/hello. + // Since victim/hello was generated with time.Now(), it is safe to assume + // that any file whose content matches exactly victim/hello managed somehow + // to access victim/hello. 
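+ // The walk below compares every regular file under dest against
+ // helloData and reports a breakout on the first exact match.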
+ return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + if err != nil { + // skip directory if error + return filepath.SkipDir + } + // enter directory + return nil + } + if err != nil { + // skip file if error + return nil + } + b, err := ioutil.ReadFile(path) + if err != nil { + // Houston, we have a problem. Aborting (space)walk. + return err + } + if bytes.Equal(helloData, b) { + return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) + } + return nil + }) +} diff -Nru docker.io-0.9.1~dfsg1/pkg/archive/wrap.go docker.io-1.3.2~dfsg1/pkg/archive/wrap.go --- docker.io-0.9.1~dfsg1/pkg/archive/wrap.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/archive/wrap.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,59 @@ +package archive + +import ( + "bytes" + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "io/ioutil" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./emptyfile with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (Archive, error) { + files := parseStringPairs(input...) + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return ioutil.NopCloser(buf), nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff -Nru docker.io-0.9.1~dfsg1/pkg/broadcastwriter/broadcastwriter.go docker.io-1.3.2~dfsg1/pkg/broadcastwriter/broadcastwriter.go --- docker.io-0.9.1~dfsg1/pkg/broadcastwriter/broadcastwriter.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/broadcastwriter/broadcastwriter.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,101 @@ +package broadcastwriter + +import ( + "bytes" + "io" + "sync" + "time" + + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/log" +) + +// BroadcastWriter accumulates multiple io.WriteClosers by stream. +type BroadcastWriter struct { + sync.Mutex + buf *bytes.Buffer + jsLogBuf *bytes.Buffer + streams map[string](map[io.WriteCloser]struct{}) +} + +// AddWriter adds a new io.WriteCloser for stream. +// If stream is "", then all writes proceed as is. Otherwise every line from +// input will be packed into a serialized jsonlog.JSONLog. +func (w *BroadcastWriter) AddWriter(writer io.WriteCloser, stream string) { + w.Lock() + if _, ok := w.streams[stream]; !ok { + w.streams[stream] = make(map[io.WriteCloser]struct{}) + } + w.streams[stream][writer] = struct{}{} + w.Unlock() +} + +// Write writes bytes to all writers. 
Failed writers will be evicted during +// this call. +func (w *BroadcastWriter) Write(p []byte) (n int, err error) { + created := time.Now().UTC() + w.Lock() + if writers, ok := w.streams[""]; ok { + for sw := range writers { + if n, err := sw.Write(p); err != nil || n != len(p) { + // On error, evict the writer + delete(writers, sw) + } + } + } + if w.jsLogBuf == nil { + w.jsLogBuf = new(bytes.Buffer) + w.jsLogBuf.Grow(1024) + } + w.buf.Write(p) + for { + line, err := w.buf.ReadString('\n') + if err != nil { + w.buf.Write([]byte(line)) + break + } + for stream, writers := range w.streams { + if stream == "" { + continue + } + jsonLog := jsonlog.JSONLog{Log: line, Stream: stream, Created: created} + err = jsonLog.MarshalJSONBuf(w.jsLogBuf) + if err != nil { + log.Errorf("Error making JSON log line: %s", err) + continue + } + w.jsLogBuf.WriteByte('\n') + b := w.jsLogBuf.Bytes() + for sw := range writers { + if _, err := sw.Write(b); err != nil { + delete(writers, sw) + } + } + } + w.jsLogBuf.Reset() + } + w.jsLogBuf.Reset() + w.Unlock() + return len(p), nil +} + +// Clean closes and removes all writers. Last non-eol-terminated part of data +// will be saved. +func (w *BroadcastWriter) Clean() error { + w.Lock() + for _, writers := range w.streams { + for w := range writers { + w.Close() + } + } + w.streams = make(map[string](map[io.WriteCloser]struct{})) + w.Unlock() + return nil +} + +func New() *BroadcastWriter { + return &BroadcastWriter{ + streams: make(map[string](map[io.WriteCloser]struct{})), + buf: bytes.NewBuffer(nil), + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/broadcastwriter/broadcastwriter_test.go docker.io-1.3.2~dfsg1/pkg/broadcastwriter/broadcastwriter_test.go --- docker.io-0.9.1~dfsg1/pkg/broadcastwriter/broadcastwriter_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/broadcastwriter/broadcastwriter_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,144 @@ +package broadcastwriter + +import ( + "bytes" + "errors" + + "testing" +) + +type dummyWriter struct { + buffer bytes.Buffer + failOnWrite bool +} + +func (dw *dummyWriter) Write(p []byte) (n int, err error) { + if dw.failOnWrite { + return 0, errors.New("Fake fail") + } + return dw.buffer.Write(p) +} + +func (dw *dummyWriter) String() string { + return dw.buffer.String() +} + +func (dw *dummyWriter) Close() error { + return nil +} + +func TestBroadcastWriter(t *testing.T) { + writer := New() + + // Test 1: Both bufferA and bufferB should contain "foo" + bufferA := &dummyWriter{} + writer.AddWriter(bufferA, "") + bufferB := &dummyWriter{} + writer.AddWriter(bufferB, "") + writer.Write([]byte("foo")) + + if bufferA.String() != "foo" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + + if bufferB.String() != "foo" { + t.Errorf("Buffer contains %v", bufferB.String()) + } + + // Test2: bufferA and bufferB should contain "foobar", + // while bufferC should only contain "bar" + bufferC := &dummyWriter{} + writer.AddWriter(bufferC, "") + writer.Write([]byte("bar")) + + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + + if bufferB.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferB.String()) + } + + if bufferC.String() != "bar" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + + // Test3: Test eviction on failure + bufferA.failOnWrite = true + writer.Write([]byte("fail")) + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + if bufferC.String() != "barfail" { + t.Errorf("Buffer contains %v", 
bufferC.String()) + } + // Even though we reset the flag, no more writes should go in there + bufferA.failOnWrite = false + writer.Write([]byte("test")) + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + if bufferC.String() != "barfailtest" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + + writer.Clean() +} + +type devNullCloser int + +func (d devNullCloser) Close() error { + return nil +} + +func (d devNullCloser) Write(buf []byte) (int, error) { + return len(buf), nil +} + +// This test checks for races. It is only useful when run with the race detector. +func TestRaceBroadcastWriter(t *testing.T) { + writer := New() + c := make(chan bool) + go func() { + writer.AddWriter(devNullCloser(0), "") + c <- true + }() + writer.Write([]byte("hello")) + <-c +} + +func BenchmarkBroadcastWriter(b *testing.B) { + writer := New() + setUpWriter := func() { + for i := 0; i < 100; i++ { + writer.AddWriter(devNullCloser(0), "stdout") + writer.AddWriter(devNullCloser(0), "stderr") + writer.AddWriter(devNullCloser(0), "") + } + } + testLine := "Line that thinks that it is log line from docker" + var buf bytes.Buffer + for i := 0; i < 100; i++ { + buf.Write([]byte(testLine + "\n")) + } + // line without eol + buf.Write([]byte(testLine)) + testText := buf.Bytes() + b.SetBytes(int64(5 * len(testText))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + setUpWriter() + b.StartTimer() + + for j := 0; j < 5; j++ { + if _, err := writer.Write(testText); err != nil { + b.Fatal(err) + } + } + + b.StopTimer() + writer.Clean() + b.StartTimer() + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/cgroups/cgroups.go docker.io-1.3.2~dfsg1/pkg/cgroups/cgroups.go --- docker.io-0.9.1~dfsg1/pkg/cgroups/cgroups.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/cgroups/cgroups.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,250 +0,0 @@ -package cgroups - -import ( - "bufio" - "fmt" - "github.com/dotcloud/docker/pkg/mount" - "io" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" -) - -type Cgroup struct { - Name string `json:"name,omitempty"` - Parent string `json:"parent,omitempty"` - - DeviceAccess bool `json:"device_access,omitempty"` // name of parent cgroup or slice - Memory int64 `json:"memory,omitempty"` // Memory limit (in bytes) - MemorySwap int64 `json:"memory_swap,omitempty"` // Total memory usage (memory + swap); set `-1' to disable swap - CpuShares int64 `json:"cpu_shares,omitempty"` // CPU shares (relative weight vs. other containers) -} - -// https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt -func FindCgroupMountpoint(subsystem string) (string, error) { - mounts, err := mount.GetMounts() - if err != nil { - return "", err - } - - for _, mount := range mounts { - if mount.Fstype == "cgroup" { - for _, opt := range strings.Split(mount.VfsOpts, ",") { - if opt == subsystem { - return mount.Mountpoint, nil - } - } - } - } - return "", fmt.Errorf("cgroup mountpoint not found for %s", subsystem) -} - -// Returns the relative path to the cgroup docker is running in. 
-func GetThisCgroupDir(subsystem string) (string, error) { - f, err := os.Open("/proc/self/cgroup") - if err != nil { - return "", err - } - defer f.Close() - - return parseCgroupFile(subsystem, f) -} - -func GetInitCgroupDir(subsystem string) (string, error) { - f, err := os.Open("/proc/1/cgroup") - if err != nil { - return "", err - } - defer f.Close() - - return parseCgroupFile(subsystem, f) -} - -func (c *Cgroup) Path(root, subsystem string) (string, error) { - cgroup := c.Name - if c.Parent != "" { - cgroup = filepath.Join(c.Parent, cgroup) - } - initPath, err := GetInitCgroupDir(subsystem) - if err != nil { - return "", err - } - return filepath.Join(root, subsystem, initPath, cgroup), nil -} - -func (c *Cgroup) Join(root, subsystem string, pid int) (string, error) { - path, err := c.Path(root, subsystem) - if err != nil { - return "", err - } - if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { - return "", err - } - if err := writeFile(path, "tasks", strconv.Itoa(pid)); err != nil { - return "", err - } - return path, nil -} - -func (c *Cgroup) Cleanup(root string) error { - get := func(subsystem string) string { - path, _ := c.Path(root, subsystem) - return path - } - - for _, path := range []string{ - get("memory"), - get("devices"), - get("cpu"), - } { - os.RemoveAll(path) - } - return nil -} - -func parseCgroupFile(subsystem string, r io.Reader) (string, error) { - s := bufio.NewScanner(r) - for s.Scan() { - if err := s.Err(); err != nil { - return "", err - } - text := s.Text() - parts := strings.Split(text, ":") - for _, subs := range strings.Split(parts[1], ",") { - if subs == subsystem { - return parts[2], nil - } - } - } - return "", fmt.Errorf("cgroup '%s' not found in /proc/self/cgroup", subsystem) -} - -func writeFile(dir, file, data string) error { - return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700) -} - -func (c *Cgroup) Apply(pid int) error { - // We have two implementation of cgroups support, one is based on - // systemd and the dbus api, and one is based on raw cgroup fs operations - // following the pre-single-writer model docs at: - // http://www.freedesktop.org/wiki/Software/systemd/PaxControlGroups/ - // - // we can pick any subsystem to find the root - cgroupRoot, err := FindCgroupMountpoint("cpu") - if err != nil { - return err - } - cgroupRoot = filepath.Dir(cgroupRoot) - - if _, err := os.Stat(cgroupRoot); err != nil { - return fmt.Errorf("cgroups fs not found") - } - if err := c.setupDevices(cgroupRoot, pid); err != nil { - return err - } - if err := c.setupMemory(cgroupRoot, pid); err != nil { - return err - } - if err := c.setupCpu(cgroupRoot, pid); err != nil { - return err - } - return nil -} - -func (c *Cgroup) setupDevices(cgroupRoot string, pid int) (err error) { - if !c.DeviceAccess { - dir, err := c.Join(cgroupRoot, "devices", pid) - if err != nil { - return err - } - - defer func() { - if err != nil { - os.RemoveAll(dir) - } - }() - - if err := writeFile(dir, "devices.deny", "a"); err != nil { - return err - } - - allow := []string{ - // /dev/null, zero, full - "c 1:3 rwm", - "c 1:5 rwm", - "c 1:7 rwm", - - // consoles - "c 5:1 rwm", - "c 5:0 rwm", - "c 4:0 rwm", - "c 4:1 rwm", - - // /dev/urandom,/dev/random - "c 1:9 rwm", - "c 1:8 rwm", - - // /dev/pts/ - pts namespaces are "coming soon" - "c 136:* rwm", - "c 5:2 rwm", - - // tuntap - "c 10:200 rwm", - } - - for _, val := range allow { - if err := writeFile(dir, "devices.allow", val); err != nil { - return err - } - } - } - return nil -} - -func (c *Cgroup) 
setupMemory(cgroupRoot string, pid int) (err error) { - if c.Memory != 0 || c.MemorySwap != 0 { - dir, err := c.Join(cgroupRoot, "memory", pid) - if err != nil { - return err - } - defer func() { - if err != nil { - os.RemoveAll(dir) - } - }() - - if c.Memory != 0 { - if err := writeFile(dir, "memory.limit_in_bytes", strconv.FormatInt(c.Memory, 10)); err != nil { - return err - } - if err := writeFile(dir, "memory.soft_limit_in_bytes", strconv.FormatInt(c.Memory, 10)); err != nil { - return err - } - } - // By default, MemorySwap is set to twice the size of RAM. - // If you want to omit MemorySwap, set it to `-1'. - if c.MemorySwap != -1 { - if err := writeFile(dir, "memory.memsw.limit_in_bytes", strconv.FormatInt(c.Memory*2, 10)); err != nil { - return err - } - } - } - return nil -} - -func (c *Cgroup) setupCpu(cgroupRoot string, pid int) (err error) { - // We always want to join the cpu group, to allow fair cpu scheduling - // on a container basis - dir, err := c.Join(cgroupRoot, "cpu", pid) - if err != nil { - return err - } - if c.CpuShares != 0 { - if err := writeFile(dir, "cpu.shares", strconv.FormatInt(c.CpuShares, 10)); err != nil { - return err - } - } - return nil -} diff -Nru docker.io-0.9.1~dfsg1/pkg/cgroups/cgroups_test.go docker.io-1.3.2~dfsg1/pkg/cgroups/cgroups_test.go --- docker.io-0.9.1~dfsg1/pkg/cgroups/cgroups_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/cgroups/cgroups_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -package cgroups - -import ( - "bytes" - "testing" -) - -const ( - cgroupsContents = `11:hugetlb:/ -10:perf_event:/ -9:blkio:/ -8:net_cls:/ -7:freezer:/ -6:devices:/ -5:memory:/ -4:cpuacct,cpu:/ -3:cpuset:/ -2:name=systemd:/user.slice/user-1000.slice/session-16.scope` -) - -func TestParseCgroups(t *testing.T) { - r := bytes.NewBuffer([]byte(cgroupsContents)) - _, err := parseCgroupFile("blkio", r) - if err != nil { - t.Fatal(err) - } -} diff -Nru docker.io-0.9.1~dfsg1/pkg/cgroups/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/cgroups/MAINTAINERS --- docker.io-0.9.1~dfsg1/pkg/cgroups/MAINTAINERS 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/cgroups/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -Michael Crosby (@crosbymichael) diff -Nru docker.io-0.9.1~dfsg1/pkg/chrootarchive/archive.go docker.io-1.3.2~dfsg1/pkg/chrootarchive/archive.go --- docker.io-0.9.1~dfsg1/pkg/chrootarchive/archive.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/chrootarchive/archive.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,90 @@ +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "os" + "runtime" + "strings" + "syscall" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +func untar() { + runtime.LockOSThread() + flag.Parse() + + if err := syscall.Chroot(flag.Arg(0)); err != nil { + fatal(err) + } + if err := syscall.Chdir("/"); err != nil { + fatal(err) + } + options := new(archive.TarOptions) + dec := json.NewDecoder(strings.NewReader(flag.Arg(1))) + if err := dec.Decode(options); err != nil { + fatal(err) + } + if err := archive.Untar(os.Stdin, "/", options); err != nil { + fatal(err) + } + os.Exit(0) +} + +var ( + chrootArchiver = &archive.Archiver{Untar} +) + +func Untar(archive io.Reader, dest string, options *archive.TarOptions) error { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + if err := enc.Encode(options); err != nil { + return fmt.Errorf("Untar json encode: %v", err) + } + if _, err := 
os.Stat(dest); os.IsNotExist(err) { + if err := os.MkdirAll(dest, 0777); err != nil { + return err + } + } + + cmd := reexec.Command("docker-untar", dest, buf.String()) + cmd.Stdin = archive + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("Untar %s %s", err, out) + } + return nil +} + +func TarUntar(src, dst string) error { + return chrootArchiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return chrootArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// If `dst` ends with a trailing slash '/', the final destination path +// will be `dst/base(src)`. +func CopyFileWithTar(src, dst string) (err error) { + return chrootArchiver.CopyFileWithTar(src, dst) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return chrootArchiver.UntarPath(src, dst) +} diff -Nru docker.io-0.9.1~dfsg1/pkg/chrootarchive/archive_test.go docker.io-1.3.2~dfsg1/pkg/chrootarchive/archive_test.go --- docker.io-0.9.1~dfsg1/pkg/chrootarchive/archive_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/chrootarchive/archive_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,44 @@ +package chrootarchive + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Init() +} + +func TestChrootTarUntar(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := os.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "lolo"), []byte("hello lolo"), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "src") + if err := os.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + if err := Untar(stream, dest, &archive.TarOptions{Excludes: []string{"lolo"}}); err != nil { + t.Fatal(err) + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/chrootarchive/diff.go docker.io-1.3.2~dfsg1/pkg/chrootarchive/diff.go --- docker.io-0.9.1~dfsg1/pkg/chrootarchive/diff.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/chrootarchive/diff.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,46 @@ +package chrootarchive + +import ( + "flag" + "fmt" + "io/ioutil" + "os" + "runtime" + "syscall" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +func applyLayer() { + runtime.LockOSThread() + flag.Parse() + + if err := syscall.Chroot(flag.Arg(0)); err != nil { + fatal(err) + } + if err := syscall.Chdir("/"); err != nil { + fatal(err) + } + tmpDir, err := ioutil.TempDir("/", "temp-docker-extract") + if err != nil { + fatal(err) + } + os.Setenv("TMPDIR", tmpDir) + if err := archive.ApplyLayer("/", os.Stdin); err != nil { + 
os.RemoveAll(tmpDir) + fatal(err) + } + os.RemoveAll(tmpDir) + os.Exit(0) +} + +func ApplyLayer(dest string, layer archive.ArchiveReader) error { + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = layer + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("ApplyLayer %s %s", err, out) + } + return nil +} diff -Nru docker.io-0.9.1~dfsg1/pkg/chrootarchive/init.go docker.io-1.3.2~dfsg1/pkg/chrootarchive/init.go --- docker.io-0.9.1~dfsg1/pkg/chrootarchive/init.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/chrootarchive/init.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,18 @@ +package chrootarchive + +import ( + "fmt" + "os" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-untar", untar) + reexec.Register("docker-applyLayer", applyLayer) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} diff -Nru docker.io-0.9.1~dfsg1/pkg/collections/orderedintset.go docker.io-1.3.2~dfsg1/pkg/collections/orderedintset.go --- docker.io-0.9.1~dfsg1/pkg/collections/orderedintset.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/collections/orderedintset.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,96 +0,0 @@ -package collections - -import ( - "sync" -) - -// OrderedIntSet is a thread-safe sorted set and a stack. -type OrderedIntSet struct { - sync.RWMutex - set []int -} - -// NewOrderedSet returns an initialized OrderedSet -func NewOrderedIntSet() *OrderedIntSet { - return &OrderedIntSet{} -} - -// Push takes a string and adds it to the set. If the elem aready exists, it has no effect. -func (s *OrderedIntSet) Push(elem int) { - s.RLock() - for _, e := range s.set { - if e == elem { - s.RUnlock() - return - } - } - s.RUnlock() - - s.Lock() - - // Make sure the list is always sorted - for i, e := range s.set { - if elem < e { - s.set = append(s.set[:i], append([]int{elem}, s.set[i:]...)...) - s.Unlock() - return - } - } - // If we reach here, then elem is the biggest elem of the list. - s.set = append(s.set, elem) - s.Unlock() -} - -// Pop is an alias to PopFront() -func (s *OrderedIntSet) Pop() int { - return s.PopFront() -} - -// Pop returns the first elemen from the list and removes it. -// If the list is empty, it returns 0 -func (s *OrderedIntSet) PopFront() int { - s.RLock() - - for i, e := range s.set { - ret := e - s.RUnlock() - s.Lock() - s.set = append(s.set[:i], s.set[i+1:]...) - s.Unlock() - return ret - } - s.RUnlock() - - return 0 -} - -// PullBack retrieve the last element of the list. -// The element is not removed. -// If the list is empty, an empty element is returned. -func (s *OrderedIntSet) PullBack() int { - if len(s.set) == 0 { - return 0 - } - return s.set[len(s.set)-1] -} - -// Exists checks if the given element present in the list. -func (s *OrderedIntSet) Exists(elem int) bool { - for _, e := range s.set { - if e == elem { - return true - } - } - return false -} - -// Remove removes an element from the list. -// If the element is not found, it has no effect. -func (s *OrderedIntSet) Remove(elem int) { - for i, e := range s.set { - if e == elem { - s.set = append(s.set[:i], s.set[i+1:]...) 
- return - } - } -} diff -Nru docker.io-0.9.1~dfsg1/pkg/fileutils/fileutils.go docker.io-1.3.2~dfsg1/pkg/fileutils/fileutils.go --- docker.io-0.9.1~dfsg1/pkg/fileutils/fileutils.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/fileutils/fileutils.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,26 @@ +package fileutils + +import ( + "github.com/docker/docker/pkg/log" + "path/filepath" +) + +// Matches returns true if relFilePath matches any of the patterns +func Matches(relFilePath string, patterns []string) (bool, error) { + for _, exclude := range patterns { + matched, err := filepath.Match(exclude, relFilePath) + if err != nil { + log.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude) + return false, err + } + if matched { + if filepath.Clean(relFilePath) == "." { + log.Errorf("Can't exclude whole path, excluding pattern: %s", exclude) + continue + } + log.Debugf("Skipping excluded path: %s", relFilePath) + return true, nil + } + } + return false, nil +} diff -Nru docker.io-0.9.1~dfsg1/pkg/graphdb/conn_linux.go docker.io-1.3.2~dfsg1/pkg/graphdb/conn_linux.go --- docker.io-0.9.1~dfsg1/pkg/graphdb/conn_linux.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/graphdb/conn_linux.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -// +build amd64 - -package graphdb - -import ( - _ "code.google.com/p/gosqlite/sqlite3" // registers sqlite - "database/sql" - "os" -) - -func NewSqliteConn(root string) (*Database, error) { - initDatabase := false - if _, err := os.Stat(root); err != nil { - if os.IsNotExist(err) { - initDatabase = true - } else { - return nil, err - } - } - conn, err := sql.Open("sqlite3", root) - if err != nil { - return nil, err - } - return NewDatabase(conn, initDatabase) -} diff -Nru docker.io-0.9.1~dfsg1/pkg/graphdb/conn_sqlite3.go docker.io-1.3.2~dfsg1/pkg/graphdb/conn_sqlite3.go --- docker.io-0.9.1~dfsg1/pkg/graphdb/conn_sqlite3.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/graphdb/conn_sqlite3.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,34 @@ +// +build cgo + +package graphdb + +import ( + "database/sql" + "os" + + _ "code.google.com/p/gosqlite/sqlite3" // registers sqlite +) + +func NewSqliteConn(root string) (*Database, error) { + initDatabase := false + + stat, err := os.Stat(root) + if err != nil { + if os.IsNotExist(err) { + initDatabase = true + } else { + return nil, err + } + } + + if stat != nil && stat.Size() == 0 { + initDatabase = true + } + + conn, err := sql.Open("sqlite3", root) + if err != nil { + return nil, err + } + + return NewDatabase(conn, initDatabase) +} diff -Nru docker.io-0.9.1~dfsg1/pkg/graphdb/conn_unsupported.go docker.io-1.3.2~dfsg1/pkg/graphdb/conn_unsupported.go --- docker.io-0.9.1~dfsg1/pkg/graphdb/conn_unsupported.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/graphdb/conn_unsupported.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,4 +1,4 @@ -// +build !linux !amd64 +// +build !cgo package graphdb diff -Nru docker.io-0.9.1~dfsg1/pkg/graphdb/graphdb.go docker.io-1.3.2~dfsg1/pkg/graphdb/graphdb.go --- docker.io-0.9.1~dfsg1/pkg/graphdb/graphdb.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/graphdb/graphdb.go 2014-11-24 17:38:01.000000000 +0000 @@ -64,6 +64,11 @@ if strings.Contains(str, "UNIQUE constraint failed") && strings.Contains(str, "edge.name") { return true } + // sqlite-3.6.20-1.el6 returns: + // Set failure: Abort due to constraint violation: constraint failed + if strings.HasSuffix(str, "constraint 
failed") { + return true + } return false } @@ -276,6 +281,18 @@ return db.children(e, name, depth, nil) } +// Return the parents of a specified entity +func (db *Database) Parents(name string) ([]string, error) { + db.mux.RLock() + defer db.mux.RUnlock() + + e, err := db.get(name) + if err != nil { + return nil, err + } + return db.parents(e) +} + // Return the refrence count for a specified id func (db *Database) Refs(id string) int { db.mux.RLock() @@ -461,6 +478,28 @@ return entities, nil } +func (db *Database) parents(e *Entity) (parents []string, err error) { + if e == nil { + return parents, nil + } + + rows, err := db.conn.Query("SELECT parent_id FROM edge where entity_id = ?;", e.id) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var parentId string + if err := rows.Scan(&parentId); err != nil { + return nil, err + } + parents = append(parents, parentId) + } + + return parents, nil +} + // Return the entity based on the parent path and name func (db *Database) child(parent *Entity, name string) *Entity { var id string diff -Nru docker.io-0.9.1~dfsg1/pkg/graphdb/graphdb_test.go docker.io-1.3.2~dfsg1/pkg/graphdb/graphdb_test.go --- docker.io-0.9.1~dfsg1/pkg/graphdb/graphdb_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/graphdb/graphdb_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,13 +1,14 @@ package graphdb import ( - _ "code.google.com/p/gosqlite/sqlite3" "database/sql" "fmt" "os" "path" "strconv" "testing" + + _ "code.google.com/p/gosqlite/sqlite3" ) func newTestDb(t *testing.T) (*Database, string) { @@ -33,7 +34,7 @@ defer destroyTestDb(dbpath) } -func TestCreateRootEnity(t *testing.T) { +func TestCreateRootEntity(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) root := db.RootEntity() @@ -93,6 +94,84 @@ } } +func TestParents(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + for i := 1; i < 6; i++ { + a := strconv.Itoa(i) + if _, err := db.Set("/"+a, a); err != nil { + t.Fatal(err) + } + } + + for i := 6; i < 11; i++ { + a := strconv.Itoa(i) + p := strconv.Itoa(i - 5) + + key := fmt.Sprintf("/%s/%s", p, a) + + if _, err := db.Set(key, a); err != nil { + t.Fatal(err) + } + + parents, err := db.Parents(key) + if err != nil { + t.Fatal(err) + } + + if len(parents) != 1 { + t.Fatalf("Expected 2 entries for %s got %d", key, len(parents)) + } + + if parents[0] != p { + t.Fatalf("ID %s received, %s expected", parents[0], p) + } + } +} + +func TestChildren(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + str := "/" + for i := 1; i < 6; i++ { + a := strconv.Itoa(i) + if _, err := db.Set(str+a, a); err != nil { + t.Fatal(err) + } + + str = str + a + "/" + } + + str = "/" + for i := 10; i < 30; i++ { // 20 entities + a := strconv.Itoa(i) + if _, err := db.Set(str+a, a); err != nil { + t.Fatal(err) + } + + str = str + a + "/" + } + entries, err := db.Children("/", 5) + if err != nil { + t.Fatal(err) + } + + if len(entries) != 11 { + t.Fatalf("Expect 11 entries for / got %d", len(entries)) + } + + entries, err = db.Children("/", 20) + if err != nil { + t.Fatal(err) + } + + if len(entries) != 25 { + t.Fatalf("Expect 25 entries for / got %d", len(entries)) + } +} + func TestListAllRootChildren(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) @@ -535,6 +614,6 @@ } } if any { - t.Fatal() + t.Fail() } } diff -Nru docker.io-0.9.1~dfsg1/pkg/httputils/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/httputils/MAINTAINERS --- 
docker.io-0.9.1~dfsg1/pkg/httputils/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/httputils/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +Cristian Staretu (@unclejack) diff -Nru docker.io-0.9.1~dfsg1/pkg/httputils/resumablerequestreader.go docker.io-1.3.2~dfsg1/pkg/httputils/resumablerequestreader.go --- docker.io-0.9.1~dfsg1/pkg/httputils/resumablerequestreader.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/httputils/resumablerequestreader.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,93 @@ +package httputils + +import ( + "fmt" + "io" + "net/http" + "time" + + "github.com/docker/docker/pkg/log" +) + +type resumableRequestReader struct { + client *http.Client + request *http.Request + lastRange int64 + totalSize int64 + currentResponse *http.Response + failures uint32 + maxFailures uint32 +} + +// ResumableRequestReader makes it possible to resume reading a request's body transparently. +// maxfail is the maximum number of times the request is retried from scratch (retries, not resumes). +// totalsize is the total length of the body; it is auto-detected if not provided. +func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} +} + +func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse} +} + +func (r *resumableRequestReader) Read(p []byte) (n int, err error) { + if r.client == nil || r.request == nil { + return 0, fmt.Errorf("client and request can't be nil") + } + isFreshRequest := false + if r.lastRange != 0 && r.currentResponse == nil { + readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) + r.request.Header.Set("Range", readRange) + time.Sleep(5 * time.Second) + } + if r.currentResponse == nil { + r.currentResponse, err = r.client.Do(r.request) + isFreshRequest = true + } + if err != nil && r.failures+1 != r.maxFailures { + r.cleanUpResponse() + r.failures++ + time.Sleep(5 * time.Duration(r.failures) * time.Second) + return 0, nil + } else if err != nil { + r.cleanUpResponse() + return 0, err + } + if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { + r.cleanUpResponse() + return 0, io.EOF + } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { + r.cleanUpResponse() + return 0, fmt.Errorf("the server doesn't support byte ranges") + } + if r.totalSize == 0 { + r.totalSize = r.currentResponse.ContentLength + } else if r.totalSize <= 0 { + r.cleanUpResponse() + return 0, fmt.Errorf("failed to auto detect content length") + } + n, err = r.currentResponse.Body.Read(p) + r.lastRange += int64(n) + if err != nil { + r.cleanUpResponse() + } + if err != nil && err != io.EOF { + log.Infof("encountered error during pull and clearing it before resume: %s", err) + err = nil + } + return n, err +} + +func (r *resumableRequestReader) Close() error { + r.cleanUpResponse() + r.client = nil + r.request = nil + return nil +} + +func (r *resumableRequestReader) cleanUpResponse() { + if r.currentResponse != nil { + r.currentResponse.Body.Close() + r.currentResponse = nil + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/ioutils/readers.go 
docker.io-1.3.2~dfsg1/pkg/ioutils/readers.go --- docker.io-0.9.1~dfsg1/pkg/ioutils/readers.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/ioutils/readers.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,114 @@ +package ioutils + +import ( + "bytes" + "io" + "sync" +) + +type readCloserWrapper struct { + io.Reader + closer func() error +} + +func (r *readCloserWrapper) Close() error { + return r.closer() +} + +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &readCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +type bufReader struct { + sync.Mutex + buf *bytes.Buffer + reader io.Reader + err error + wait sync.Cond + drainBuf []byte +} + +func NewBufReader(r io.Reader) *bufReader { + reader := &bufReader{ + buf: &bytes.Buffer{}, + drainBuf: make([]byte, 1024), + reader: r, + } + reader.wait.L = &reader.Mutex + go reader.drain() + return reader +} + +func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *bytes.Buffer) *bufReader { + reader := &bufReader{ + buf: buffer, + drainBuf: drainBuffer, + reader: r, + } + reader.wait.L = &reader.Mutex + go reader.drain() + return reader +} + +func (r *bufReader) drain() { + for { + n, err := r.reader.Read(r.drainBuf) + r.Lock() + if err != nil { + r.err = err + } else { + r.buf.Write(r.drainBuf[0:n]) + } + r.wait.Signal() + r.Unlock() + if err != nil { + break + } + } +} + +func (r *bufReader) Read(p []byte) (n int, err error) { + r.Lock() + defer r.Unlock() + for { + n, err = r.buf.Read(p) + if n > 0 { + return n, err + } + if r.err != nil { + return 0, r.err + } + r.wait.Wait() + } +} + +func (r *bufReader) Close() error { + closer, ok := r.reader.(io.ReadCloser) + if !ok { + return nil + } + return closer.Close() +} diff -Nru docker.io-0.9.1~dfsg1/pkg/ioutils/readers_test.go docker.io-1.3.2~dfsg1/pkg/ioutils/readers_test.go --- docker.io-0.9.1~dfsg1/pkg/ioutils/readers_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/ioutils/readers_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,34 @@ +package ioutils + +import ( + "bytes" + "io" + "io/ioutil" + "testing" +) + +func TestBufReader(t *testing.T) { + reader, writer := io.Pipe() + bufreader := NewBufReader(reader) + + // Write everything down to a Pipe + // Usually, a pipe should block but because of the buffered reader, + // the writes will go through + done := make(chan bool) + go func() { + writer.Write([]byte("hello world")) + writer.Close() + done <- true + }() + + // Drain the reader *after* everything has been written, just to verify + // it is indeed buffering + <-done + output, err := ioutil.ReadAll(bufreader) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(output, []byte("hello world")) { + t.Error(string(output)) + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/ioutils/writers.go docker.io-1.3.2~dfsg1/pkg/ioutils/writers.go --- docker.io-0.9.1~dfsg1/pkg/ioutils/writers.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/ioutils/writers.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,39 @@ +package ioutils + +import "io" + +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return 
len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +type NopFlusher struct{} + +func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/iptables/iptables.go docker.io-1.3.2~dfsg1/pkg/iptables/iptables.go --- docker.io-0.9.1~dfsg1/pkg/iptables/iptables.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/iptables/iptables.go 2014-11-24 17:38:01.000000000 +0000 @@ -6,6 +6,7 @@ "net" "os" "os/exec" + "regexp" "strconv" "strings" ) @@ -20,6 +21,7 @@ var ( ErrIptablesNotFound = errors.New("Iptables not found") nat = []string{"-t", "nat"} + supportsXlock = false ) type Chain struct { @@ -27,6 +29,10 @@ Bridge string } +func init() { + supportsXlock = exec.Command("iptables", "--wait", "-L", "-n").Run() == nil +} + func NewChain(name, bridge string) (*Chain, error) { if output, err := Raw("-t", "nat", "-N", name); err != nil { return nil, err @@ -136,10 +142,27 @@ // Exists checks if a rule exists func Exists(args ...string) bool { - if _, err := Raw(append([]string{"-C"}, args...)...); err != nil { - return false - } - return true + // iptables -C, --check option was added in v.1.4.11 + // http://ftp.netfilter.org/pub/iptables/changes-iptables-1.4.11.txt + + // try -C + // if exit status is 0 then return true, the rule exists + if _, err := Raw(append([]string{"-C"}, args...)...); err == nil { + return true + } + + // parse iptables-save for the rule + rule := strings.Replace(strings.Join(args, " "), "-t nat ", "", -1) + existingRules, _ := exec.Command("iptables-save").Output() + + // regexp to replace IPs in the rule, + // because the MASQUERADE rule will not be exactly what was passed + re := regexp.MustCompile(`[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\/[0-9]{1,2}`) + + return strings.Contains( + re.ReplaceAllString(string(existingRules), "?"), + re.ReplaceAllString(rule, "?"), + ) } func Raw(args ...string) ([]byte, error) { @@ -147,12 +170,24 @@ if err != nil { return nil, ErrIptablesNotFound } + + if supportsXlock { + args = append([]string{"--wait"}, args...) 
+ } + if os.Getenv("DEBUG") != "" { - fmt.Printf("[DEBUG] [iptables]: %s, %v\n", path, args) + fmt.Fprintf(os.Stderr, "[debug] %s, %v\n", path, args) } + output, err := exec.Command(path, args...).CombinedOutput() if err != nil { return nil, fmt.Errorf("iptables failed: iptables %v: %s (%s)", strings.Join(args, " "), output, err) } + + // ignore iptables' message about xtables lock + if strings.Contains(string(output), "waiting for it to exit") { + output = []byte("") + } + return output, err } diff -Nru docker.io-0.9.1~dfsg1/pkg/jsonlog/jsonlog.go docker.io-1.3.2~dfsg1/pkg/jsonlog/jsonlog.go --- docker.io-0.9.1~dfsg1/pkg/jsonlog/jsonlog.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/jsonlog/jsonlog.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,53 @@ +package jsonlog + +import ( + "encoding/json" + "fmt" + "io" + "log" + "time" +) + +type JSONLog struct { + Log string `json:"log,omitempty"` + Stream string `json:"stream,omitempty"` + Created time.Time `json:"time"` +} + +func (jl *JSONLog) Format(format string) (string, error) { + if format == "" { + return jl.Log, nil + } + if format == "json" { + m, err := json.Marshal(jl) + return string(m), err + } + return fmt.Sprintf("[%s] %s", jl.Created.Format(format), jl.Log), nil +} + +func (jl *JSONLog) Reset() { + jl.Log = "" + jl.Stream = "" + jl.Created = time.Time{} +} + +func WriteLog(src io.Reader, dst io.Writer, format string) error { + dec := json.NewDecoder(src) + l := &JSONLog{} + for { + if err := dec.Decode(l); err == io.EOF { + return nil + } else if err != nil { + log.Printf("Error streaming logs: %s", err) + return err + } + line, err := l.Format(format) + if err != nil { + return err + } + if _, err := io.WriteString(dst, line); err != nil { + return err + } + l.Reset() + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/jsonlog/jsonlog_marshalling.go docker.io-1.3.2~dfsg1/pkg/jsonlog/jsonlog_marshalling.go --- docker.io-0.9.1~dfsg1/pkg/jsonlog/jsonlog_marshalling.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/jsonlog/jsonlog_marshalling.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,176 @@ +// This code was initially generated by ffjson +// This code was generated via the following steps: +// $ go get -u github.com/pquerna/ffjson +// $ make shell BINDDIR=. +// $ ffjson pkg/jsonlog/jsonlog.go +// $ mv pkg/jsonlog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go +// +// It has been modified to improve the performance of time marshalling to JSON +// and to clean it up. 
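// Editor's aside: a short usage sketch for the buffer-based marshaller added
// in this file; it assumes only the exported JSONLog type above and the
// MarshalJSONBuf method defined later in this hunk. Illustration, not part of
// the patch.
package main

import (
	"bytes"
	"fmt"
	"time"

	"github.com/docker/docker/pkg/jsonlog"
)

func main() {
	l := &jsonlog.JSONLog{Log: "hello\n", Stream: "stdout", Created: time.Now()}
	var buf bytes.Buffer
	buf.Grow(1024) // pre-size the buffer, as MarshalJSON does before delegating
	if err := l.MarshalJSONBuf(&buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // {"log":"hello\n","stream":"stdout","time":"2014-..."}
}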
+// Should this code need to be regenerated when the JSONLog struct is changed, +// the relevant changes which have been made are: +// import ( +// "bytes" +//- +// "unicode/utf8" +//+ +//+ "github.com/docker/docker/pkg/timeutils" +// ) +// +// func (mj *JSONLog) MarshalJSON() ([]byte, error) { +//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { +// } +// return buf.Bytes(), nil +// } +//+ +// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +//- var err error +//- var obj []byte +//- var first bool = true +//- _ = obj +//- _ = err +//- _ = first +//+ var ( +//+ err error +//+ timestamp string +//+ first bool = true +//+ ) +// buf.WriteString(`{`) +// if len(mj.Log) != 0 { +// if first == true { +//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +// buf.WriteString(`,`) +// } +// buf.WriteString(`"time":`) +//- obj, err = mj.Created.MarshalJSON() +//+ timestamp, err = timeutils.FastMarshalJSON(mj.Created) +// if err != nil { +// return err +// } +//- buf.Write(obj) +//+ buf.WriteString(timestamp) +// buf.WriteString(`}`) +// return nil +// } + +package jsonlog + +import ( + "bytes" + "unicode/utf8" + + "github.com/docker/docker/pkg/timeutils" +) + +func (mj *JSONLog) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + buf.Grow(1024) + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { + var ( + err error + timestamp string + first bool = true + ) + buf.WriteString(`{`) + if len(mj.Log) != 0 { + if first == true { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"log":`) + ffjson_WriteJsonString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first == true { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjson_WriteJsonString(buf, mj.Stream) + } + if first == true { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"time":`) + timestamp, err = timeutils.FastMarshalJSON(mj.Created) + if err != nil { + return err + } + buf.WriteString(timestamp) + buf.WriteString(`}`) + return nil +} + +func ffjson_WriteJsonString(buf *bytes.Buffer, s string) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.WriteString(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.WriteString(s[start:]) + } + buf.WriteByte('"') +} diff -Nru docker.io-0.9.1~dfsg1/pkg/jsonlog/jsonlog_test.go docker.io-1.3.2~dfsg1/pkg/jsonlog/jsonlog_test.go --- docker.io-0.9.1~dfsg1/pkg/jsonlog/jsonlog_test.go 1970-01-01 00:00:00.000000000 +0000 +++ 
docker.io-1.3.2~dfsg1/pkg/jsonlog/jsonlog_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,61 @@ +package jsonlog + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "regexp" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/timeutils" +) + +func TestWriteLog(t *testing.T) { + var buf bytes.Buffer + e := json.NewEncoder(&buf) + testLine := "Line that thinks that it is log line from docker\n" + for i := 0; i < 30; i++ { + e.Encode(JSONLog{Log: testLine, Stream: "stdout", Created: time.Now()}) + } + w := bytes.NewBuffer(nil) + format := timeutils.RFC3339NanoFixed + if err := WriteLog(&buf, w, format); err != nil { + t.Fatal(err) + } + res := w.String() + t.Logf("Result of WriteLog: %q", res) + lines := strings.Split(strings.TrimSpace(res), "\n") + if len(lines) != 30 { + t.Fatalf("Must be 30 lines but got %d", len(lines)) + } + logRe := regexp.MustCompile(`\[.*\] Line that thinks that it is log line from docker`) + for _, l := range lines { + if !logRe.MatchString(l) { + t.Fatalf("Log line not in expected format: %q", l) + } + } +} + +func BenchmarkWriteLog(b *testing.B) { + var buf bytes.Buffer + e := json.NewEncoder(&buf) + testLine := "Line that thinks that it is log line from docker\n" + for i := 0; i < 30; i++ { + e.Encode(JSONLog{Log: testLine, Stream: "stdout", Created: time.Now()}) + } + r := bytes.NewReader(buf.Bytes()) + w := ioutil.Discard + format := timeutils.RFC3339NanoFixed + b.SetBytes(int64(r.Len())) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := WriteLog(r, w, format); err != nil { + b.Fatal(err) + } + b.StopTimer() + r.Seek(0, 0) + b.StartTimer() + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/apparmor/apparmor_disabled.go docker.io-1.3.2~dfsg1/pkg/libcontainer/apparmor/apparmor_disabled.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/apparmor/apparmor_disabled.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/apparmor/apparmor_disabled.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -// +build !apparmor !linux !amd64 - -package apparmor - -import () - -func IsEnabled() bool { - return false -} - -func ApplyProfile(pid int, name string) error { - return nil -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/apparmor/apparmor.go docker.io-1.3.2~dfsg1/pkg/libcontainer/apparmor/apparmor.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/apparmor/apparmor.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/apparmor/apparmor.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ -// +build apparmor,linux,amd64 - -package apparmor - -// #cgo LDFLAGS: -lapparmor -// #include -// #include -import "C" -import ( - "io/ioutil" - "unsafe" -) - -func IsEnabled() bool { - buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled") - return err == nil && len(buf) > 1 && buf[0] == 'Y' -} - -func ApplyProfile(pid int, name string) error { - if !IsEnabled() || name == "" { - return nil - } - - cName := C.CString(name) - defer C.free(unsafe.Pointer(cName)) - - if _, err := C.aa_change_onexec(cName); err != nil { - return err - } - return nil -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/apparmor/setup.go docker.io-1.3.2~dfsg1/pkg/libcontainer/apparmor/setup.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/apparmor/setup.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/apparmor/setup.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,103 +0,0 @@ -package apparmor - -import ( - "fmt" - "io/ioutil" - "os" - "os/exec" - "path" -) - -const 
DefaultProfilePath = "/etc/apparmor.d/docker" -const DefaultProfile = ` -# AppArmor profile from lxc for containers. -@{HOME}=@{HOMEDIRS}/*/ /root/ -@{HOMEDIRS}=/home/ -#@{HOMEDIRS}+= -@{multiarch}=*-linux-gnu* -@{PROC}=/proc/ - -profile docker-default flags=(attach_disconnected,mediate_deleted) { - network, - capability, - file, - umount, - - # ignore DENIED message on / remount - deny mount options=(ro, remount) -> /, - - # allow tmpfs mounts everywhere - mount fstype=tmpfs, - - # allow mqueue mounts everywhere - mount fstype=mqueue, - - # allow fuse mounts everywhere - mount fstype=fuse.*, - - # allow bind mount of /lib/init/fstab for lxcguest - mount options=(rw, bind) /lib/init/fstab.lxc/ -> /lib/init/fstab/, - - # deny writes in /proc/sys/fs but allow binfmt_misc to be mounted - mount fstype=binfmt_misc -> /proc/sys/fs/binfmt_misc/, - deny @{PROC}/sys/fs/** wklx, - - # allow efivars to be mounted, writing to it will be blocked though - mount fstype=efivarfs -> /sys/firmware/efi/efivars/, - - # block some other dangerous paths - deny @{PROC}/sysrq-trigger rwklx, - deny @{PROC}/mem rwklx, - deny @{PROC}/kmem rwklx, - deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx, - deny @{PROC}/sys/kernel/*/** wklx, - - # deny writes in /sys except for /sys/fs/cgroup, also allow - # fusectl, securityfs and debugfs to be mounted there (read-only) - mount fstype=fusectl -> /sys/fs/fuse/connections/, - mount fstype=securityfs -> /sys/kernel/security/, - mount fstype=debugfs -> /sys/kernel/debug/, - deny mount fstype=debugfs -> /var/lib/ureadahead/debugfs/, - mount fstype=proc -> /proc/, - mount fstype=sysfs -> /sys/, - deny /sys/[^f]*/** wklx, - deny /sys/f[^s]*/** wklx, - deny /sys/fs/[^c]*/** wklx, - deny /sys/fs/c[^g]*/** wklx, - deny /sys/fs/cg[^r]*/** wklx, - deny /sys/firmware/efi/efivars/** rwklx, - deny /sys/kernel/security/** rwklx, - mount options=(move) /sys/fs/cgroup/cgmanager/ -> /sys/fs/cgroup/cgmanager.lower/, - - # the container may never be allowed to mount devpts. If it does, it - # will remount the host's devpts. We could allow it to do it with - # the newinstance option (but, right now, we don't). - deny mount fstype=devpts, -} -` - -func InstallDefaultProfile() error { - if !IsEnabled() { - return nil - } - - // If the profile already exists, let it be. - if _, err := os.Stat(DefaultProfilePath); err == nil { - return nil - } - - // Make sure /etc/apparmor.d exists - if err := os.MkdirAll(path.Dir(DefaultProfilePath), 0755); err != nil { - return err - } - - if err := ioutil.WriteFile(DefaultProfilePath, []byte(DefaultProfile), 0644); err != nil { - return err - } - - output, err := exec.Command("/lib/init/apparmor-profile-load", "docker").CombinedOutput() - if err != nil { - return fmt.Errorf("Error loading docker profile: %s (%s)", err, output) - } - return nil -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/capabilities/capabilities.go docker.io-1.3.2~dfsg1/pkg/libcontainer/capabilities/capabilities.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/capabilities/capabilities.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/capabilities/capabilities.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -package capabilities - -import ( - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/syndtr/gocapability/capability" - "os" -) - -// DropCapabilities drops capabilities for the current process based -// on the container's configuration. 
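// Editor's sketch: the capability drop below boils down to three calls into
// the syndtr/gocapability package — build a handle for the pid, unset the
// unwanted caps, then Apply. Standalone illustration only; the helper names
// here are mine, not the patch's.
package main

import (
	"os"

	"github.com/syndtr/gocapability/capability"
)

func dropCaps(drop []capability.Cap) error {
	c, err := capability.NewPid(os.Getpid())
	if err != nil {
		return err
	}
	// Clear the caps from both the capability sets and the bounding set.
	c.Unset(capability.CAPS|capability.BOUNDS, drop...)
	return c.Apply(capability.CAPS | capability.BOUNDS)
}

func main() {
	// Dropping CAP_SYS_ADMIN requires the privilege to do so; expect an
	// error when run unprivileged.
	_ = dropCaps([]capability.Cap{capability.CAP_SYS_ADMIN})
}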
-func DropCapabilities(container *libcontainer.Container) error { - if drop := getCapabilities(container); len(drop) > 0 { - c, err := capability.NewPid(os.Getpid()) - if err != nil { - return err - } - c.Unset(capability.CAPS|capability.BOUNDS, drop...) - - if err := c.Apply(capability.CAPS | capability.BOUNDS); err != nil { - return err - } - } - return nil -} - -// getCapabilities returns the specific cap values for the libcontainer types -func getCapabilities(container *libcontainer.Container) []capability.Cap { - drop := []capability.Cap{} - for _, c := range container.Capabilities { - drop = append(drop, c.Value) - } - return drop -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/container.go docker.io-1.3.2~dfsg1/pkg/libcontainer/container.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/container.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/container.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -package libcontainer - -import ( - "github.com/dotcloud/docker/pkg/cgroups" -) - -// Context is a generic key value pair that allows -// arbitrary data to be sent -type Context map[string]string - -// Container defines configuration options for how a -// container is set up inside a directory and how a process should be executed -type Container struct { - Hostname string `json:"hostname,omitempty"` // hostname - ReadonlyFs bool `json:"readonly_fs,omitempty"` // set the container's rootfs as readonly - NoPivotRoot bool `json:"no_pivot_root,omitempty"` // this can be enabled if you are running in ramdisk - User string `json:"user,omitempty"` // user to execute the process as - WorkingDir string `json:"working_dir,omitempty"` // current working directory - Env []string `json:"environment,omitempty"` // environment to set - Tty bool `json:"tty,omitempty"` // setup a proper tty or not - Namespaces Namespaces `json:"namespaces,omitempty"` // namespaces to apply - Capabilities Capabilities `json:"capabilities,omitempty"` // capabilities to drop - Networks []*Network `json:"networks,omitempty"` // nil for host's network stack - Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"` // cgroups - Context Context `json:"context,omitempty"` // generic context for specific options (apparmor, selinux) -} - -// Network defines configuration for a container's networking stack -// -// The network configuration can be omitted from a container causing the -// container to be set up with the host's networking stack -type Network struct { - Type string `json:"type,omitempty"` // type of networking to set up i.e. 
veth, macvlan, etc - Context Context `json:"context,omitempty"` // generic context for type specific networking options - Address string `json:"address,omitempty"` - Gateway string `json:"gateway,omitempty"` - Mtu int `json:"mtu,omitempty"` -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/container.json docker.io-1.3.2~dfsg1/pkg/libcontainer/container.json --- docker.io-0.9.1~dfsg1/pkg/libcontainer/container.json 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/container.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -{ - "hostname": "koye", - "tty": true, - "environment": [ - "HOME=/", - "PATH=$PATH:/bin:/usr/bin:/sbin:/usr/sbin", - "container=docker", - "TERM=xterm-256color" - ], - "namespaces": [ - "NEWIPC", - "NEWNS", - "NEWPID", - "NEWUTS", - "NEWNET" - ], - "capabilities": [ - "SETPCAP", - "SYS_MODULE", - "SYS_RAWIO", - "SYS_PACCT", - "SYS_ADMIN", - "SYS_NICE", - "SYS_RESOURCE", - "SYS_TIME", - "SYS_TTY_CONFIG", - "MKNOD", - "AUDIT_WRITE", - "AUDIT_CONTROL", - "MAC_OVERRIDE", - "MAC_ADMIN", - "NET_ADMIN" - ], - "networks": [{ - "type": "veth", - "context": { - "bridge": "docker0", - "prefix": "dock" - }, - "address": "172.17.0.100/16", - "gateway": "172.17.42.1", - "mtu": 1500 - } - ], - "cgroups": { - "name": "docker-koye", - "parent": "docker", - "memory": 5248000 - } -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/libcontainer/MAINTAINERS --- docker.io-0.9.1~dfsg1/pkg/libcontainer/MAINTAINERS 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -Michael Crosby (@crosbymichael) -Guillaume Charmes (@creack) diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/network/loopback.go docker.io-1.3.2~dfsg1/pkg/libcontainer/network/loopback.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/network/loopback.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/network/loopback.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -package network - -import ( - "fmt" - "github.com/dotcloud/docker/pkg/libcontainer" -) - -// Loopback is a network strategy that provides a basic loopback device -type Loopback struct { -} - -func (l *Loopback) Create(n *libcontainer.Network, nspid int, context libcontainer.Context) error { - return nil -} - -func (l *Loopback) Initialize(config *libcontainer.Network, context libcontainer.Context) error { - if err := SetMtu("lo", config.Mtu); err != nil { - return fmt.Errorf("set lo mtu to %d %s", config.Mtu, err) - } - if err := InterfaceUp("lo"); err != nil { - return fmt.Errorf("lo up %s", err) - } - return nil -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/network/network.go docker.io-1.3.2~dfsg1/pkg/libcontainer/network/network.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/network/network.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/network/network.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,78 +0,0 @@ -package network - -import ( - "github.com/dotcloud/docker/pkg/netlink" - "net" -) - -func InterfaceUp(name string) error { - iface, err := net.InterfaceByName(name) - if err != nil { - return err - } - return netlink.NetworkLinkUp(iface) -} - -func InterfaceDown(name string) error { - iface, err := net.InterfaceByName(name) - if err != nil { - return err - } - return netlink.NetworkLinkDown(iface) -} - -func ChangeInterfaceName(old, newName string) error { - iface, err := net.InterfaceByName(old) - if err != nil { - 
return err - } - return netlink.NetworkChangeName(iface, newName) -} - -func CreateVethPair(name1, name2 string) error { - return netlink.NetworkCreateVethPair(name1, name2) -} - -func SetInterfaceInNamespacePid(name string, nsPid int) error { - iface, err := net.InterfaceByName(name) - if err != nil { - return err - } - return netlink.NetworkSetNsPid(iface, nsPid) -} - -func SetInterfaceMaster(name, master string) error { - iface, err := net.InterfaceByName(name) - if err != nil { - return err - } - masterIface, err := net.InterfaceByName(master) - if err != nil { - return err - } - return netlink.NetworkSetMaster(iface, masterIface) -} - -func SetDefaultGateway(ip string) error { - return netlink.AddDefaultGw(net.ParseIP(ip)) -} - -func SetInterfaceIp(name string, rawIp string) error { - iface, err := net.InterfaceByName(name) - if err != nil { - return err - } - ip, ipNet, err := net.ParseCIDR(rawIp) - if err != nil { - return err - } - return netlink.NetworkLinkAddIp(iface, ip, ipNet) -} - -func SetMtu(name string, mtu int) error { - iface, err := net.InterfaceByName(name) - if err != nil { - return err - } - return netlink.NetworkSetMTU(iface, mtu) -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/network/strategy.go docker.io-1.3.2~dfsg1/pkg/libcontainer/network/strategy.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/network/strategy.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/network/strategy.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -package network - -import ( - "errors" - "github.com/dotcloud/docker/pkg/libcontainer" -) - -var ( - ErrNotValidStrategyType = errors.New("not a valid network strategy type") -) - -var strategies = map[string]NetworkStrategy{ - "veth": &Veth{}, - "loopback": &Loopback{}, -} - -// NetworkStrategy represents a specific network configuration for -// a container's networking stack -type NetworkStrategy interface { - Create(*libcontainer.Network, int, libcontainer.Context) error - Initialize(*libcontainer.Network, libcontainer.Context) error -} - -// GetStrategy returns the specific network strategy for the -// provided type. If no strategy is registered for the type an -// ErrNotValidStrategyType is returned. 
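// Editor's sketch of the lookup pattern GetStrategy (below) relies on: a map
// from strategy name to implementation plus a sentinel error for unknown
// types. Generic illustration, not code from the patch.
package main

import (
	"errors"
	"fmt"
)

type NetworkStrategy interface {
	Initialize(address string) error
}

type loopback struct{}

func (loopback) Initialize(address string) error { return nil }

var errNotValidStrategyType = errors.New("not a valid network strategy type")

var strategies = map[string]NetworkStrategy{
	"loopback": loopback{},
}

func getStrategy(tpe string) (NetworkStrategy, error) {
	s, exists := strategies[tpe]
	if !exists {
		return nil, errNotValidStrategyType
	}
	return s, nil
}

func main() {
	s, err := getStrategy("loopback")
	fmt.Println(s, err) // {} <nil>
}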
-func GetStrategy(tpe string) (NetworkStrategy, error) { - s, exists := strategies[tpe] - if !exists { - return nil, ErrNotValidStrategyType - } - return s, nil -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/network/veth.go docker.io-1.3.2~dfsg1/pkg/libcontainer/network/veth.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/network/veth.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/network/veth.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,94 +0,0 @@ -package network - -import ( - "fmt" - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/libcontainer/utils" -) - -// Veth is a network strategy that uses a bridge and creates -// a veth pair, one that stays outside on the host and the other -// is placed inside the container's namespace -type Veth struct { -} - -func (v *Veth) Create(n *libcontainer.Network, nspid int, context libcontainer.Context) error { - var ( - bridge string - prefix string - exists bool - ) - if bridge, exists = n.Context["bridge"]; !exists { - return fmt.Errorf("bridge does not exist in network context") - } - if prefix, exists = n.Context["prefix"]; !exists { - return fmt.Errorf("veth prefix does not exist in network context") - } - name1, name2, err := createVethPair(prefix) - if err != nil { - return err - } - context["veth-host"] = name1 - context["veth-child"] = name2 - if err := SetInterfaceMaster(name1, bridge); err != nil { - return err - } - if err := SetMtu(name1, n.Mtu); err != nil { - return err - } - if err := InterfaceUp(name1); err != nil { - return err - } - if err := SetInterfaceInNamespacePid(name2, nspid); err != nil { - return err - } - return nil -} - -func (v *Veth) Initialize(config *libcontainer.Network, context libcontainer.Context) error { - var ( - vethChild string - exists bool - ) - if vethChild, exists = context["veth-child"]; !exists { - return fmt.Errorf("vethChild does not exist in network context") - } - if err := InterfaceDown(vethChild); err != nil { - return fmt.Errorf("interface down %s %s", vethChild, err) - } - if err := ChangeInterfaceName(vethChild, "eth0"); err != nil { - return fmt.Errorf("change %s to eth0 %s", vethChild, err) - } - if err := SetInterfaceIp("eth0", config.Address); err != nil { - return fmt.Errorf("set eth0 ip %s", err) - } - if err := SetMtu("eth0", config.Mtu); err != nil { - return fmt.Errorf("set eth0 mtu to %d %s", config.Mtu, err) - } - if err := InterfaceUp("eth0"); err != nil { - return fmt.Errorf("eth0 up %s", err) - } - if config.Gateway != "" { - if err := SetDefaultGateway(config.Gateway); err != nil { - return fmt.Errorf("set gateway to %s %s", config.Gateway, err) - } - } - return nil -} - -// createVethPair will automatically generate two random names for -// the veth pair and ensure that they have been created -func createVethPair(prefix string) (name1 string, name2 string, err error) { - name1, err = utils.GenerateRandomName(prefix, 4) - if err != nil { - return - } - name2, err = utils.GenerateRandomName(prefix, 4) - if err != nil { - return - } - if err = CreateVethPair(name1, name2); err != nil { - return - } - return -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/command.go docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/command.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/command.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/command.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -package nsinit - -import ( - "github.com/dotcloud/docker/pkg/libcontainer" 
- "github.com/dotcloud/docker/pkg/system" - "os" - "os/exec" -) - -// CommandFactory takes the container's configuration and options passed by the -// parent processes and creates an *exec.Cmd that will be used to fork/exec the -// namespaced init process -type CommandFactory interface { - Create(container *libcontainer.Container, console string, syncFd *os.File, args []string) *exec.Cmd -} - -type DefaultCommandFactory struct { - Root string -} - -// Create will return an exec.Cmd with the Cloneflags set to the proper namespaces -// defined on the container's configuration and use the current binary as the init with the -// args provided -func (c *DefaultCommandFactory) Create(container *libcontainer.Container, console string, pipe *os.File, args []string) *exec.Cmd { - // get our binary name from arg0 so we can always reexec ourselves - command := exec.Command(os.Args[0], append([]string{ - "-console", console, - "-pipe", "3", - "-root", c.Root, - "init"}, args...)...) - - system.SetCloneFlags(command, uintptr(GetNamespaceFlags(container.Namespaces))) - command.Env = container.Env - command.ExtraFiles = []*os.File{pipe} - return command -} - -// GetNamespaceFlags parses the container's Namespaces options to set the correct -// flags on clone, unshare, and setns -func GetNamespaceFlags(namespaces libcontainer.Namespaces) (flag int) { - for _, ns := range namespaces { - flag |= ns.Value - } - return flag -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/exec.go docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/exec.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/exec.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/exec.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,96 +0,0 @@ -// +build linux - -package nsinit - -import ( - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/libcontainer/network" - "github.com/dotcloud/docker/pkg/system" - "os" - "os/exec" - "syscall" -) - -// Exec performs setup outside of a namespace so that a container can be -// executed. Exec is a high level function for working with container namespaces. 
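// Editor's note: at the end of Exec (below) the child's exit code is pulled
// out of the *exec.Cmd via syscall.WaitStatus. That one step, isolated into a
// runnable sketch (Unix-only, standard library):
package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

func main() {
	cmd := exec.Command("false")
	if err := cmd.Run(); err != nil {
		if _, ok := err.(*exec.ExitError); !ok {
			panic(err) // failed to start at all, not just a non-zero exit
		}
	}
	status := cmd.ProcessState.Sys().(syscall.WaitStatus)
	fmt.Println("exit code:", status.ExitStatus()) // prints: exit code: 1
}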
-func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args []string) (int, error) { - var ( - master *os.File - console string - err error - ) - - // create a pipe so that we can synchronize with the namespaced process and - // pass the veth name to the child - syncPipe, err := NewSyncPipe() - if err != nil { - return -1, err - } - - if container.Tty { - master, console, err = system.CreateMasterAndConsole() - if err != nil { - return -1, err - } - term.SetMaster(master) - } - - command := ns.commandFactory.Create(container, console, syncPipe.child, args) - if err := term.Attach(command); err != nil { - return -1, err - } - defer term.Close() - - if err := command.Start(); err != nil { - return -1, err - } - if err := ns.stateWriter.WritePid(command.Process.Pid); err != nil { - command.Process.Kill() - return -1, err - } - defer ns.stateWriter.DeletePid() - - // Do this before syncing with child so that no children - // can escape the cgroup - if err := ns.SetupCgroups(container, command.Process.Pid); err != nil { - command.Process.Kill() - return -1, err - } - if err := ns.InitializeNetworking(container, command.Process.Pid, syncPipe); err != nil { - command.Process.Kill() - return -1, err - } - - // Sync with child - syncPipe.Close() - - if err := command.Wait(); err != nil { - if _, ok := err.(*exec.ExitError); !ok { - return -1, err - } - } - return command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil -} - -func (ns *linuxNs) SetupCgroups(container *libcontainer.Container, nspid int) error { - if container.Cgroups != nil { - if err := container.Cgroups.Apply(nspid); err != nil { - return err - } - } - return nil -} - -func (ns *linuxNs) InitializeNetworking(container *libcontainer.Container, nspid int, pipe *SyncPipe) error { - context := libcontainer.Context{} - for _, config := range container.Networks { - strategy, err := network.GetStrategy(config.Type) - if err != nil { - return err - } - if err := strategy.Create(config, nspid, context); err != nil { - return err - } - } - return pipe.SendToChild(context) -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/execin.go docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/execin.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/execin.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/execin.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,97 +0,0 @@ -// +build linux - -package nsinit - -import ( - "fmt" - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/system" - "os" - "path/filepath" - "strconv" - "syscall" -) - -// ExecIn uses an existing pid and joins the pid's namespaces with the new command. 
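// Editor's sketch: ExecIn (below) joins a running container by opening the
// namespace files under /proc/<pid>/ns and calling setns(2) on each fd; the
// fd-collection half mirrors getNsFds further down. Illustration only, with
// a hypothetical helper name:
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
)

func nsFds(pid int, files []string) ([]uintptr, error) {
	fds := make([]uintptr, 0, len(files))
	for _, file := range files {
		f, err := os.OpenFile(filepath.Join("/proc", strconv.Itoa(pid), "ns", file), os.O_RDONLY, 0)
		if err != nil {
			return fds, err
		}
		fds = append(fds, f.Fd())
	}
	return fds, nil
}

func main() {
	// Inspect our own namespaces as a harmless demonstration (Linux only).
	fds, err := nsFds(os.Getpid(), []string{"net", "ipc", "uts"})
	fmt.Println(fds, err)
}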
-func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error) { - for _, nsv := range container.Namespaces { - // skip the PID namespace on unshare because it is not supported - if nsv.Key != "NEWPID" { - if err := system.Unshare(nsv.Value); err != nil { - return -1, err - } - } - } - fds, err := ns.getNsFds(nspid, container) - closeFds := func() { - for _, f := range fds { - system.Closefd(f) - } - } - if err != nil { - closeFds() - return -1, err - } - - // for each namespace fd, use setns to join an existing container's namespaces - for _, fd := range fds { - if fd > 0 { - if err := system.Setns(fd, 0); err != nil { - closeFds() - return -1, fmt.Errorf("setns %s", err) - } - } - system.Closefd(fd) - } - - // if the container has a new pid and mount namespace we need to - // remount proc and sys to pick up the changes - if container.Namespaces.Contains("NEWNS") && container.Namespaces.Contains("NEWPID") { - pid, err := system.Fork() - if err != nil { - return -1, err - } - if pid == 0 { - // TODO: make all raw syscalls fork safe - if err := system.Unshare(syscall.CLONE_NEWNS); err != nil { - return -1, err - } - if err := remountProc(); err != nil { - return -1, fmt.Errorf("remount proc %s", err) - } - if err := remountSys(); err != nil { - return -1, fmt.Errorf("remount sys %s", err) - } - goto dropAndExec - } - proc, err := os.FindProcess(pid) - if err != nil { - return -1, err - } - state, err := proc.Wait() - if err != nil { - return -1, err - } - os.Exit(state.Sys().(syscall.WaitStatus).ExitStatus()) - } -dropAndExec: - if err := finalizeNamespace(container); err != nil { - return -1, err - } - if err := system.Execv(args[0], args[0:], container.Env); err != nil { - return -1, err - } - panic("unreachable") -} - -func (ns *linuxNs) getNsFds(pid int, container *libcontainer.Container) ([]uintptr, error) { - fds := make([]uintptr, len(container.Namespaces)) - for i, ns := range container.Namespaces { - f, err := os.OpenFile(filepath.Join("/proc/", strconv.Itoa(pid), "ns", ns.File), os.O_RDONLY, 0) - if err != nil { - return fds, err - } - fds[i] = f.Fd() - } - return fds, nil -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/init.go docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/init.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/init.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/init.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,153 +0,0 @@ -// +build linux - -package nsinit - -import ( - "fmt" - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/libcontainer/apparmor" - "github.com/dotcloud/docker/pkg/libcontainer/capabilities" - "github.com/dotcloud/docker/pkg/libcontainer/network" - "github.com/dotcloud/docker/pkg/libcontainer/utils" - "github.com/dotcloud/docker/pkg/system" - "github.com/dotcloud/docker/pkg/user" - "os" - "syscall" -) - -// Init is the init process that first runs inside a new namespace to set up mounts, users, networking, -// and other options required for the new container. 
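// Editor's note: Init (below) asks the kernel to deliver SIGTERM when the
// parent dies — system.ParentDeathSignal wraps prctl(PR_SET_PDEATHSIG). The
// standard library can request the same thing at fork time; a Linux-only
// sketch:
package main

import (
	"os/exec"
	"syscall"
)

func main() {
	cmd := exec.Command("sleep", "60")
	// Ask the kernel to SIGTERM the child if this parent process dies first.
	cmd.SysProcAttr = &syscall.SysProcAttr{Pdeathsig: syscall.SIGTERM}
	_ = cmd.Start()
	_ = cmd.Wait()
}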
-func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error { - rootfs, err := utils.ResolveRootfs(uncleanRootfs) - if err != nil { - return err - } - - // We always read this as it is a way to sync with the parent as well - context, err := syncPipe.ReadFromParent() - if err != nil { - syncPipe.Close() - return err - } - syncPipe.Close() - - if console != "" { - slave, err := system.OpenTerminal(console, syscall.O_RDWR) - if err != nil { - return fmt.Errorf("open terminal %s", err) - } - if err := dupSlave(slave); err != nil { - return fmt.Errorf("dup2 slave %s", err) - } - } - if _, err := system.Setsid(); err != nil { - return fmt.Errorf("setsid %s", err) - } - if console != "" { - if err := system.Setctty(); err != nil { - return fmt.Errorf("setctty %s", err) - } - } - // this is our best effort to let the process know that the parent has died and that it - // should act on it as it sees fit - if err := system.ParentDeathSignal(uintptr(syscall.SIGTERM)); err != nil { - return fmt.Errorf("parent death signal %s", err) - } - if err := setupNewMountNamespace(rootfs, console, container.ReadonlyFs, container.NoPivotRoot); err != nil { - return fmt.Errorf("setup mount namespace %s", err) - } - if err := setupNetwork(container, context); err != nil { - return fmt.Errorf("setup networking %s", err) - } - if err := system.Sethostname(container.Hostname); err != nil { - return fmt.Errorf("sethostname %s", err) - } - if err := finalizeNamespace(container); err != nil { - return fmt.Errorf("finalize namespace %s", err) - } - - if err := apparmor.ApplyProfile(os.Getpid(), container.Context["apparmor_profile"]); err != nil { - return err - } - return system.Execv(args[0], args[0:], container.Env) -} - -func setupUser(container *libcontainer.Container) error { - switch container.User { - case "root", "": - if err := system.Setgroups(nil); err != nil { - return err - } - if err := system.Setresgid(0, 0, 0); err != nil { - return err - } - if err := system.Setresuid(0, 0, 0); err != nil { - return err - } - default: - uid, gid, suppGids, err := user.GetUserGroupSupplementary(container.User, syscall.Getuid(), syscall.Getgid()) - if err != nil { - return err - } - if err := system.Setgroups(suppGids); err != nil { - return err - } - if err := system.Setgid(gid); err != nil { - return err - } - if err := system.Setuid(uid); err != nil { - return err - } - } - return nil -} - -// dupSlave dup2s the pty slave's fd onto stdin, stdout, and stderr -// so that all three standard streams refer to the slave -func dupSlave(slave *os.File) error { - if err := system.Dup2(slave.Fd(), 0); err != nil { - return err - } - if err := system.Dup2(slave.Fd(), 1); err != nil { - return err - } - if err := system.Dup2(slave.Fd(), 2); err != nil { - return err - } - return nil -} - -// setupNetwork uses the Network config, if it is not nil, to initialize -// the new veth interface inside the container, changing its name to eth0 and -// setting the MTU and IP address along with the default gateway -func setupNetwork(container *libcontainer.Container, context libcontainer.Context) error { - for _, config := range container.Networks { - strategy, err := network.GetStrategy(config.Type) - if err != nil { - return err - } - - err1 := strategy.Initialize(config, context) - if err1 != nil { - return err1 - } - } - return nil -} - -// finalizeNamespace drops the caps and sets the correct user -// and working dir before execing the command inside the namespace 
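// Editor's sketch: setupUser (above) drops privileges in the classic order —
// supplementary groups first, then gid, then uid — because nothing can be
// raised back once the uid is gone. With raw syscalls it reads as below; note
// that on Go releases before 1.16 syscall.Setuid/Setgid were not supported on
// Linux, so treat this strictly as an illustration.
package main

import "syscall"

func dropTo(uid, gid int, groups []int) error {
	if err := syscall.Setgroups(groups); err != nil {
		return err
	}
	if err := syscall.Setgid(gid); err != nil { // gid before uid
		return err
	}
	return syscall.Setuid(uid)
}

func main() {
	// Requires root; expect an error otherwise. 65534 is the conventional
	// nobody/nogroup pair, used here purely as an illustrative value.
	_ = dropTo(65534, 65534, nil)
}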
-func finalizeNamespace(container *libcontainer.Container) error { - if err := capabilities.DropCapabilities(container); err != nil { - return fmt.Errorf("drop capabilities %s", err) - } - if err := setupUser(container); err != nil { - return fmt.Errorf("setup user %s", err) - } - if container.WorkingDir != "" { - if err := system.Chdir(container.WorkingDir); err != nil { - return fmt.Errorf("chdir to %s %s", container.WorkingDir, err) - } - } - return nil -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/mount.go docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/mount.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/mount.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/mount.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,277 +0,0 @@ -// +build linux - -package nsinit - -import ( - "fmt" - "github.com/dotcloud/docker/pkg/system" - "io/ioutil" - "os" - "path/filepath" - "syscall" -) - -// default mount point flags -const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV - -// setupNewMountNamespace is used to initialize a new mount namespace for a new -// container in the rootfs that is specified. -// -// There is no need to unmount the new mounts because as soon as the mount namespace -// is no longer in use, the mounts will be removed automatically -func setupNewMountNamespace(rootfs, console string, readonly, noPivotRoot bool) error { - flag := syscall.MS_PRIVATE - if noPivotRoot { - flag = syscall.MS_SLAVE - } - if err := system.Mount("", "/", "", uintptr(flag|syscall.MS_REC), ""); err != nil { - return fmt.Errorf("mounting / as slave %s", err) - } - if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil { - return fmt.Errorf("mounting %s as bind %s", rootfs, err) - } - if readonly { - if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, ""); err != nil { - return fmt.Errorf("mounting %s as readonly %s", rootfs, err) - } - } - if err := mountSystem(rootfs); err != nil { - return fmt.Errorf("mount system %s", err) - } - if err := copyDevNodes(rootfs); err != nil { - return fmt.Errorf("copy dev nodes %s", err) - } - // In non-privileged mode, this fails. Discard the error. 
- setupLoopbackDevices(rootfs) - if err := setupDev(rootfs); err != nil { - return err - } - if err := setupPtmx(rootfs, console); err != nil { - return err - } - if err := system.Chdir(rootfs); err != nil { - return fmt.Errorf("chdir into %s %s", rootfs, err) - } - - if noPivotRoot { - if err := rootMsMove(rootfs); err != nil { - return err - } - } else { - if err := rootPivot(rootfs); err != nil { - return err - } - } - - system.Umask(0022) - - return nil -} - -// use a pivot root to set up the rootfs -func rootPivot(rootfs string) error { - pivotDir, err := ioutil.TempDir(rootfs, ".pivot_root") - if err != nil { - return fmt.Errorf("can't create pivot_root dir %s %s", pivotDir, err) - } - if err := system.Pivotroot(rootfs, pivotDir); err != nil { - return fmt.Errorf("pivot_root %s", err) - } - if err := system.Chdir("/"); err != nil { - return fmt.Errorf("chdir / %s", err) - } - // path to pivot dir now changed, update - pivotDir = filepath.Join("/", filepath.Base(pivotDir)) - if err := system.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { - return fmt.Errorf("unmount pivot_root dir %s", err) - } - if err := os.Remove(pivotDir); err != nil { - return fmt.Errorf("remove pivot_root dir %s", err) - } - return nil -} - -// use MS_MOVE and chroot to set up the rootfs -func rootMsMove(rootfs string) error { - if err := system.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil { - return fmt.Errorf("mount move %s into / %s", rootfs, err) - } - if err := system.Chroot("."); err != nil { - return fmt.Errorf("chroot . %s", err) - } - if err := system.Chdir("/"); err != nil { - return fmt.Errorf("chdir / %s", err) - } - return nil -} - -// copyDevNodes mknods the host's devices so the new container has access to them -func copyDevNodes(rootfs string) error { - oldMask := system.Umask(0000) - defer system.Umask(oldMask) - - for _, node := range []string{ - "null", - "zero", - "full", - "random", - "urandom", - "tty", - } { - if err := copyDevNode(rootfs, node); err != nil { - return err - } - } - return nil -} - -func setupLoopbackDevices(rootfs string) error { - for i := 0; ; i++ { - if err := copyDevNode(rootfs, fmt.Sprintf("loop%d", i)); err != nil { - if !os.IsNotExist(err) { - return err - } - break - } - - } - return nil -} - -func copyDevNode(rootfs, node string) error { - stat, err := os.Stat(filepath.Join("/dev", node)) - if err != nil { - return err - } - var ( - dest = filepath.Join(rootfs, "dev", node) - st = stat.Sys().(*syscall.Stat_t) - ) - if err := system.Mknod(dest, st.Mode, int(st.Rdev)); err != nil && !os.IsExist(err) { - return fmt.Errorf("copy %s %s", node, err) - } - return nil -} - -// setupDev symlinks the current process's pipes into the -// appropriate destination on the container's rootfs -func setupDev(rootfs string) error { - for _, link := range []struct { - from string - to string - }{ - {"/proc/kcore", "/dev/core"}, - {"/proc/self/fd", "/dev/fd"}, - {"/proc/self/fd/0", "/dev/stdin"}, - {"/proc/self/fd/1", "/dev/stdout"}, - {"/proc/self/fd/2", "/dev/stderr"}, - } { - dest := filepath.Join(rootfs, link.to) - if err := os.Remove(dest); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("remove %s %s", dest, err) - } - if err := os.Symlink(link.from, dest); err != nil { - return fmt.Errorf("symlink %s %s", dest, err) - } - } - return nil -} - -// setupConsole ensures that the container has a proper /dev/console setup -func setupConsole(rootfs, console string) error { - oldMask := system.Umask(0000) - defer system.Umask(oldMask) - - stat, err := os.Stat(console) 
- if err != nil { - return fmt.Errorf("stat console %s %s", console, err) - } - var ( - st = stat.Sys().(*syscall.Stat_t) - dest = filepath.Join(rootfs, "dev/console") - ) - if err := os.Remove(dest); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("remove %s %s", dest, err) - } - if err := os.Chmod(console, 0600); err != nil { - return err - } - if err := os.Chown(console, 0, 0); err != nil { - return err - } - if err := system.Mknod(dest, (st.Mode&^07777)|0600, int(st.Rdev)); err != nil { - return fmt.Errorf("mknod %s %s", dest, err) - } - if err := system.Mount(console, dest, "bind", syscall.MS_BIND, ""); err != nil { - return fmt.Errorf("bind %s to %s %s", console, dest, err) - } - return nil -} - -// mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts -// inside the mount namespace -func mountSystem(rootfs string) error { - for _, m := range []struct { - source string - path string - device string - flags int - data string - }{ - {source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags}, - {source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags}, - {source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: "mode=1777,size=65536k"}, - {source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: "newinstance,ptmxmode=0666,mode=620,gid=5"}, - } { - if err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) { - return fmt.Errorf("mkdirall %s %s", m.path, err) - } - if err := system.Mount(m.source, m.path, m.device, uintptr(m.flags), m.data); err != nil { - return fmt.Errorf("mounting %s into %s %s", m.source, m.path, err) - } - } - return nil -} - -// setupPtmx adds a symlink to pts/ptmx for /dev/ptmx and -// finishes setting up /dev/console -func setupPtmx(rootfs, console string) error { - ptmx := filepath.Join(rootfs, "dev/ptmx") - if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) { - return err - } - if err := os.Symlink("pts/ptmx", ptmx); err != nil { - return fmt.Errorf("symlink dev ptmx %s", err) - } - if console != "" { - if err := setupConsole(rootfs, console); err != nil { - return err - } - } - return nil -} - -// remountProc is used to detach and remount the proc filesystem -// commonly needed with running a new process inside an existing container -func remountProc() error { - if err := system.Unmount("/proc", syscall.MNT_DETACH); err != nil { - return err - } - if err := system.Mount("proc", "/proc", "proc", uintptr(defaultMountFlags), ""); err != nil { - return err - } - return nil -} - -func remountSys() error { - if err := system.Unmount("/sys", syscall.MNT_DETACH); err != nil { - if err != syscall.EINVAL { - return err - } - } else { - if err := system.Mount("sysfs", "/sys", "sysfs", uintptr(defaultMountFlags), ""); err != nil { - return err - } - } - return nil -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/nsinit/main.go docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/nsinit/main.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/nsinit/main.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/nsinit/main.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,110 +0,0 @@ -package main - -import ( - "encoding/json" - "flag" - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/libcontainer/nsinit" - "io/ioutil" - "log" - "os" - "path/filepath" - "strconv" 
diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/nsinit/main.go docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/nsinit/main.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/nsinit/main.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/nsinit/main.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,110 +0,0 @@ -package main - -import ( - "encoding/json" - "flag" - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/libcontainer/nsinit" - "io/ioutil" - "log" - "os" - "path/filepath" - "strconv" -) - -var ( - root, console string - pipeFd int -) - -func registerFlags() { - flag.StringVar(&console, "console", "", "console (pty slave) path") - flag.IntVar(&pipeFd, "pipe", 0, "sync pipe fd") - flag.StringVar(&root, "root", ".", "root for storing configuration data") - - flag.Parse() -} - -func main() { - registerFlags() - - if flag.NArg() < 1 { - log.Fatalf("wrong number of arguments %d", flag.NArg()) - } - container, err := loadContainer() - if err != nil { - log.Fatal(err) - } - ns, err := newNsInit() - if err != nil { - log.Fatal(err) - } - - switch flag.Arg(0) { - case "exec": // this is executed outside of the namespace in the cwd - var exitCode int - nspid, err := readPid() - if err != nil { - if !os.IsNotExist(err) { - log.Fatal(err) - } - } - if nspid > 0 { - exitCode, err = ns.ExecIn(container, nspid, flag.Args()[1:]) - } else { - term := nsinit.NewTerminal(os.Stdin, os.Stdout, os.Stderr, container.Tty) - exitCode, err = ns.Exec(container, term, flag.Args()[1:]) - } - if err != nil { - log.Fatal(err) - } - os.Exit(exitCode) - case "init": // this is executed inside of the namespace to setup the container - cwd, err := os.Getwd() - if err != nil { - log.Fatal(err) - } - if flag.NArg() < 2 { - log.Fatalf("wrong number of arguments %d", flag.NArg()) - } - syncPipe, err := nsinit.NewSyncPipeFromFd(0, uintptr(pipeFd)) - if err != nil { - log.Fatal(err) - } - if err := ns.Init(container, cwd, console, syncPipe, flag.Args()[1:]); err != nil { - log.Fatal(err) - } - default: - log.Fatalf("command not supported for nsinit %s", flag.Arg(0)) - } -} - -func loadContainer() (*libcontainer.Container, error) { - f, err := os.Open(filepath.Join(root, "container.json")) - if err != nil { - return nil, err - } - defer f.Close() - - var container *libcontainer.Container - if err := json.NewDecoder(f).Decode(&container); err != nil { - return nil, err - } - return container, nil -} - -func readPid() (int, error) { - data, err := ioutil.ReadFile(filepath.Join(root, "pid")) - if err != nil { - return -1, err - } - pid, err := strconv.Atoi(string(data)) - if err != nil { - return -1, err - } - return pid, nil -} - -func newNsInit() (nsinit.NsInit, error) { - return nsinit.NewNsInit(&nsinit.DefaultCommandFactory{root}, &nsinit.DefaultStateWriter{root}), nil -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/nsinit.go docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/nsinit.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/nsinit.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/nsinit.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -package nsinit - -import ( - "github.com/dotcloud/docker/pkg/libcontainer" -) - -// NsInit is an interface with the public-facing methods to provide high-level -// exec operations on a container -type NsInit interface { - Exec(container *libcontainer.Container, term Terminal, args []string) (int, error) - ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error) - Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error -} - -type linuxNs struct { - root string - commandFactory CommandFactory - stateWriter StateWriter -} - -func NewNsInit(command CommandFactory, state StateWriter) NsInit { - return &linuxNs{ - commandFactory: command, - stateWriter: state, - } -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/state.go docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/state.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/state.go
2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/state.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -package nsinit - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" -) - -// StateWriter handles writing and deleting the pid file -// on disk -type StateWriter interface { - WritePid(pid int) error - DeletePid() error -} - -type DefaultStateWriter struct { - Root string -} - -// WritePid writes the namespaced process's pid to the pid file in the rootfs for the container -func (d *DefaultStateWriter) WritePid(pid int) error { - return ioutil.WriteFile(filepath.Join(d.Root, "pid"), []byte(fmt.Sprint(pid)), 0655) -} - -func (d *DefaultStateWriter) DeletePid() error { - return os.Remove(filepath.Join(d.Root, "pid")) -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/sync_pipe.go docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/sync_pipe.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/sync_pipe.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/sync_pipe.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,71 +0,0 @@ -package nsinit - -import ( - "encoding/json" - "fmt" - "github.com/dotcloud/docker/pkg/libcontainer" - "io/ioutil" - "os" -) - -// SyncPipe allows communication between the child process and -// its parent and allows the two independent processes to -// synchronize their state. -type SyncPipe struct { - parent, child *os.File -} - -func NewSyncPipe() (s *SyncPipe, err error) { - s = &SyncPipe{} - s.child, s.parent, err = os.Pipe() - if err != nil { - return nil, err - } - return s, nil -} - -func NewSyncPipeFromFd(parentFd, childFd uintptr) (*SyncPipe, error) { - s := &SyncPipe{} - if parentFd > 0 { - s.parent = os.NewFile(parentFd, "parentPipe") - } else if childFd > 0 { - s.child = os.NewFile(childFd, "childPipe") - } else { - return nil, fmt.Errorf("no valid sync pipe fd specified") - } - return s, nil -} - -func (s *SyncPipe) SendToChild(context libcontainer.Context) error { - data, err := json.Marshal(context) - if err != nil { - return err - } - s.parent.Write(data) - return nil -} - -func (s *SyncPipe) ReadFromParent() (libcontainer.Context, error) { - data, err := ioutil.ReadAll(s.child) - if err != nil { - return nil, fmt.Errorf("error reading from sync pipe %s", err) - } - var context libcontainer.Context - if len(data) > 0 { - if err := json.Unmarshal(data, &context); err != nil { - return nil, err - } - } - return context, nil - -} - -func (s *SyncPipe) Close() error { - if s.parent != nil { - s.parent.Close() - } - if s.child != nil { - s.child.Close() - } - return nil -}
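To make the synchronization contract concrete, here is a minimal, hedged sketch of the same handshake run in a single process, with a goroutine standing in for the parent side; the Context payload is assumed to be a simple string map, as it is elsewhere in libcontainer:

```go
// Sketch of the SyncPipe pattern above, not code from the tree. The read end
// plays the child, the write end plays the parent, exactly as in NewSyncPipe.
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
)

type Context map[string]string // assumption: libcontainer.Context is a string map

func main() {
	child, parent, err := os.Pipe() // same layout as NewSyncPipe
	if err != nil {
		panic(err)
	}
	go func() {
		// Parent side: marshal the context, write it, and close (SendToChild).
		data, _ := json.Marshal(Context{"veth-host": "veth123"})
		parent.Write(data)
		parent.Close()
	}()
	// Child side: block until the parent has written everything (ReadFromParent).
	data, err := ioutil.ReadAll(child)
	if err != nil {
		panic(err)
	}
	var ctx Context
	json.Unmarshal(data, &ctx)
	fmt.Println(ctx["veth-host"])
}
```

Closing the write end is what unblocks the reader; the pipe itself is the synchronization point, so no extra signaling is needed.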
diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/term.go docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/term.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/term.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/term.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,118 +0,0 @@ -package nsinit - -import ( - "github.com/dotcloud/docker/pkg/term" - "io" - "os" - "os/exec" -) - -type Terminal interface { - io.Closer - SetMaster(*os.File) - Attach(*exec.Cmd) error - Resize(h, w int) error -} - -func NewTerminal(stdin io.Reader, stdout, stderr io.Writer, tty bool) Terminal { - if tty { - return &TtyTerminal{ - stdin: stdin, - stdout: stdout, - stderr: stderr, - } - } - return &StdTerminal{ - stdin: stdin, - stdout: stdout, - stderr: stderr, - } -} - -type TtyTerminal struct { - stdin io.Reader - stdout, stderr io.Writer - master *os.File - state *term.State -} - -func (t *TtyTerminal) Resize(h, w int) error { - return term.SetWinsize(t.master.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) -} - -func (t *TtyTerminal) SetMaster(master *os.File) { - t.master = master -} - -func (t *TtyTerminal) Attach(command *exec.Cmd) error { - go io.Copy(t.stdout, t.master) - go io.Copy(t.master, t.stdin) - - state, err := t.setupWindow(t.master, os.Stdin) - if err != nil { - command.Process.Kill() - return err - } - t.state = state - return err -} - -// SetupWindow gets the parent window size and sets the master -// pty to the current size and sets the parent's mode to RAW -func (t *TtyTerminal) setupWindow(master, parent *os.File) (*term.State, error) { - ws, err := term.GetWinsize(parent.Fd()) - if err != nil { - return nil, err - } - if err := term.SetWinsize(master.Fd(), ws); err != nil { - return nil, err - } - return term.SetRawTerminal(parent.Fd()) -} - -func (t *TtyTerminal) Close() error { - term.RestoreTerminal(os.Stdin.Fd(), t.state) - return t.master.Close() -} - -type StdTerminal struct { - stdin io.Reader - stdout, stderr io.Writer -} - -func (s *StdTerminal) SetMaster(*os.File) { - // no need to set master on non tty -} - -func (s *StdTerminal) Close() error { - return nil -} - -func (s *StdTerminal) Resize(h, w int) error { - return nil -} - -func (s *StdTerminal) Attach(command *exec.Cmd) error { - inPipe, err := command.StdinPipe() - if err != nil { - return err - } - outPipe, err := command.StdoutPipe() - if err != nil { - return err - } - errPipe, err := command.StderrPipe() - if err != nil { - return err - } - - go func() { - defer inPipe.Close() - io.Copy(inPipe, s.stdin) - }() - - go io.Copy(s.stdout, outPipe) - go io.Copy(s.stderr, errPipe) - - return nil -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/unsupported.go docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/unsupported.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/nsinit/unsupported.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/nsinit/unsupported.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -// +build !linux - -package nsinit - -import ( - "github.com/dotcloud/docker/pkg/libcontainer" -) - -func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args []string) (int, error) { - return -1, libcontainer.ErrUnsupported -} - -func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error) { - return -1, libcontainer.ErrUnsupported -} - -func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error { - return libcontainer.ErrUnsupported -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/README.md docker.io-1.3.2~dfsg1/pkg/libcontainer/README.md --- docker.io-0.9.1~dfsg1/pkg/libcontainer/README.md 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,90 +0,0 @@ -## libcontainer - reference implementation for containers - -#### background - -libcontainer specifies configuration options for what a container is. It provides a native Go implementation -for using linux namespaces with no external dependencies. libcontainer provides many convenience functions for working with namespaces, networking, and management. - - -#### container -A container is a self-contained directory that is able to run one or more processes without -affecting the host system. The directory is usually a full system tree.
Inside the directory -a `container.json` file is placed with the runtime configuration for how the processes -should be contained and run. Environment, networking, and different capabilities for the -process are specified in this file. The configuration is used for each process executed inside the container. - -Sample `container.json` file: -```json -{ - "hostname": "koye", - "tty": true, - "environment": [ - "HOME=/", - "PATH=$PATH:/bin:/usr/bin:/sbin:/usr/sbin", - "container=docker", - "TERM=xterm-256color" - ], - "namespaces": [ - "NEWIPC", - "NEWNS", - "NEWPID", - "NEWUTS", - "NEWNET" - ], - "capabilities": [ - "SETPCAP", - "SYS_MODULE", - "SYS_RAWIO", - "SYS_PACCT", - "SYS_ADMIN", - "SYS_NICE", - "SYS_RESOURCE", - "SYS_TIME", - "SYS_TTY_CONFIG", - "MKNOD", - "AUDIT_WRITE", - "AUDIT_CONTROL", - "MAC_OVERRIDE", - "MAC_ADMIN", - "NET_ADMIN" - ], - "networks": [{ - "type": "veth", - "context": { - "bridge": "docker0", - "prefix": "dock" - }, - "address": "172.17.0.100/16", - "gateway": "172.17.42.1", - "mtu": 1500 - } - ], - "cgroups": { - "name": "docker-koye", - "parent": "docker", - "memory": 5248000 - } -} -``` - -Using this configuration and the current directory holding the rootfs for a process, one can use libcontainer to exec the container. For the life of the namespace, a `pid` file -is written to the current directory, exposing the pid of the namespaced process to the external world. A client can use this pid to wait, kill, or perform other operations on the container. If a user tries to run a new process inside an existing container with a live namespace, the new process will join the existing namespace. - - -You may also specify an alternate root directory from which the `container.json` file is read and where the `pid` file will be saved. - -#### nsinit - -`nsinit` is a CLI application used as the reference implementation of libcontainer. It is able to -spawn new containers or join existing ones, given the current directory. To use `nsinit`, cd into a linux -rootfs and copy a `container.json` file into the directory with your specified configuration. - -To execute `/bin/bash` in the current directory as a container, just run: -```bash -nsinit exec /bin/bash -``` - -If you wish to spawn another process inside the container while your current bash session is -running, just run the exact same command again to get another bash shell (or change the command). If the original process, PID 1, dies, all other processes spawned inside the container will also be killed and the namespace will be removed. - -You can tell whether a process is running in a container by checking for a `pid` file in the root of the directory.
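The README's client contract (wait on, kill, or join the container via its `pid` file) is small enough to sketch. The following hedged example, not code from the tree, reads the pid file from a hypothetical container root and kills PID 1 of the container; it is Unix-only:

```go
// Sketch of the external-client interaction the README describes: read the
// pid file written for the life of the namespace and signal the process.
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
	"syscall"
)

func main() {
	data, err := ioutil.ReadFile("/containers/demo/pid") // hypothetical root
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	pid, err := strconv.Atoi(strings.TrimSpace(string(data)))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Killing PID 1 of the container tears down every process inside it,
	// and the namespace goes away with them.
	if err := syscall.Kill(pid, syscall.SIGKILL); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```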
diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/TODO.md docker.io-1.3.2~dfsg1/pkg/libcontainer/TODO.md --- docker.io-0.9.1~dfsg1/pkg/libcontainer/TODO.md 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/TODO.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ -#### goals -* small and simple - line count is not everything but less code is better -* clean lines between what we do in the pkg -* provide primitives for working with namespaces, not cater to every option -* extend via configuration, not by features - host networking, no networking, and veth networking can all be accomplished by adjusting the container.json, with no code changes - -#### tasks -* proper tty for a new process in an existing container -* use exec or raw syscalls for new process in existing container -* set up proper user in namespace if specified -* implement hook or clean interface for cgroups -* example configs for different setups (host networking, boot init) -* improve pkg documentation with comments -* testing - this is hard in a low-level pkg but we could do some, maybe -* pivot root -* selinux -* apparmor diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/types.go docker.io-1.3.2~dfsg1/pkg/libcontainer/types.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/types.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/types.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,137 +0,0 @@ -package libcontainer - -import ( - "encoding/json" - "errors" - "github.com/syndtr/gocapability/capability" -) - -var ( - ErrUnknownNamespace = errors.New("Unknown namespace") - ErrUnknownCapability = errors.New("Unknown capability") - ErrUnsupported = errors.New("Unsupported method") -) - -// namespaceList is used to convert the libcontainer types -// into the names of the files located in /proc/<pid>/ns/* for -// each namespace -var ( - namespaceList = Namespaces{} - - capabilityList = Capabilities{ - {Key: "SETPCAP", Value: capability.CAP_SETPCAP}, - {Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE}, - {Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO}, - {Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT}, - {Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN}, - {Key: "SYS_NICE", Value: capability.CAP_SYS_NICE}, - {Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE}, - {Key: "SYS_TIME", Value: capability.CAP_SYS_TIME}, - {Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG}, - {Key: "MKNOD", Value: capability.CAP_MKNOD}, - {Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE}, - {Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL}, - {Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE}, - {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN}, - {Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN}, - } -) - -type ( - Namespace struct { - Key string - Value int - File string - } - Namespaces []*Namespace -) - -func (ns *Namespace) String() string { - return ns.Key -} - -func (ns *Namespace) MarshalJSON() ([]byte, error) { - return json.Marshal(ns.Key) -} - -func (ns *Namespace) UnmarshalJSON(src []byte) error { - var nsName string - if err := json.Unmarshal(src, &nsName); err != nil { - return err - } - ret := GetNamespace(nsName) - if ret == nil { - return ErrUnknownNamespace - } - *ns = *ret - return nil -} - -func GetNamespace(key string) *Namespace { - for _, ns := range namespaceList { - if ns.Key == key { - return ns - } - } - return nil -}
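The custom JSON methods are what let `container.json` spell namespaces as bare strings like "NEWPID". A hypothetical round-trip, assuming the linux build has populated namespaceList (see types_linux.go below) and that encoding/json and fmt are imported in the enclosing file:

```go
// Illustration only, not a test from the tree.
func ExampleNamespaceJSON() {
	var ns Namespace
	if err := json.Unmarshal([]byte(`"NEWPID"`), &ns); err != nil {
		panic(err) // an unknown name such as "FOO" yields ErrUnknownNamespace
	}
	out, _ := json.Marshal(&ns)
	fmt.Println(ns.File, string(out))
	// Output: pid "NEWPID"
}
```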
- -// Contains returns true if the specified Namespace is -// in the slice -func (n Namespaces) Contains(ns string) bool { - for _, nsp := range n { - if nsp.Key == ns { - return true - } - } - return false -} - -type ( - Capability struct { - Key string - Value capability.Cap - } - Capabilities []*Capability -) - -func (c *Capability) String() string { - return c.Key -} - -func (c *Capability) MarshalJSON() ([]byte, error) { - return json.Marshal(c.Key) -} - -func (c *Capability) UnmarshalJSON(src []byte) error { - var capName string - if err := json.Unmarshal(src, &capName); err != nil { - return err - } - ret := GetCapability(capName) - if ret == nil { - return ErrUnknownCapability - } - *c = *ret - return nil -} - -func GetCapability(key string) *Capability { - for _, capp := range capabilityList { - if capp.Key == key { - return capp - } - } - return nil -} - -// Contains returns true if the specified Capability is -// in the slice -func (c Capabilities) Contains(capp string) bool { - for _, cap := range c { - if cap.Key == capp { - return true - } - } - return false -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/types_linux.go docker.io-1.3.2~dfsg1/pkg/libcontainer/types_linux.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/types_linux.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/types_linux.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ -package libcontainer - -import ( - "syscall" -) - -func init() { - namespaceList = Namespaces{ - {Key: "NEWNS", Value: syscall.CLONE_NEWNS, File: "mnt"}, - {Key: "NEWUTS", Value: syscall.CLONE_NEWUTS, File: "uts"}, - {Key: "NEWIPC", Value: syscall.CLONE_NEWIPC, File: "ipc"}, - {Key: "NEWUSER", Value: syscall.CLONE_NEWUSER, File: "user"}, - {Key: "NEWPID", Value: syscall.CLONE_NEWPID, File: "pid"}, - {Key: "NEWNET", Value: syscall.CLONE_NEWNET, File: "net"}, - } -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/types_test.go docker.io-1.3.2~dfsg1/pkg/libcontainer/types_test.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/types_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/types_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -package libcontainer - -import ( - "testing" -) - -func TestNamespacesContains(t *testing.T) { - ns := Namespaces{ - GetNamespace("NEWPID"), - GetNamespace("NEWNS"), - GetNamespace("NEWUTS"), - } - - if ns.Contains("NEWNET") { - t.Fatal("namespaces should not contain NEWNET") - } - - if !ns.Contains("NEWPID") { - t.Fatal("namespaces should contain NEWPID but does not") - } -} - -func TestCapabilitiesContains(t *testing.T) { - caps := Capabilities{ - GetCapability("MKNOD"), - GetCapability("SETPCAP"), - } - - if caps.Contains("SYS_ADMIN") { - t.Fatal("capabilities should not contain SYS_ADMIN") - } - if !caps.Contains("MKNOD") { - t.Fatal("capabilities should contain MKNOD but does not") - } -} diff -Nru docker.io-0.9.1~dfsg1/pkg/libcontainer/utils/utils.go docker.io-1.3.2~dfsg1/pkg/libcontainer/utils/utils.go --- docker.io-0.9.1~dfsg1/pkg/libcontainer/utils/utils.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/libcontainer/utils/utils.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -package utils - -import ( - "crypto/rand" - "encoding/hex" - "io" - "path/filepath" -) - -// GenerateRandomName returns a new name joined with a prefix.
The size -// specified is used to truncate the randomly generated value -func GenerateRandomName(prefix string, size int) (string, error) { - id := make([]byte, 32) - if _, err := io.ReadFull(rand.Reader, id); err != nil { - return "", err - } - return prefix + hex.EncodeToString(id)[:size], nil -} - -// ResolveRootfs ensures that the current working directory is -// not a symlink and returns the absolute path to the rootfs -func ResolveRootfs(uncleanRootfs string) (string, error) { - rootfs, err := filepath.Abs(uncleanRootfs) - if err != nil { - return "", err - } - return filepath.EvalSymlinks(rootfs) -} diff -Nru docker.io-0.9.1~dfsg1/pkg/log/log.go docker.io-1.3.2~dfsg1/pkg/log/log.go --- docker.io-0.9.1~dfsg1/pkg/log/log.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/log/log.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,83 @@ +package log + +import ( + "fmt" + "io" + "os" + "runtime" + "strings" +) + +type priority int + +const ( + errorFormat = "[%s] %s:%d %s\n" + logFormat = "[%s] %s\n" + + fatal priority = iota + error + info + debug +) + +// A common interface to access the Fatal method of +// both testing.B and testing.T. +type Fataler interface { + Fatal(args ...interface{}) +} + +func (p priority) String() string { + switch p { + case fatal: + return "fatal" + case error: + return "error" + case info: + return "info" + case debug: + return "debug" + } + + return "" +} + +// Debugf displays the message only if the debug flag is set; otherwise it does nothing. +// If Docker is in daemon mode, also send the debug info on the socket +func Debugf(format string, a ...interface{}) { + if os.Getenv("DEBUG") != "" { + logf(os.Stderr, debug, format, a...) + } +} + +func Infof(format string, a ...interface{}) { + logf(os.Stdout, info, format, a...) +} + +func Errorf(format string, a ...interface{}) { + logf(os.Stderr, error, format, a...) +} + +func Fatalf(format string, a ...interface{}) { + logf(os.Stderr, fatal, format, a...) + os.Exit(1) +} + +func logf(stream io.Writer, level priority, format string, a ...interface{}) { + var prefix string + + if level <= error || level == debug { + // Retrieve the stack infos + _, file, line, ok := runtime.Caller(2) + if !ok { + file = "<unknown>" + line = -1 + } else { + file = file[strings.LastIndex(file, "/")+1:] + } + prefix = fmt.Sprintf(errorFormat, level.String(), file, line, format) + } else { + prefix = fmt.Sprintf(logFormat, level.String(), format) + } + + fmt.Fprintf(stream, prefix, a...) +}
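A small usage sketch for the new log package (assuming the import path this patch introduces); the messages are invented and the file:line values in the comments are illustrative:

```go
package main

import (
	"os"

	"github.com/docker/docker/pkg/log"
)

func main() {
	os.Setenv("DEBUG", "1") // Debugf prints only when DEBUG is non-empty

	log.Infof("listening on %s", "/var/run/docker.sock") // "[info] listening on /var/run/docker.sock"
	log.Errorf("no such container: %s", "deadbeef")      // "[error] main.go:NN no such container: deadbeef"
	log.Debugf("loaded %d containers", 3)                // "[debug] main.go:NN loaded 3 containers"
}
```

Note that only fatal, error, and debug levels carry the caller's file and line; info uses the shorter logFormat.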
diff -Nru docker.io-0.9.1~dfsg1/pkg/log/log_test.go docker.io-1.3.2~dfsg1/pkg/log/log_test.go --- docker.io-0.9.1~dfsg1/pkg/log/log_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/log/log_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,37 @@ +package log + +import ( + "bytes" + "regexp" + + "testing" +) + +func TestLogFatalf(t *testing.T) { + var output *bytes.Buffer + + tests := []struct { + Level priority + Format string + Values []interface{} + ExpectedPattern string + }{ + {fatal, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[fatal\\] testing.go:\\d+ 1 \\+ 1 = 2"}, + {error, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[error\\] testing.go:\\d+ 1 \\+ 1 = 2"}, + {info, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[info\\] 1 \\+ 1 = 2"}, + {debug, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[debug\\] testing.go:\\d+ 1 \\+ 1 = 2"}, + } + + for i, test := range tests { + output = &bytes.Buffer{} + logf(output, test.Level, test.Format, test.Values...) + + expected := regexp.MustCompile(test.ExpectedPattern) + if !expected.MatchString(output.String()) { + t.Errorf("[%d] Log output does not match expected pattern:\n\tExpected: %s\n\tOutput: %s", + i, + expected.String(), + output.String()) + } + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/mflag/example/example.go docker.io-1.3.2~dfsg1/pkg/mflag/example/example.go --- docker.io-0.9.1~dfsg1/pkg/mflag/example/example.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/mflag/example/example.go 2014-11-24 17:38:01.000000000 +0000 @@ -2,7 +2,8 @@ import ( "fmt" - flag "github.com/dotcloud/docker/pkg/mflag" + + flag "github.com/docker/docker/pkg/mflag" ) var ( @@ -19,15 +20,17 @@ flag.IntVar(&i, []string{"-integer", "-number"}, -1, "a simple integer") flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") //-s -hidden and --string will work, but -hidden won't be in the usage flag.BoolVar(&h, []string{"h", "#help", "-help"}, false, "display the help") + flag.StringVar(&str, []string{"mode"}, "mode1", "set the mode\nmode1: use the mode1\nmode2: use the mode2\nmode3: use the mode3") flag.Parse() } func main() { if h { flag.PrintDefaults() + } else { + fmt.Printf("s/#hidden/-string: %s\n", str) + fmt.Printf("b: %t\n", b) + fmt.Printf("-bool: %t\n", b2) + fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String()) + fmt.Printf("ARGS: %v\n", flag.Args()) } - fmt.Printf("s/#hidden/-string: %s\n", str) - fmt.Printf("b: %b\n", b) - fmt.Printf("-bool: %b\n", b2) - fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String()) - fmt.Printf("ARGS: %v\n", flag.Args()) } diff -Nru docker.io-0.9.1~dfsg1/pkg/mflag/flag.go docker.io-1.3.2~dfsg1/pkg/mflag/flag.go --- docker.io-0.9.1~dfsg1/pkg/mflag/flag.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/mflag/flag.go 2014-11-24 17:38:01.000000000 +0000 @@ -10,7 +10,7 @@ Define flags using flag.String(), Bool(), Int(), etc. This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int. - import "flag" + import flag "github.com/docker/docker/pkg/mflag" var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname") If you like, you can bind the flag to a variable using the Var() functions. var flagvar int @@ -23,6 +23,18 @@ flag.Var(&flagVal, []string{"name"}, "help message for flagname") For such flags, the default value is just the initial value of the variable. + You can also add "deprecated" flags, they are still usable, but are not shown + in the usage and will display a warning when you try to use them: + var ip = flag.Int([]string{"f", "#flagname", "-flagname"}, 1234, "help message for flagname") + this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.` and + var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname") + will display: `Warning: '-flagname' is deprecated, it will be removed soon. See usage.` + + You can also group one-letter flags: if you declare + var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose") + var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow") + you will be able to use -vs or -sv + After all flags are defined, call flag.Parse() to parse the command line into the defined flags. @@ -39,6 +51,8 @@ Command line flag syntax: -flag -flag=x + -flag="x" + -flag='x' -flag x // non-boolean flags only One or two minus signs may be used; they are equivalent. The last form is not permitted for boolean flags because the
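A runnable sketch of the grouped one-letter flags described in that doc comment, using only API calls that appear elsewhere in this patch (BoolVar, Parse); the flag names are the doc comment's own example:

```go
package main

import (
	"fmt"

	flag "github.com/docker/docker/pkg/mflag"
)

var v, s bool

func init() {
	flag.BoolVar(&v, []string{"v", "-verbose"}, false, "help message for verbose")
	flag.BoolVar(&s, []string{"s", "-slow"}, false, "help message for slow")
}

func main() {
	flag.Parse() // "prog -vs" and "prog -sv" both set v and s to true
	fmt.Printf("verbose=%t slow=%t\n", v, s)
}
```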
@@ -71,6 +85,7 @@ "sort" "strconv" "strings" + "text/tabwriter" "time" ) @@ -286,11 +301,29 @@ DefValue string // default value (as text); for usage message } +type flagSlice []string + +func (p flagSlice) Len() int { return len(p) } +func (p flagSlice) Less(i, j int) bool { + pi, pj := strings.TrimPrefix(p[i], "-"), strings.TrimPrefix(p[j], "-") + lpi, lpj := strings.ToLower(pi), strings.ToLower(pj) + if lpi != lpj { + return lpi < lpj + } + return pi < pj +} +func (p flagSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + // sortFlags returns the flags as a slice in lexicographical sorted order. func sortFlags(flags map[string]*Flag) []*Flag { - var list sort.StringSlice - for _, f := range flags { + var list flagSlice + + // The sorted list is based on the first name, while the flag map might use the other names. + nameMap := make(map[string]string) + + for n, f := range flags { fName := strings.TrimPrefix(f.Names[0], "#") + nameMap[fName] = n if len(f.Names) == 1 { list = append(list, fName) continue @@ -307,10 +340,10 @@ list = append(list, fName) } } - list.Sort() + sort.Sort(list) result := make([]*Flag, len(list)) for i, name := range list { - result[i] = flags[name] + result[i] = flags[nameMap[name]] } return result } @@ -392,11 +425,12 @@ // PrintDefaults prints, to standard error unless configured // otherwise, the default values of all defined flags in the set. func (f *FlagSet) PrintDefaults() { + writer := tabwriter.NewWriter(f.out(), 20, 1, 3, ' ', 0) f.VisitAll(func(flag *Flag) { - format := " -%s=%s: %s\n" + format := " -%s=%s" if _, ok := flag.Value.(*stringValue); ok { // put quotes on the value - format = " -%s=%q: %s\n" + format = " -%s=%q" } names := []string{} for _, name := range flag.Names { @@ -405,9 +439,16 @@ } } if len(names) > 0 { - fmt.Fprintf(f.out(), format, strings.Join(names, ", -"), flag.DefValue, flag.Usage) + fmt.Fprintf(writer, format, strings.Join(names, ", -"), flag.DefValue) + for i, line := range strings.Split(flag.Usage, "\n") { + if i != 0 { + line = " " + line + } + fmt.Fprintln(writer, "\t", line) + } } }) + writer.Flush() } // PrintDefaults prints to standard error the default values of all defined command-line flags. @@ -436,6 +477,23 @@ PrintDefaults() } +// FlagCount returns the number of flags that have been defined. +func (f *FlagSet) FlagCount() int { return len(sortFlags(f.formal)) } + +// FlagCountUndeprecated returns the number of undeprecated flags that have been defined. +func (f *FlagSet) FlagCountUndeprecated() int { + count := 0 + for _, flag := range sortFlags(f.formal) { + for _, name := range flag.Names { + if name[0] != '#' { + count++ + break + } + } + } + return count +} + // NFlag returns the number of flags that have been set. func (f *FlagSet) NFlag() int { return len(f.actual) }
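PrintDefaults now routes its output through text/tabwriter so flag names and usage strings form two aligned columns, with the `\n`-separated continuation lines of a usage string indented into the second column. A standalone sketch of that formatting pattern, not taken from the tree, using the same writer parameters (minwidth 20, tabwidth 1, padding 3, space as pad character):

```go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
	fmt.Fprintln(w, "  -s, --string=\"\"\t a simple string")
	fmt.Fprintln(w, "  --mode=\"mode1\"\t set the mode")
	fmt.Fprintln(w, "\t mode1: use the mode1") // continuation line stays in column two
	w.Flush() // tabwriter buffers until Flush, just as PrintDefaults does
}
```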
@@ -737,6 +795,40 @@ } } +func trimQuotes(str string) string { + if len(str) == 0 { + return str + } + type quote struct { + start, end byte + } + + // All valid quote types. + quotes := []quote{ + // Double quotes + { + start: '"', + end: '"', + }, + + // Single quotes + { + start: '\'', + end: '\'', + }, + } + + for _, quote := range quotes { + // Only strip if outermost match. + if str[0] == quote.start && str[len(str)-1] == quote.end { + str = str[1 : len(str)-1] + break + } + } + + return str +} + // parseOne parses one flag. It reports whether a flag was seen. func (f *FlagSet) parseOne() (bool, string, error) { if len(f.args) == 0 { @@ -759,14 +851,12 @@ f.args = f.args[1:] has_value := false value := "" - for i := 1; i < len(name); i++ { // equals cannot be first - if name[i] == '=' { - value = name[i+1:] - has_value = true - name = name[0:i] - break - } + if i := strings.Index(name, "="); i != -1 { + value = trimQuotes(name[i+1:]) + has_value = true + name = name[:i] } + m := f.formal flag, alreadythere := m[name] // BUG if !alreadythere { diff -Nru docker.io-0.9.1~dfsg1/pkg/mflag/flag_test.go docker.io-1.3.2~dfsg1/pkg/mflag/flag_test.go --- docker.io-0.9.1~dfsg1/pkg/mflag/flag_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/mflag/flag_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -7,7 +7,7 @@ import ( "bytes" "fmt" - . "github.com/dotcloud/docker/pkg/mflag" + . "github.com/docker/docker/pkg/mflag" "os" "sort" "strings" @@ -173,6 +173,12 @@ uintFlag := f.Uint([]string{"uint"}, 0, "uint value") uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value") stringFlag := f.String([]string{"string"}, "0", "string value") + singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value") + doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value") + mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value") + mixed2QuoteFlag := f.String([]string{"mquote2"}, "", "mixed2 quoted value") + nestedQuoteFlag := f.String([]string{"nquote"}, "", "nested quoted value") + nested2QuoteFlag := f.String([]string{"nquote2"}, "", "nested2 quoted value") float64Flag := f.Float64([]string{"float64"}, 0, "float64 value") durationFlag := f.Duration([]string{"duration"}, 5*time.Second, "time.Duration value") extra := "one-extra-argument" @@ -184,6 +190,12 @@ "-uint", "24", "--uint64", "25", "-string", "hello", + "-squote='single'", + `-dquote="double"`, + `-mquote='mixed"`, + `-mquote2="mixed2'`, + `-nquote="'single nested'"`, + `-nquote2='"double nested"'`, "-float64", "2718e28", "-duration", "2m", extra, @@ -215,6 +227,24 @@ if *stringFlag != "hello" { t.Error("string flag should be `hello`, is ", *stringFlag) } + if *singleQuoteFlag != "single" { + t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag) + } + if *doubleQuoteFlag != "double" { + t.Error("double quote string flag should be `double`, is ", *doubleQuoteFlag) + } + if *mixedQuoteFlag != `'mixed"` { + t.Error("mixed quote string flag should be `'mixed\"`, is ", *mixedQuoteFlag) + } + if *mixed2QuoteFlag != `"mixed2'` { + t.Error("mixed2 quote string flag should be `\"mixed2'`, is ", *mixed2QuoteFlag) + } + if *nestedQuoteFlag != "'single nested'" { + t.Error("nested quote string flag should be `'single nested'`, is ", *nestedQuoteFlag) + } + if *nested2QuoteFlag != `"double nested"` { + t.Error("double quote string flag should be `\"double nested\"`, is ", *nested2QuoteFlag) + } if *float64Flag != 2718e28 { t.Error("float64 flag should be 2718e28, is ", *float64Flag) } @@ -398,3 +428,79 @@ t.Fatal("help was called; should not have been for defined help flag") } } + +// Test the flag count functions.
+func TestFlagCounts(t *testing.T) { + fs := NewFlagSet("help test", ContinueOnError) + var flag bool + fs.BoolVar(&flag, []string{"flag1"}, false, "regular flag") + fs.BoolVar(&flag, []string{"#deprecated1"}, false, "regular flag") + fs.BoolVar(&flag, []string{"f", "flag2"}, false, "regular flag") + fs.BoolVar(&flag, []string{"#d", "#deprecated2"}, false, "regular flag") + fs.BoolVar(&flag, []string{"flag3"}, false, "regular flag") + fs.BoolVar(&flag, []string{"g", "#flag4", "-flag4"}, false, "regular flag") + + if fs.FlagCount() != 6 { + t.Fatal("FlagCount wrong. ", fs.FlagCount()) + } + if fs.FlagCountUndeprecated() != 4 { + t.Fatal("FlagCountUndeprecated wrong. ", fs.FlagCountUndeprecated()) + } + if fs.NFlag() != 0 { + t.Fatal("NFlag wrong. ", fs.NFlag()) + } + err := fs.Parse([]string{"-fd", "-g", "-flag4"}) + if err != nil { + t.Fatal("expected no error; got ", err) + } + if fs.NFlag() != 4 { + t.Fatal("NFlag wrong. ", fs.NFlag()) + } +} + +// Expose a bug in sortFlags +func TestSortFlags(t *testing.T) { + fs := NewFlagSet("help TestSortFlags", ContinueOnError) + + var err error + + var b bool + fs.BoolVar(&b, []string{"b", "-banana"}, false, "usage") + + err = fs.Parse([]string{"--banana=true"}) + if err != nil { + t.Fatal("expected no error; got ", err) + } + + count := 0 + + fs.VisitAll(func(flag *Flag) { + count++ + if flag == nil { + t.Fatal("VisitAll should not return a nil flag") + } + }) + flagcount := fs.FlagCount() + if flagcount != count { + t.Fatalf("FlagCount (%d) != number (%d) of elements visited", flagcount, count) + } + // Make sure it's idempotent + if flagcount != fs.FlagCount() { + t.Fatalf("FlagCount (%d) != fs.FlagCount() (%d) of elements visited", flagcount, fs.FlagCount()) + } + + count = 0 + fs.Visit(func(flag *Flag) { + count++ + if flag == nil { + t.Fatal("Visit should not return a nil flag") + } + }) + nflag := fs.NFlag() + if nflag != count { + t.Fatalf("NFlag (%d) != number (%d) of elements visited", nflag, count) + } + if nflag != fs.NFlag() { + t.Fatalf("NFlag (%d) != fs.NFlag() (%d) of elements visited", nflag, fs.NFlag()) + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/mflag/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/mflag/MAINTAINERS --- docker.io-0.9.1~dfsg1/pkg/mflag/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/mflag/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +Victor Vieux (@vieux) diff -Nru docker.io-0.9.1~dfsg1/pkg/mount/flags_freebsd.go docker.io-1.3.2~dfsg1/pkg/mount/flags_freebsd.go --- docker.io-0.9.1~dfsg1/pkg/mount/flags_freebsd.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/mount/flags_freebsd.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,28 @@ +// +build freebsd,cgo + +package mount + +/* +#include <sys/mount.h> +*/ +import "C" + +const ( + RDONLY = C.MNT_RDONLY + NOSUID = C.MNT_NOSUID + NOEXEC = C.MNT_NOEXEC + SYNCHRONOUS = C.MNT_SYNCHRONOUS + NOATIME = C.MNT_NOATIME + + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NODEV = 0 + NODIRATIME = 0 + PRIVATE = 0 + RBIND = 0 + RELATIVE = 0 + RELATIME = 0 + REMOUNT = 0 + STRICTATIME = 0 +) diff -Nru docker.io-0.9.1~dfsg1/pkg/mount/flags.go docker.io-1.3.2~dfsg1/pkg/mount/flags.go --- docker.io-0.9.1~dfsg1/pkg/mount/flags.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/mount/flags.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,62 @@ +package mount + +import ( + "strings" +) + +// Parse fstab type mount options into mount() flags +// and device specific data +func parseOptions(options string) (int, string) {
+ var ( + flag int + data []string + ) + + flags := map[string]struct { + clear bool + flag int + }{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "private": {false, PRIVATE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, + } + + for _, o := range strings.Split(options, ",") { + // If the option does not exist in the flags table or the flag + // is not supported on the platform, + // then it is a data value for a specific fs type + if f, exists := flags[o]; exists && f.flag != 0 { + if f.clear { + flag &= ^f.flag + } else { + flag |= f.flag + } + } else { + data = append(data, o) + } + } + return flag, strings.Join(data, ",") +}
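parseOptions is unexported, so the following is only an illustration of its contract rather than code from the tree: fstab-style options split into mount(2) flag bits plus leftover filesystem-specific data, with clear-style options (rw, suid, dev, ...) masking bits back out.

```go
// Hypothetical call site inside package mount.
flag, data := parseOptions("noatime,nodev,size=65536k")
// flag == NOATIME|NODEV  (the platform constants defined below)
// data == "size=65536k"  (unrecognized options pass through to the fs driver)
```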
diff -Nru docker.io-0.9.1~dfsg1/pkg/mount/flags_linux.go docker.io-1.3.2~dfsg1/pkg/mount/flags_linux.go --- docker.io-0.9.1~dfsg1/pkg/mount/flags_linux.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/mount/flags_linux.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,64 +1,23 @@ -// +build amd64 - package mount import ( - "strings" "syscall" ) -// Parse fstab type mount options into mount() flags -// and device specific data -func parseOptions(options string) (int, string) { - var ( - flag int - data []string - ) - - flags := map[string]struct { - clear bool - flag int - }{ - "defaults": {false, 0}, - "ro": {false, syscall.MS_RDONLY}, - "rw": {true, syscall.MS_RDONLY}, - "suid": {true, syscall.MS_NOSUID}, - "nosuid": {false, syscall.MS_NOSUID}, - "dev": {true, syscall.MS_NODEV}, - "nodev": {false, syscall.MS_NODEV}, - "exec": {true, syscall.MS_NOEXEC}, - "noexec": {false, syscall.MS_NOEXEC}, - "sync": {false, syscall.MS_SYNCHRONOUS}, - "async": {true, syscall.MS_SYNCHRONOUS}, - "dirsync": {false, syscall.MS_DIRSYNC}, - "remount": {false, syscall.MS_REMOUNT}, - "mand": {false, syscall.MS_MANDLOCK}, - "nomand": {true, syscall.MS_MANDLOCK}, - "atime": {true, syscall.MS_NOATIME}, - "noatime": {false, syscall.MS_NOATIME}, - "diratime": {true, syscall.MS_NODIRATIME}, - "nodiratime": {false, syscall.MS_NODIRATIME}, - "bind": {false, syscall.MS_BIND}, - "rbind": {false, syscall.MS_BIND | syscall.MS_REC}, - "private": {false, syscall.MS_PRIVATE}, - "relatime": {false, syscall.MS_RELATIME}, - "norelatime": {true, syscall.MS_RELATIME}, - "strictatime": {false, syscall.MS_STRICTATIME}, - "nostrictatime": {true, syscall.MS_STRICTATIME}, - } - - for _, o := range strings.Split(options, ",") { - // If the option does not exist in the flags table then it is a - // data value for a specific fs type - if f, exists := flags[o]; exists { - if f.clear { - flag &= ^f.flag - } else { - flag |= f.flag - } - } else { - data = append(data, o) - } - } - return flag, strings.Join(data, ",") -} +const ( + RDONLY = syscall.MS_RDONLY + NOSUID = syscall.MS_NOSUID + NODEV = syscall.MS_NODEV + NOEXEC = syscall.MS_NOEXEC + SYNCHRONOUS = syscall.MS_SYNCHRONOUS + DIRSYNC = syscall.MS_DIRSYNC + REMOUNT = syscall.MS_REMOUNT + MANDLOCK = syscall.MS_MANDLOCK + NOATIME = syscall.MS_NOATIME + NODIRATIME = syscall.MS_NODIRATIME + BIND = syscall.MS_BIND + RBIND = syscall.MS_BIND | syscall.MS_REC + PRIVATE = syscall.MS_PRIVATE + RELATIME = syscall.MS_RELATIME + STRICTATIME = syscall.MS_STRICTATIME +) diff -Nru docker.io-0.9.1~dfsg1/pkg/mount/flags_unsupported.go docker.io-1.3.2~dfsg1/pkg/mount/flags_unsupported.go --- docker.io-0.9.1~dfsg1/pkg/mount/flags_unsupported.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/mount/flags_unsupported.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,7 +1,22 @@ -// +build !linux !amd64 +// +build !linux,!freebsd freebsd,!cgo package mount -func parseOptions(options string) (int, string) { - panic("Not implemented") -} +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + PRIVATE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 +) diff -Nru docker.io-0.9.1~dfsg1/pkg/mount/mounter_freebsd.go docker.io-1.3.2~dfsg1/pkg/mount/mounter_freebsd.go --- docker.io-0.9.1~dfsg1/pkg/mount/mounter_freebsd.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/mount/mounter_freebsd.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,59 @@ +package mount + +/* +#include <errno.h> +#include <stdlib.h> +#include <string.h> +#include <sys/_iovec.h> +#include <sys/mount.h> +#include <sys/uio.h> +*/ +import "C" + +import ( + "fmt" + "strings" + "syscall" + "unsafe" +) + +func allocateIOVecs(options []string) []C.struct_iovec { + out := make([]C.struct_iovec, len(options)) + for i, option := range options { + out[i].iov_base = unsafe.Pointer(C.CString(option)) + out[i].iov_len = C.size_t(len(option) + 1) + } + return out +} + +func mount(device, target, mType string, flag uintptr, data string) error { + isNullFS := false + + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + } + } + + options := []string{"fspath", target} + if isNullFS { + options = append(options, "fstype", "nullfs", "target", device) + } else { + options = append(options, "fstype", mType, "from", device) + } + rawOptions := allocateIOVecs(options) + for _, rawOption := range rawOptions { + defer C.free(rawOption.iov_base) + } + + if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { + reason := C.GoString(C.strerror(*C.__error())) + return fmt.Errorf("Failed to call nmount: %s", reason) + } + return nil +} + +func unmount(target string, flag int) error { + return syscall.Unmount(target, flag) +} diff -Nru docker.io-0.9.1~dfsg1/pkg/mount/mounter_linux.go docker.io-1.3.2~dfsg1/pkg/mount/mounter_linux.go --- docker.io-0.9.1~dfsg1/pkg/mount/mounter_linux.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/mount/mounter_linux.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,5 +1,3 @@ -// +build amd64 - package mount import ( diff -Nru docker.io-0.9.1~dfsg1/pkg/mount/mounter_unsupported.go docker.io-1.3.2~dfsg1/pkg/mount/mounter_unsupported.go --- docker.io-0.9.1~dfsg1/pkg/mount/mounter_unsupported.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/mount/mounter_unsupported.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,4 +1,4 @@ -// +build !linux !amd64 +// +build !linux,!freebsd freebsd,!cgo package mount
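The build-constraint rewrites in these hunks are easy to misread, so here is the boolean reading under Go's build-tag rules (within a tag line, a space ORs terms and a comma ANDs them), shown as a file-header sketch rather than code from the tree:

```go
// +build !linux,!freebsd freebsd,!cgo
//
// selects: (!linux && !freebsd) || (freebsd && !cgo)
// i.e. the stub constants and mounter apply on any platform without a
// native implementation, including FreeBSD builds where cgo is disabled
// and the nmount(2) wrapper above cannot be compiled.

package mount
```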
diff -Nru docker.io-0.9.1~dfsg1/pkg/mount/mount.go docker.io-1.3.2~dfsg1/pkg/mount/mount.go --- docker.io-0.9.1~dfsg1/pkg/mount/mount.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/mount/mount.go 2014-11-24 17:38:01.000000000 +0000 @@ -29,8 +29,11 @@ // the target is not mounted // Options must be specified as fstab style func Mount(device, target, mType, options string) error { - if mounted, err := Mounted(target); err != nil || mounted { - return err + flag, _ := parseOptions(options) + if flag&REMOUNT != REMOUNT { + if mounted, err := Mounted(target); err != nil || mounted { + return err + } } return ForceMount(device, target, mType, options) } diff -Nru docker.io-0.9.1~dfsg1/pkg/mount/mountinfo_freebsd.go docker.io-1.3.2~dfsg1/pkg/mount/mountinfo_freebsd.go --- docker.io-0.9.1~dfsg1/pkg/mount/mountinfo_freebsd.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/mount/mountinfo_freebsd.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,38 @@ +package mount + +/* +#include <sys/param.h> +#include <sys/ucred.h> +#include <sys/mount.h> +*/ +import "C" + +import ( + "fmt" + "reflect" + "unsafe" +) + +// Parse the mount table via getmntinfo(3), since FreeBSD has no /proc/self/mountinfo +func parseMountTable() ([]*MountInfo, error) { + var rawEntries *C.struct_statfs + + count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) + if count == 0 { + return nil, fmt.Errorf("Failed to call getmntinfo") + } + + var entries []C.struct_statfs + header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) + header.Cap = count + header.Len = count + header.Data = uintptr(unsafe.Pointer(rawEntries)) + + var out []*MountInfo + for _, entry := range entries { + var mountinfo MountInfo + mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + out = append(out, &mountinfo) + } + return out, nil +} diff -Nru docker.io-0.9.1~dfsg1/pkg/mount/mountinfo.go docker.io-1.3.2~dfsg1/pkg/mount/mountinfo.go --- docker.io-0.9.1~dfsg1/pkg/mount/mountinfo.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/mount/mountinfo.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,79 +1,7 @@ package mount -import ( - "bufio" - "fmt" - "io" - "os" - "strings" -) - -const ( - /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue - (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) - - (1) mount ID: unique identifier of the mount (may be reused after umount) - (2) parent ID: ID of parent (or of self for the top of the mount tree) - (3) major:minor: value of st_dev for files on filesystem - (4) root: root of the mount within the filesystem - (5) mount point: mount point relative to the process's root - (6) mount options: per mount options - (7) optional fields: zero or more fields of the form "tag[:value]" - (8) separator: marks the end of the optional fields - (9) filesystem type: name of filesystem of the form "type[.subtype]" - (10) mount source: filesystem specific information or "none" - (11) super options: per super block options*/ - mountinfoFormat = "%d %d %d:%d %s %s %s " -) - type MountInfo struct { Id, Parent, Major, Minor int Root, Mountpoint, Opts string Fstype, Source, VfsOpts string } - -// Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts -func parseMountTable() ([]*MountInfo, error) { - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return nil, err - } - defer f.Close() - - return parseInfoFile(f) -} - -func parseInfoFile(r io.Reader) ([]*MountInfo, error) { - var ( - s = bufio.NewScanner(r) - out = []*MountInfo{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - var ( - p = &MountInfo{} - text = s.Text() - ) - - if _, err := fmt.Sscanf(text, mountinfoFormat, - &p.Id, &p.Parent, &p.Major, &p.Minor, - &p.Root, &p.Mountpoint, &p.Opts); err != nil { - return nil,
fmt.Errorf("Scanning '%s' failed: %s", text, err) - } - // Safe as mountinfo encodes mountpoints with spaces as \040. - index := strings.Index(text, " - ") - postSeparatorFields := strings.Fields(text[index+3:]) - if len(postSeparatorFields) != 3 { - return nil, fmt.Errorf("Error did not find 3 fields post '-' in '%s'", text) - } - p.Fstype = postSeparatorFields[0] - p.Source = postSeparatorFields[1] - p.VfsOpts = postSeparatorFields[2] - out = append(out, p) - } - return out, nil -} diff -Nru docker.io-0.9.1~dfsg1/pkg/mount/mountinfo_linux.go docker.io-1.3.2~dfsg1/pkg/mount/mountinfo_linux.go --- docker.io-0.9.1~dfsg1/pkg/mount/mountinfo_linux.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/mount/mountinfo_linux.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,74 @@ +package mount + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +const ( + /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options*/ + mountinfoFormat = "%d %d %d:%d %s %s %s " +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts +func parseMountTable() ([]*MountInfo, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*MountInfo, error) { + var ( + s = bufio.NewScanner(r) + out = []*MountInfo{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &MountInfo{} + text = s.Text() + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.Id, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as \040. 
+ index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} diff -Nru docker.io-0.9.1~dfsg1/pkg/mount/mountinfo_linux_test.go docker.io-1.3.2~dfsg1/pkg/mount/mountinfo_linux_test.go --- docker.io-0.9.1~dfsg1/pkg/mount/mountinfo_linux_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/mount/mountinfo_linux_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,448 @@ +// +build linux + +package mount + +import ( + "bytes" + "testing" +) + +const ( + fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw + 16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel + 17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755 + 18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw + 19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw + 20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel + 21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000 + 22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755 + 23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755 + 24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd + 25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw + 26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children + 27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children + 28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children + 29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children + 30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children + 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children + 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children + 33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children + 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children + 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered + 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct + 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel + 38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel + 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel + 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw + 41 16 
0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw + 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw + 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw + 45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered + 46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered + 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered + 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered + 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 + 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw + 165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered + 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered + 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered + 175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered + 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered + 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered + 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered + 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered + 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered + 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered + 203 
35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered + 207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered + 211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered + 215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered + 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered + 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered + 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered + 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered + 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered + 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered + 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered + 247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered + 31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA 
BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1` + + ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755 +18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755 +20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered +21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755 +22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw +23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw +24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k +26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children +27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw +28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu +29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 +30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw +31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct +32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory +33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices +34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer +35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio +36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event +37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb +38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd +39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525 +40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525 +41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525 +42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525 +43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525 +44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525 +45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525 +46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525 +47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525 +48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none 
rw,si=caafa54f85bb525 +49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525 +50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525 +51 20 0:44 / /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525 +52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525 +53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525 +54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525 +55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525 +56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525 +57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525 +58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525 +59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525 +60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525 +61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525 +62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525 +63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525 +64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525 +65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525 +66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525 +67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525 +68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525 +69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525 +70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525 +71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525 +72 20 0:65 / 
/var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525 +73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525 +74 20 0:67 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525 +75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525 +76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525 +77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525 +78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525 +79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525 +80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525 +81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525 +82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525 +83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525 +84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525 +85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525 +86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525 +87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525 +88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525 +89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525 +90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525 +91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525 +92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525 +93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525 +94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525 +95 20 0:88 / 
/var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525 +96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525 +97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525 +98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525 +99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525 +100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525 +101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525 +102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525 +103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525 +104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525 +105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525 +106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525 +107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525 +108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525 +109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525 +110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525 +111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525 +112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525 +113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525 +114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525 +115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525 +116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525 +117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525 +118 20 0:111 / 
/var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525 +119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525 +120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525 +121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525 +122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525 +123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525 +124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525 +125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525 +126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525 +127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525 +128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525 +129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525 +130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525 +131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525 +132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525 +133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525 +134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525 +135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525 +136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525 +137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525 +138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525 +139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525 +140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525 +141 20 0:136 / 
/var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525 +142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525 +143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525 +144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525` + + gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755 +18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755 +19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw +22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw +24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755 +25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc +26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children +27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children +28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children +29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children +30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children +31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children +32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children +33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro +34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota +35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw +36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw +42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw +43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw +44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000 +68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c +85 68 8:6 /var/lib/docker/init/dockerinit-0.7.2-dev//deleted /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerinit rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env 
/var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c +39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c +40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c +41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c +45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c +46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c +47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c +48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c +49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c +50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c +51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c +52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c +53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c +54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c +55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c +56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c +57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c +59 15 0:3402 / 
/var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c +60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c +61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c +62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c +63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c +64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c +65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c +66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c +70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c +71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c +72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c +73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c +76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c +77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c +78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c +79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c +80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c +81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c +82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c +83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c +84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c +94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c +95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c +96 15 0:3429 / 
/var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c +97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c +98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c +102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c +103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c +104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c +105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c +106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c +107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c +108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c +109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c +110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c +111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c +112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c +113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c +114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c +117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c +118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c +119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c +120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c +121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c +122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c +123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c +126 15 0:3459 / 
/var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c +127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c +128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c +130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c +131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c +132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c +133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c +134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c +135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c +136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c +137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c +138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c +139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c +140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c +141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c +142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c +143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c +144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c +147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c +150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c +151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c +152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c +153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c +154 15 0:3487 / 
/var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c +155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c +156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c +157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c +158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c +159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c +160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c +162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c +163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c +164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c +165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c +166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c +167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c +168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c +169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c +170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c +171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c +172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c +173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c +174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c +184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c +187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c +188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c +189 15 0:3511 / 
/var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c +190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c +191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c +192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c +193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c +194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c +195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c +196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c +197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c +198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c +199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c +200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c +201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c +202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c +203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c +204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c +205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c +206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c +207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c +208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c +209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c +210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c +211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c +212 15 0:3534 / 
/var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c +213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c +214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c +215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c +216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c +217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c +218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c +219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c +220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c +221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c +222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c +223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c +224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c +225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c +226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c +227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c +228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c +229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c +230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c +231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c +232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c +233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c +234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c +235 15 0:3557 / 
/var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c +237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c +238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c +239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c +240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c +241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c +242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c +243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c +244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c +245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c +246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c +247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c +249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c +250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c +251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c +252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c +253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c +254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c +255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c +256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c +257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c +259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c +260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c +261 15 0:3583 / 
/var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c
+262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c
+263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c
+264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c
+58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c
+67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c
+265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c
+270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c
+273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c
+278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c
+281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c
+286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c
+289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c
+99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096`
+)
+
+func TestParseFedoraMountinfo(t *testing.T) {
+	r := bytes.NewBuffer([]byte(fedoraMountinfo))
+	_, err := parseInfoFile(r)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestParseUbuntuMountinfo(t *testing.T) {
+	r := bytes.NewBuffer([]byte(ubuntuMountInfo))
+	_, err := parseInfoFile(r)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestParseGentooMountinfo(t *testing.T) {
+	r := bytes.NewBuffer([]byte(gentooMountinfo))
+	_, err := parseInfoFile(r)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff -Nru docker.io-0.9.1~dfsg1/pkg/mount/mountinfo_test.go docker.io-1.3.2~dfsg1/pkg/mount/mountinfo_test.go
--- docker.io-0.9.1~dfsg1/pkg/mount/mountinfo_test.go	2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/pkg/mount/mountinfo_test.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,445 +0,0 @@
-package mount
-
-import (
-	"bytes"
-	"testing"
-)
-
-const (
-	fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw
-	16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel
-	17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755
-	18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw
-	19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw
-	20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel
-	21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000
-	22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755
-	23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755
-	24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd
-	25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw
-	26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children
-	27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children
-	28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children
-	29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children
-	30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children
-	31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children
-	32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children
-	33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children
-	34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children
-	35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered
-	36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct
-	37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel
-	38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel
-	39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel
-	40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw
-	41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw
-	42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw
-	43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw
-	45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered
-	46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered
-	47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered
-	48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered
-	121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000
-	124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw
-	165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered
-	167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered
-	171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered
-	175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered
-	179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered
-	183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered
-	187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered
-	191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered
-	195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered
-	199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered
-	203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered
-	207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered
-	211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered
-	215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered
-	219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 -
ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered - 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered - 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered - 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered - 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered - 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered - 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered - 247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered` - - ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw -16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw -17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755 -18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 -19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755 -20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered -21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755 -22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw -23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw -24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw -25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k -26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children -27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw -28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu -29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 -30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw -31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct -32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory -33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices -34 21 0:27 / 
/sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer -35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio -36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event -37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb -38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd -39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525 -40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525 -41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525 -42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525 -43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525 -44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525 -45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525 -46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525 -47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525 -48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525 -49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525 -50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525 -51 20 0:44 / /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525 -52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525 -53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525 -54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525 -55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525 -56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525 -57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525 -58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525 -59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs 
none rw,si=caafa54f854e525 -60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525 -61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525 -62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525 -63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525 -64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525 -65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525 -66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525 -67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525 -68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525 -69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525 -70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525 -71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525 -72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525 -73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525 -74 20 0:67 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525 -75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525 -76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525 -77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525 -78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525 -79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525 -80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525 -81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525 -82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525 -83 20 0:76 / 
/var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525 -84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525 -85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525 -86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525 -87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525 -88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525 -89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525 -90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525 -91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525 -92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525 -93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525 -94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525 -95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525 -96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525 -97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525 -98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525 -99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525 -100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525 -101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525 -102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525 -103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525 -104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525 -105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525 -106 20 0:99 / 
/var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525 -107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525 -108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525 -109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525 -110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525 -111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525 -112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525 -113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525 -114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525 -115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525 -116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525 -117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525 -118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525 -119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525 -120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525 -121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525 -122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525 -123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525 -124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525 -125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525 -126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525 -127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525 -128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525 -129 20 0:122 / 
/var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525 -130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525 -131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525 -132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525 -133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525 -134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525 -135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525 -136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525 -137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525 -138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525 -139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525 -140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525 -141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525 -142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525 -143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525 -144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525` - - gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw -17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755 -18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755 -19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw -20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 -21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw -22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw -23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw -24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755 -25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc -26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children -27 24 0:20 / 
/sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children -28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children -29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children -30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children -31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children -32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children -33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro -34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota -35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw -36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw -42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw -43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw -44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000 -68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c -85 68 8:6 /var/lib/docker/init/dockerinit-0.7.2-dev//deleted /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerinit rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c -39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c -40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c -41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c -45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c -46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - 
aufs none rw,si=9b4a7647def4039c -47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c -48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c -49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c -50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c -51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c -52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c -53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c -54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c -55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c -56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c -57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c -59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c -60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c -61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c -62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c -63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c -64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c -65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c -66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c -70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c -71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c -72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c -73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c 
-76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c -77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c -78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c -79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c -80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c -81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c -82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c -83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c -84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c -94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c -95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c -96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c -97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c -98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c -102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c -103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c -104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c -105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c -106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c -107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c -108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c -109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c -110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c -111 15 0:3444 / 
/var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c -112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c -113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c -114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c -117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c -118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c -119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c -120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c -121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c -122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c -123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c -126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c -127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c -128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c -130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c -131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c -132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c -133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c -134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c -135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c -136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c -137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c -138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c -139 15 0:3472 / 
/var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c -140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c -141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c -142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c -143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c -144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c -147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c -150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c -151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c -152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c -153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c -154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c -155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c -156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c -157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c -158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c -159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c -160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c -162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c -163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c -164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c -165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c -166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c -167 15 0:3500 / 
/var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c -168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c -169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c -170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c -171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c -172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c -173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c -174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c -184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c -187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c -188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c -189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c -190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c -191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c -192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c -193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c -194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c -195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c -196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c -197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c -198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c -199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c -200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c -201 15 0:3523 / 
/var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c -202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c -203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c -204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c -205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c -206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c -207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c -208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c -209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c -210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c -211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c -212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c -213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c -214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c -215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c -216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c -217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c -218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c -219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c -220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c -221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c -222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c -223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c -224 15 0:3546 / 
/var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c -225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c -226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c -227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c -228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c -229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c -230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c -231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c -232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c -233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c -234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c -235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c -237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c -238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c -239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c -240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c -241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c -242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c -243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c -244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c -245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c -246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c -247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c -249 15 0:3571 / 
/var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c -250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c -251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c -252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c -253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c -254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c -255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c -256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c -257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c -259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c -260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c -261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c -262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c -263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c -264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c -58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c -67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c -265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c -270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c -273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c -278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c -281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c -286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c -289 15 0:3634 / 
/var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c
-99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096`
-)
-
-func TestParseFedoraMountinfo(t *testing.T) {
-	r := bytes.NewBuffer([]byte(fedoraMountinfo))
-	_, err := parseInfoFile(r)
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestParseUbuntuMountinfo(t *testing.T) {
-	r := bytes.NewBuffer([]byte(ubuntuMountInfo))
-	_, err := parseInfoFile(r)
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestParseGentooMountinfo(t *testing.T) {
-	r := bytes.NewBuffer([]byte(gentooMountinfo))
-	_, err := parseInfoFile(r)
-	if err != nil {
-		t.Fatal(err)
-	}
-}
diff -Nru docker.io-0.9.1~dfsg1/pkg/mount/mountinfo_unsupported.go docker.io-1.3.2~dfsg1/pkg/mount/mountinfo_unsupported.go
--- docker.io-0.9.1~dfsg1/pkg/mount/mountinfo_unsupported.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/pkg/mount/mountinfo_unsupported.go	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,12 @@
+// +build !linux,!freebsd freebsd,!cgo
+
+package mount
+
+import (
+	"fmt"
+	"runtime"
+)
+
+func parseMountTable() ([]*MountInfo, error) {
+	return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
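A note on the new file above: the build constraint "// +build !linux,!freebsd freebsd,!cgo" is read as an OR of space-separated groups, each group an AND of comma-separated terms, so the stub is compiled on any platform that is neither Linux nor FreeBSD, plus FreeBSD built without cgo; everywhere else the real mount-table parser is built instead. The fixtures deleted from mountinfo_test.go all follow the /proc/self/mountinfo layout described in proc(5): "ID parent major:minor root mountpoint mount-options [optional fields] - fstype source super-options". A minimal, self-contained sketch of a parser for that layout follows; the mountEntry type and parseLine helper are illustrative only, not Docker's actual MountInfo/parseInfoFile.

package main

// Sketch of a /proc/self/mountinfo parser in the spirit of the
// parseInfoFile tests above. mountEntry is a hypothetical type,
// not the real pkg/mount.MountInfo.

import (
	"bufio"
	"fmt"
	"strings"
)

type mountEntry struct {
	id, parent             string
	major, minor           string
	root, mountpoint, opts string
	fstype, source         string
	superOpts              string
}

func parseLine(line string) (mountEntry, error) {
	var e mountEntry
	// Optional fields of variable length sit before a lone "-" separator.
	sep := strings.Index(line, " - ")
	if sep < 0 {
		return e, fmt.Errorf("mountinfo: no separator in %q", line)
	}
	pre, post := strings.Fields(line[:sep]), strings.Fields(line[sep+3:])
	if len(pre) < 6 || len(post) < 3 {
		return e, fmt.Errorf("mountinfo: short line %q", line)
	}
	mm := strings.SplitN(pre[2], ":", 2)
	if len(mm) != 2 {
		return e, fmt.Errorf("mountinfo: bad major:minor in %q", line)
	}
	e = mountEntry{
		id: pre[0], parent: pre[1], major: mm[0], minor: mm[1],
		root: pre[3], mountpoint: pre[4], opts: pre[5],
		fstype: post[0], source: post[1], superOpts: post[2],
	}
	return e, nil
}

func main() {
	const sample = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw
20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel`
	sc := bufio.NewScanner(strings.NewReader(sample))
	for sc.Scan() {
		e, err := parseLine(sc.Text())
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s on %s type %s (%s)\n", e.source, e.mountpoint, e.fstype, e.opts)
	}
}

Note the "/media/REMOVE\040ME" fixture above: mountinfo escapes spaces in paths as octal \040, a wrinkle this sketch deliberately leaves untouched.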
:= os.Create(sourcePath) if err != nil { t.Fatal(err) @@ -94,11 +103,11 @@ } f.Close() - if err := Mount(sourcePath, targetPath, "none", "bind,ro"); err != nil { + if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil { t.Fatal(err) } defer func() { - if err := Unmount(targetPath); err != nil { + if err := Unmount(targetDir); err != nil { t.Fatal(err) } }() @@ -108,3 +117,21 @@ t.Fatal("Should not be able to open a ro file as rw") } } + +func TestGetMounts(t *testing.T) { + mounts, err := GetMounts() + if err != nil { + t.Fatal(err) + } + + root := false + for _, entry := range mounts { + if entry.Mountpoint == "/" { + root = true + } + } + + if !root { + t.Fatal("/ should be mounted at least") + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/namesgenerator/names-generator.go docker.io-1.3.2~dfsg1/pkg/namesgenerator/names-generator.go --- docker.io-0.9.1~dfsg1/pkg/namesgenerator/names-generator.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/namesgenerator/names-generator.go 2014-11-24 17:38:01.000000000 +0000 @@ -6,42 +6,66 @@ "time" ) -type NameChecker interface { - Exists(name string) bool -} - var ( left = [...]string{"happy", "jolly", "dreamy", "sad", "angry", "pensive", "focused", "sleepy", "grave", "distracted", "determined", "stoic", "stupefied", "sharp", "agitated", "cocky", "tender", "goofy", "furious", "desperate", "hopeful", "compassionate", "silly", "lonely", "condescending", "naughty", "kickass", "drunk", "boring", "nostalgic", "ecstatic", "insane", "cranky", "mad", "jovial", "sick", "hungry", "thirsty", "elegant", "backstabbing", "clever", "trusting", "loving", "suspicious", "berserk", "high", "romantic", "prickly", "evil"} // Docker 0.7.x generates names from notable scientists and hackers. // // Ada Lovelace invented the first algorithm. http://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) + // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. http://en.wikipedia.org/wiki/Ada_Yonath + // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. http://en.wikipedia.org/wiki/Adele_Goldstine // Alan Turing was a founding father of computer science. http://en.wikipedia.org/wiki/Alan_Turing. // Albert Einstein invented the general theory of relativity. http://en.wikipedia.org/wiki/Albert_Einstein // Ambroise Pare invented modern surgery. http://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. http://en.wikipedia.org/wiki/Archimedes + // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. http://en.wikipedia.org/wiki/Barbara_McClintock // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. // Charles Babbage invented the concept of a programmable computer. http://en.wikipedia.org/wiki/Charles_Babbage. // Charles Darwin established the principles of natural evolution. http://en.wikipedia.org/wiki/Charles_Darwin. // Dennis Ritchie and Ken Thompson created UNIX and the C programming language. 
http://en.wikipedia.org/wiki/Dennis_Ritchie http://en.wikipedia.org/wiki/Ken_Thompson // Douglas Engelbart gave the mother of all demos: http://en.wikipedia.org/wiki/Douglas_Engelbart + // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - http://en.wikipedia.org/wiki/Elizabeth_Blackwell // Emmett Brown invented time travel. http://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) // Enrico Fermi invented the first nuclear reactor. http://en.wikipedia.org/wiki/Enrico_Fermi. + // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. http://en.wikipedia.org/wiki/Erna_Schneider_Hoover // Euclid invented geometry. http://en.wikipedia.org/wiki/Euclid + // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. http://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. http://en.wikipedia.org/wiki/Galileo_Galilei + // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - http://en.wikipedia.org/wiki/Gertrude_Elion + // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. http://en.wikipedia.org/wiki/Grace_Hopper // Henri Poincare made fundamental contributions in several fields of mathematics. http://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 + // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - http://en.wikipedia.org/wiki/Hypatia // Isaac Newton invented classical mechanics and modern optics. http://en.wikipedia.org/wiki/Isaac_Newton + // Jane Colden - American botanist widely considered the first female American botanist - http://en.wikipedia.org/wiki/Jane_Colden + // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - http://en.wikipedia.org/wiki/Jane_Goodall + // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. http://en.wikipedia.org/wiki/Jean_Bartik + // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. http://en.wikipedia.org/wiki/Jean_E._Sammet + // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - http://en.wikipedia.org/wiki/Johanna_Mestorf // John McCarthy invented LISP: http://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) + // June Almeida - Scottish virologist who took the first pictures of the rubella virus - http://en.wikipedia.org/wiki/June_Almeida + // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. http://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones // Leonardo Da Vinci invented too many things to list here. http://en.wikipedia.org/wiki/Leonardo_da_Vinci. // Linus Torvalds invented Linux and Git. http://en.wikipedia.org/wiki/Linus_Torvalds + // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. 
The element meitnerium is named after her - http://en.wikipedia.org/wiki/Lise_Meitner // Louis Pasteur discovered vaccination, fermentation and pasteurization. http://en.wikipedia.org/wiki/Louis_Pasteur. // Malcolm McLean invented the modern shipping container: http://en.wikipedia.org/wiki/Malcom_McLean + // Maria Ardinghelli - Italian translator, mathematician and physicist - http://en.wikipedia.org/wiki/Maria_Ardinghelli + // Maria Kirch - German astronomer and first woman to discover a comet - http://en.wikipedia.org/wiki/Maria_Margarethe_Kirch + // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - http://en.wikipedia.org/wiki/Maria_Mayer // Marie Curie discovered radioactivity. http://en.wikipedia.org/wiki/Marie_Curie. + // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - http://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande + // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - http://en.wikipedia.org/wiki/Mary_Leakey // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. http://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB // Niels Bohr is the father of quantum theory. http://en.wikipedia.org/wiki/Niels_Bohr. // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. http://en.wikipedia.org/wiki/Nikola_Tesla // Pierre de Fermat pioneered several aspects of modern mathematics. http://en.wikipedia.org/wiki/Pierre_de_Fermat + // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. http://en.wikipedia.org/wiki/Rachel_Carson + // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). http://en.wikipedia.org/wiki/Radia_Perlman // Richard Feynman was a key contributor to quantum mechanics and particle physics. http://en.wikipedia.org/wiki/Richard_Feynman + // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. http://en.wikiquote.org/wiki/Richard_Stallman // Rob Pike was a key contributor to Unix, Plan 9, the X graphics system, utf-8, and the Go programming language. http://en.wikipedia.org/wiki/Rob_Pike + // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - http://en.wikipedia.org/wiki/Rosalind_Franklin + // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - http://en.wikipedia.org/wiki/Sofia_Kovalevskaya + // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. http://en.wikipedia.org/wiki/Sophie_Wilson // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. http://en.wikipedia.org/wiki/Stephen_Hawking // Steve Wozniak invented the Apple I and Apple II. http://en.wikipedia.org/wiki/Steve_Wozniak // Werner Heisenberg was a founding father of quantum mechanics. 
http://en.wikipedia.org/wiki/Werner_Heisenberg @@ -49,19 +73,20 @@ // http://en.wikipedia.org/wiki/John_Bardeen // http://en.wikipedia.org/wiki/Walter_Houser_Brattain // http://en.wikipedia.org/wiki/William_Shockley - right = [...]string{"lovelace", "franklin", "tesla", "einstein", "bohr", "davinci", "pasteur", "nobel", "curie", "darwin", "turing", "ritchie", "torvalds", "pike", "thompson", "wozniak", "galileo", "euclid", "newton", "fermat", "archimedes", "poincare", "heisenberg", "feynman", "hawking", "fermi", "pare", "mccarthy", "engelbart", "babbage", "albattani", "ptolemy", "bell", "wright", "lumiere", "morse", "mclean", "brown", "bardeen", "brattain", "shockley"} + right = [...]string{"albattani", "almeida", "archimedes", "ardinghelli", "babbage", "bardeen", "bartik", "bell", "blackwell", "bohr", "brattain", "brown", "carson", "colden", "curie", "darwin", "davinci", "einstein", "elion", "engelbart", "euclid", "fermat", "fermi", "feynman", "franklin", "galileo", "goldstine", "goodall", "hawking", "heisenberg", "hoover", "hopper", "hypatia", "jones", "kirch", "kowalevski", "lalande", "leakey", "lovelace", "lumiere", "mayer", "mccarthy", "mcclintock", "mclean", "meitner", "mestorf", "morse", "newton", "nobel", "pare", "pasteur", "perlman", "pike", "poincare", "ptolemy", "ritchie", "rosalind", "sammet", "shockley", "sinoussi", "stallman", "tesla", "thompson", "torvalds", "turing", "wilson", "wozniak", "wright", "yonath"} ) -func GenerateRandomName(checker NameChecker) (string, error) { - retry := 5 +func GetRandomName(retry int) string { rand.Seed(time.Now().UnixNano()) + +begin: name := fmt.Sprintf("%s_%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))]) - for checker != nil && checker.Exists(name) && retry > 0 { - name = fmt.Sprintf("%s%d", name, rand.Intn(10)) - retry = retry - 1 + if name == "boring_wozniak" /* Steve Wozniak is not boring */ { + goto begin } - if retry == 0 { - return name, fmt.Errorf("Error generating random name") + + if retry > 0 { + name = fmt.Sprintf("%s%d", name, rand.Intn(10)) } - return name, nil + return name } diff -Nru docker.io-0.9.1~dfsg1/pkg/namesgenerator/names-generator_test.go docker.io-1.3.2~dfsg1/pkg/namesgenerator/names-generator_test.go --- docker.io-0.9.1~dfsg1/pkg/namesgenerator/names-generator_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/namesgenerator/names-generator_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -4,35 +4,9 @@ "testing" ) -type FalseChecker struct{} - -func (n *FalseChecker) Exists(name string) bool { - return false -} - -type TrueChecker struct{} - -func (n *TrueChecker) Exists(name string) bool { - return true -} - -func TestGenerateRandomName(t *testing.T) { - if _, err := GenerateRandomName(&FalseChecker{}); err != nil { - t.Error(err) - } - - if _, err := GenerateRandomName(&TrueChecker{}); err == nil { - t.Error("An error was expected") - } - -} - // Make sure the generated names are awesome func TestGenerateAwesomeNames(t *testing.T) { - name, err := GenerateRandomName(&FalseChecker{}) - if err != nil { - t.Error(err) - } + name := GetRandomName(0) if !isAwesome(name) { t.Fatalf("Generated name '%s' is not awesome.", name) } diff -Nru docker.io-0.9.1~dfsg1/pkg/netlink/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/netlink/MAINTAINERS --- docker.io-0.9.1~dfsg1/pkg/netlink/MAINTAINERS 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/netlink/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -Michael Crosby (@crosbymichael) -Guillaume Charmes (@creack) diff -Nru 
docker.io-0.9.1~dfsg1/pkg/netlink/netlink.go docker.io-1.3.2~dfsg1/pkg/netlink/netlink.go --- docker.io-0.9.1~dfsg1/pkg/netlink/netlink.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/netlink/netlink.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -// Packet netlink provide access to low level Netlink sockets and messages. -// -// Actual implementations are in: -// netlink_linux.go -// netlink_darwin.go -package netlink - -import ( - "errors" - "net" -) - -var ( - ErrWrongSockType = errors.New("Wrong socket type") - ErrShortResponse = errors.New("Got short response from netlink") -) - -// A Route is a subnet associated with the interface to reach it. -type Route struct { - *net.IPNet - Iface *net.Interface - Default bool -} diff -Nru docker.io-0.9.1~dfsg1/pkg/netlink/netlink_linux.go docker.io-1.3.2~dfsg1/pkg/netlink/netlink_linux.go --- docker.io-0.9.1~dfsg1/pkg/netlink/netlink_linux.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/netlink/netlink_linux.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,810 +0,0 @@ -// +build amd64 - -package netlink - -import ( - "encoding/binary" - "fmt" - "net" - "syscall" - "unsafe" -) - -const ( - IFNAMSIZ = 16 - DEFAULT_CHANGE = 0xFFFFFFFF - IFLA_INFO_KIND = 1 - IFLA_INFO_DATA = 2 - VETH_INFO_PEER = 1 - IFLA_NET_NS_FD = 28 -) - -var nextSeqNr int - -func nativeEndian() binary.ByteOrder { - var x uint32 = 0x01020304 - if *(*byte)(unsafe.Pointer(&x)) == 0x01 { - return binary.BigEndian - } - return binary.LittleEndian -} - -func getSeq() int { - nextSeqNr = nextSeqNr + 1 - return nextSeqNr -} - -func getIpFamily(ip net.IP) int { - if len(ip) <= net.IPv4len { - return syscall.AF_INET - } - if ip.To4() != nil { - return syscall.AF_INET - } - return syscall.AF_INET6 -} - -type NetlinkRequestData interface { - Len() int - ToWireFormat() []byte -} - -type IfInfomsg struct { - syscall.IfInfomsg -} - -func newIfInfomsg(family int) *IfInfomsg { - return &IfInfomsg{ - IfInfomsg: syscall.IfInfomsg{ - Family: uint8(family), - }, - } -} - -func newIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { - msg := newIfInfomsg(family) - parent.children = append(parent.children, msg) - return msg -} - -func (msg *IfInfomsg) ToWireFormat() []byte { - native := nativeEndian() - - length := syscall.SizeofIfInfomsg - b := make([]byte, length) - b[0] = msg.Family - b[1] = 0 - native.PutUint16(b[2:4], msg.Type) - native.PutUint32(b[4:8], uint32(msg.Index)) - native.PutUint32(b[8:12], msg.Flags) - native.PutUint32(b[12:16], msg.Change) - return b -} - -func (msg *IfInfomsg) Len() int { - return syscall.SizeofIfInfomsg -} - -type IfAddrmsg struct { - syscall.IfAddrmsg -} - -func newIfAddrmsg(family int) *IfAddrmsg { - return &IfAddrmsg{ - IfAddrmsg: syscall.IfAddrmsg{ - Family: uint8(family), - }, - } -} - -func (msg *IfAddrmsg) ToWireFormat() []byte { - native := nativeEndian() - - length := syscall.SizeofIfAddrmsg - b := make([]byte, length) - b[0] = msg.Family - b[1] = msg.Prefixlen - b[2] = msg.Flags - b[3] = msg.Scope - native.PutUint32(b[4:8], msg.Index) - return b -} - -func (msg *IfAddrmsg) Len() int { - return syscall.SizeofIfAddrmsg -} - -type RtMsg struct { - syscall.RtMsg -} - -func newRtMsg(family int) *RtMsg { - return &RtMsg{ - RtMsg: syscall.RtMsg{ - Family: uint8(family), - Table: syscall.RT_TABLE_MAIN, - Scope: syscall.RT_SCOPE_UNIVERSE, - Protocol: syscall.RTPROT_BOOT, - Type: syscall.RTN_UNICAST, - }, - } -} - -func (msg *RtMsg) ToWireFormat() []byte { - native := nativeEndian() - - length := syscall.SizeofRtMsg 
- b := make([]byte, length) - b[0] = msg.Family - b[1] = msg.Dst_len - b[2] = msg.Src_len - b[3] = msg.Tos - b[4] = msg.Table - b[5] = msg.Protocol - b[6] = msg.Scope - b[7] = msg.Type - native.PutUint32(b[8:12], msg.Flags) - return b -} - -func (msg *RtMsg) Len() int { - return syscall.SizeofRtMsg -} - -func rtaAlignOf(attrlen int) int { - return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1) -} - -type RtAttr struct { - syscall.RtAttr - Data []byte - children []NetlinkRequestData -} - -func newRtAttr(attrType int, data []byte) *RtAttr { - return &RtAttr{ - RtAttr: syscall.RtAttr{ - Type: uint16(attrType), - }, - children: []NetlinkRequestData{}, - Data: data, - } -} - -func newRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr { - attr := newRtAttr(attrType, data) - parent.children = append(parent.children, attr) - return attr -} - -func (a *RtAttr) Len() int { - l := 0 - for _, child := range a.children { - l += child.Len() + syscall.SizeofRtAttr - } - if l == 0 { - l++ - } - return rtaAlignOf(l + len(a.Data)) -} - -func (a *RtAttr) ToWireFormat() []byte { - native := nativeEndian() - - length := a.Len() - buf := make([]byte, rtaAlignOf(length+syscall.SizeofRtAttr)) - - if a.Data != nil { - copy(buf[4:], a.Data) - } else { - next := 4 - for _, child := range a.children { - childBuf := child.ToWireFormat() - copy(buf[next:], childBuf) - next += rtaAlignOf(len(childBuf)) - } - } - - if l := uint16(rtaAlignOf(length)); l != 0 { - native.PutUint16(buf[0:2], l+1) - } - native.PutUint16(buf[2:4], a.Type) - - return buf -} - -type NetlinkRequest struct { - syscall.NlMsghdr - Data []NetlinkRequestData -} - -func (rr *NetlinkRequest) ToWireFormat() []byte { - native := nativeEndian() - - length := rr.Len - dataBytes := make([][]byte, len(rr.Data)) - for i, data := range rr.Data { - dataBytes[i] = data.ToWireFormat() - length += uint32(len(dataBytes[i])) - } - b := make([]byte, length) - native.PutUint32(b[0:4], length) - native.PutUint16(b[4:6], rr.Type) - native.PutUint16(b[6:8], rr.Flags) - native.PutUint32(b[8:12], rr.Seq) - native.PutUint32(b[12:16], rr.Pid) - - next := 16 - for _, data := range dataBytes { - copy(b[next:], data) - next += len(data) - } - return b -} - -func (rr *NetlinkRequest) AddData(data NetlinkRequestData) { - if data != nil { - rr.Data = append(rr.Data, data) - } -} - -func newNetlinkRequest(proto, flags int) *NetlinkRequest { - return &NetlinkRequest{ - NlMsghdr: syscall.NlMsghdr{ - Len: uint32(syscall.NLMSG_HDRLEN), - Type: uint16(proto), - Flags: syscall.NLM_F_REQUEST | uint16(flags), - Seq: uint32(getSeq()), - }, - } -} - -type NetlinkSocket struct { - fd int - lsa syscall.SockaddrNetlink -} - -func getNetlinkSocket() (*NetlinkSocket, error) { - fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, syscall.NETLINK_ROUTE) - if err != nil { - return nil, err - } - s := &NetlinkSocket{ - fd: fd, - } - s.lsa.Family = syscall.AF_NETLINK - if err := syscall.Bind(fd, &s.lsa); err != nil { - syscall.Close(fd) - return nil, err - } - - return s, nil -} - -func (s *NetlinkSocket) Close() { - syscall.Close(s.fd) -} - -func (s *NetlinkSocket) Send(request *NetlinkRequest) error { - if err := syscall.Sendto(s.fd, request.ToWireFormat(), 0, &s.lsa); err != nil { - return err - } - return nil -} - -func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) { - rb := make([]byte, syscall.Getpagesize()) - nr, _, err := syscall.Recvfrom(s.fd, rb, 0) - if err != nil { - return nil, err - } - if nr < syscall.NLMSG_HDRLEN { - return nil, 
ErrShortResponse - } - rb = rb[:nr] - return syscall.ParseNetlinkMessage(rb) -} - -func (s *NetlinkSocket) GetPid() (uint32, error) { - lsa, err := syscall.Getsockname(s.fd) - if err != nil { - return 0, err - } - switch v := lsa.(type) { - case *syscall.SockaddrNetlink: - return v.Pid, nil - } - return 0, ErrWrongSockType -} - -func (s *NetlinkSocket) HandleAck(seq uint32) error { - native := nativeEndian() - - pid, err := s.GetPid() - if err != nil { - return err - } - -done: - for { - msgs, err := s.Receive() - if err != nil { - return err - } - for _, m := range msgs { - if m.Header.Seq != seq { - return fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, seq) - } - if m.Header.Pid != pid { - return fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid) - } - if m.Header.Type == syscall.NLMSG_DONE { - break done - } - if m.Header.Type == syscall.NLMSG_ERROR { - error := int32(native.Uint32(m.Data[0:4])) - if error == 0 { - break done - } - return syscall.Errno(-error) - } - } - } - - return nil -} - -// Add a new default gateway. Identical to: -// ip route add default via $ip -func AddDefaultGw(ip net.IP) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - family := getIpFamily(ip) - - wb := newNetlinkRequest(syscall.RTM_NEWROUTE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) - - msg := newRtMsg(family) - wb.AddData(msg) - - var ipData []byte - if family == syscall.AF_INET { - ipData = ip.To4() - } else { - ipData = ip.To16() - } - - gateway := newRtAttr(syscall.RTA_GATEWAY, ipData) - - wb.AddData(gateway) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Bring up a particular network interface -func NetworkLinkUp(iface *net.Interface) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Change = syscall.IFF_UP - msg.Flags = syscall.IFF_UP - msg.Index = int32(iface.Index) - wb.AddData(msg) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -func NetworkLinkDown(iface *net.Interface) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Change = syscall.IFF_UP - msg.Flags = 0 & ^syscall.IFF_UP - msg.Index = int32(iface.Index) - wb.AddData(msg) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -func NetworkSetMTU(iface *net.Interface, mtu int) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Type = syscall.RTM_SETLINK - msg.Flags = syscall.NLM_F_REQUEST - msg.Index = int32(iface.Index) - msg.Change = DEFAULT_CHANGE - wb.AddData(msg) - - var ( - b = make([]byte, 4) - native = nativeEndian() - ) - native.PutUint32(b, uint32(mtu)) - - data := newRtAttr(syscall.IFLA_MTU, b) - wb.AddData(data) - - if err := s.Send(wb); err != nil { - return err - } - return s.HandleAck(wb.Seq) -} - -// same as ip link set $name master $master -func NetworkSetMaster(iface, master *net.Interface) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - - 
msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Type = syscall.RTM_SETLINK - msg.Flags = syscall.NLM_F_REQUEST - msg.Index = int32(iface.Index) - msg.Change = DEFAULT_CHANGE - wb.AddData(msg) - - var ( - b = make([]byte, 4) - native = nativeEndian() - ) - native.PutUint32(b, uint32(master.Index)) - - data := newRtAttr(syscall.IFLA_MASTER, b) - wb.AddData(data) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -func NetworkSetNsPid(iface *net.Interface, nspid int) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Type = syscall.RTM_SETLINK - msg.Flags = syscall.NLM_F_REQUEST - msg.Index = int32(iface.Index) - msg.Change = DEFAULT_CHANGE - wb.AddData(msg) - - var ( - b = make([]byte, 4) - native = nativeEndian() - ) - native.PutUint32(b, uint32(nspid)) - - data := newRtAttr(syscall.IFLA_NET_NS_PID, b) - wb.AddData(data) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -func NetworkSetNsFd(iface *net.Interface, fd int) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Type = syscall.RTM_SETLINK - msg.Flags = syscall.NLM_F_REQUEST - msg.Index = int32(iface.Index) - msg.Change = DEFAULT_CHANGE - wb.AddData(msg) - - var ( - b = make([]byte, 4) - native = nativeEndian() - ) - native.PutUint32(b, uint32(fd)) - - data := newRtAttr(IFLA_NET_NS_FD, b) - wb.AddData(data) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Add an Ip address to an interface. This is identical to: -// ip addr add $ip/$ipNet dev $iface -func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - family := getIpFamily(ip) - - wb := newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) - - msg := newIfAddrmsg(family) - msg.Index = uint32(iface.Index) - prefixLen, _ := ipNet.Mask.Size() - msg.Prefixlen = uint8(prefixLen) - wb.AddData(msg) - - var ipData []byte - if family == syscall.AF_INET { - ipData = ip.To4() - } else { - ipData = ip.To16() - } - - localData := newRtAttr(syscall.IFA_LOCAL, ipData) - wb.AddData(localData) - - addrData := newRtAttr(syscall.IFA_ADDRESS, ipData) - wb.AddData(addrData) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -func zeroTerminated(s string) []byte { - return []byte(s + "\000") -} - -func nonZeroTerminated(s string) []byte { - return []byte(s) -} - -// Add a new network link of a specified type. 
This is identical to -// running: ip add link $name type $linkType -func NetworkLinkAdd(name string, linkType string) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - wb.AddData(msg) - - if name != "" { - nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name)) - wb.AddData(nameData) - } - - kindData := newRtAttr(IFLA_INFO_KIND, nonZeroTerminated(linkType)) - - infoData := newRtAttr(syscall.IFLA_LINKINFO, kindData.ToWireFormat()) - wb.AddData(infoData) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Returns an array of IPNet for all the currently routed subnets on ipv4 -// This is similar to the first column of "ip route" output -func NetworkGetRoutes() ([]Route, error) { - native := nativeEndian() - - s, err := getNetlinkSocket() - if err != nil { - return nil, err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - wb.AddData(msg) - - if err := s.Send(wb); err != nil { - return nil, err - } - - pid, err := s.GetPid() - if err != nil { - return nil, err - } - - res := make([]Route, 0) - -done: - for { - msgs, err := s.Receive() - if err != nil { - return nil, err - } - for _, m := range msgs { - if m.Header.Seq != wb.Seq { - return nil, fmt.Errorf("Wrong Seq nr %d, expected 1", m.Header.Seq) - } - if m.Header.Pid != pid { - return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid) - } - if m.Header.Type == syscall.NLMSG_DONE { - break done - } - if m.Header.Type == syscall.NLMSG_ERROR { - error := int32(native.Uint32(m.Data[0:4])) - if error == 0 { - break done - } - return nil, syscall.Errno(-error) - } - if m.Header.Type != syscall.RTM_NEWROUTE { - continue - } - - var r Route - - msg := (*RtMsg)(unsafe.Pointer(&m.Data[0:syscall.SizeofRtMsg][0])) - - if msg.Flags&syscall.RTM_F_CLONED != 0 { - // Ignore cloned routes - continue - } - - if msg.Table != syscall.RT_TABLE_MAIN { - // Ignore non-main tables - continue - } - - if msg.Family != syscall.AF_INET { - // Ignore non-ipv4 routes - continue - } - - if msg.Dst_len == 0 { - // Default routes - r.Default = true - } - - attrs, err := syscall.ParseNetlinkRouteAttr(&m) - if err != nil { - return nil, err - } - for _, attr := range attrs { - switch attr.Attr.Type { - case syscall.RTA_DST: - ip := attr.Value - r.IPNet = &net.IPNet{ - IP: ip, - Mask: net.CIDRMask(int(msg.Dst_len), 8*len(ip)), - } - case syscall.RTA_OIF: - index := int(native.Uint32(attr.Value[0:4])) - r.Iface, _ = net.InterfaceByIndex(index) - } - } - if r.Default || r.IPNet != nil { - res = append(res, r) - } - } - } - - return res, nil -} - -func getIfSocket() (fd int, err error) { - for _, socket := range []int{ - syscall.AF_INET, - syscall.AF_PACKET, - syscall.AF_INET6, - } { - if fd, err = syscall.Socket(socket, syscall.SOCK_DGRAM, 0); err == nil { - break - } - } - if err == nil { - return fd, nil - } - return -1, err -} - -func NetworkChangeName(iface *net.Interface, newName string) error { - fd, err := getIfSocket() - if err != nil { - return err - } - defer syscall.Close(fd) - - data := [IFNAMSIZ * 2]byte{} - // the "-1"s here are very important for ensuring we get proper null - // termination of our new C strings - copy(data[:IFNAMSIZ-1], iface.Name) - copy(data[IFNAMSIZ:IFNAMSIZ*2-1], newName) - - if _, _, errno := 
syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.SIOCSIFNAME, uintptr(unsafe.Pointer(&data[0]))); errno != 0 { - return errno - } - return nil -} - -func NetworkCreateVethPair(name1, name2 string) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - wb.AddData(msg) - - nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name1)) - wb.AddData(nameData) - - nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) - newRtAttrChild(nest1, IFLA_INFO_KIND, zeroTerminated("veth")) - nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) - nest3 := newRtAttrChild(nest2, VETH_INFO_PEER, nil) - - newIfInfomsgChild(nest3, syscall.AF_UNSPEC) - newRtAttrChild(nest3, syscall.IFLA_IFNAME, zeroTerminated(name2)) - - wb.AddData(nest1) - - if err := s.Send(wb); err != nil { - return err - } - return s.HandleAck(wb.Seq) -} diff -Nru docker.io-0.9.1~dfsg1/pkg/netlink/netlink_unsupported.go docker.io-1.3.2~dfsg1/pkg/netlink/netlink_unsupported.go --- docker.io-0.9.1~dfsg1/pkg/netlink/netlink_unsupported.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/netlink/netlink_unsupported.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,61 +0,0 @@ -// +build !linux !amd64 - -package netlink - -import ( - "errors" - "net" -) - -var ( - ErrNotImplemented = errors.New("not implemented") -) - -func NetworkGetRoutes() ([]Route, error) { - return nil, ErrNotImplemented -} - -func NetworkLinkAdd(name string, linkType string) error { - return ErrNotImplemented -} - -func NetworkLinkUp(iface *net.Interface) error { - return ErrNotImplemented -} - -func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { - return ErrNotImplemented -} - -func AddDefaultGw(ip net.IP) error { - return ErrNotImplemented - -} - -func NetworkSetMTU(iface *net.Interface, mtu int) error { - return ErrNotImplemented -} - -func NetworkCreateVethPair(name1, name2 string) error { - return ErrNotImplemented -} - -func NetworkChangeName(iface *net.Interface, newName string) error { - return ErrNotImplemented -} - -func NetworkSetNsFd(iface *net.Interface, fd int) error { - return ErrNotImplemented -} - -func NetworkSetNsPid(iface *net.Interface, nspid int) error { - return ErrNotImplemented -} - -func NetworkSetMaster(iface, master *net.Interface) error { - return ErrNotImplemented -} - -func NetworkLinkDown(iface *net.Interface) error { - return ErrNotImplemented -} diff -Nru docker.io-0.9.1~dfsg1/pkg/networkfs/etchosts/etchosts.go docker.io-1.3.2~dfsg1/pkg/networkfs/etchosts/etchosts.go --- docker.io-0.9.1~dfsg1/pkg/networkfs/etchosts/etchosts.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/networkfs/etchosts/etchosts.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,53 @@ +package etchosts + +import ( + "bytes" + "fmt" + "io/ioutil" + "regexp" +) + +var defaultContent = map[string]string{ + "localhost": "127.0.0.1", + "localhost ip6-localhost ip6-loopback": "::1", + "ip6-localnet": "fe00::0", + "ip6-mcastprefix": "ff00::0", + "ip6-allnodes": "ff02::1", + "ip6-allrouters": "ff02::2", +} + +func Build(path, IP, hostname, domainname string, extraContent *map[string]string) error { + content := bytes.NewBuffer(nil) + if IP != "" { + if domainname != "" { + content.WriteString(fmt.Sprintf("%s\t%s.%s %s\n", IP, hostname, domainname, hostname)) + } else { + content.WriteString(fmt.Sprintf("%s\t%s\n", IP, hostname)) + 
} + } + + for hosts, ip := range defaultContent { + if _, err := content.WriteString(fmt.Sprintf("%s\t%s\n", ip, hosts)); err != nil { + return err + } + } + + if extraContent != nil { + for hosts, ip := range *extraContent { + if _, err := content.WriteString(fmt.Sprintf("%s\t%s\n", ip, hosts)); err != nil { + return err + } + } + } + + return ioutil.WriteFile(path, content.Bytes(), 0644) +} + +func Update(path, IP, hostname string) error { + old, err := ioutil.ReadFile(path) + if err != nil { + return err + } + var re = regexp.MustCompile(fmt.Sprintf("(\\S*)(\\t%s)", regexp.QuoteMeta(hostname))) + return ioutil.WriteFile(path, re.ReplaceAll(old, []byte(IP+"$2")), 0644) +} diff -Nru docker.io-0.9.1~dfsg1/pkg/networkfs/etchosts/etchosts_test.go docker.io-1.3.2~dfsg1/pkg/networkfs/etchosts/etchosts_test.go --- docker.io-0.9.1~dfsg1/pkg/networkfs/etchosts/etchosts_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/networkfs/etchosts/etchosts_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,108 @@ +package etchosts + +import ( + "bytes" + "io/ioutil" + "os" + "testing" +) + +func TestBuildHostnameDomainname(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + err = Build(file.Name(), "10.11.12.13", "testhostname", "testdomainname", nil) + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := "10.11.12.13\ttesthostname.testdomainname testhostname\n"; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } +} + +func TestBuildHostname(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + err = Build(file.Name(), "10.11.12.13", "testhostname", "", nil) + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := "10.11.12.13\ttesthostname\n"; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } +} + +func TestBuildNoIP(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + err = Build(file.Name(), "", "testhostname", "", nil) + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := ""; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } +} + +func TestUpdate(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + if err := Build(file.Name(), "10.11.12.13", "testhostname", "testdomainname", nil); err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := "10.11.12.13\ttesthostname.testdomainname testhostname\n"; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } + + if err := Update(file.Name(), "1.1.1.1", "testhostname"); err != nil { + t.Fatal(err) + } + + content, err = ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := "1.1.1.1\ttesthostname.testdomainname testhostname\n"; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } +} 
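For reviewers unfamiliar with the new etchosts package above, here is a minimal usage sketch. It assumes the package is importable from its location in the 1.3.2 tree (github.com/docker/docker/pkg/networkfs/etchosts); the file path, hostnames, and addresses are illustrative only, not taken from the diff:

package main

import (
	"log"

	"github.com/docker/docker/pkg/networkfs/etchosts" // assumed import path
)

func main() {
	// Build writes a fresh hosts file: the container's own entry first
	// ("IP\thostname.domainname hostname"), then the static IPv4/IPv6
	// defaults, then any extra hostname->IP pairs (e.g. linked containers).
	extra := map[string]string{"db": "10.11.12.14"} // hypothetical linked container
	if err := etchosts.Build("/tmp/hosts.sample", "10.11.12.13", "web", "example.internal", &extra); err != nil {
		log.Fatal(err)
	}

	// Update rewrites the address in front of an existing hostname entry,
	// e.g. after a restarted container is assigned a new IP.
	if err := etchosts.Update("/tmp/hosts.sample", "10.11.12.99", "web"); err != nil {
		log.Fatal(err)
	}
}

Build truncates and rewrites the whole file (mode 0644), while Update edits an entry in place via a regexp anchored on the tab before the hostname, which is why the two are exercised separately in the tests above.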
diff -Nru docker.io-0.9.1~dfsg1/pkg/networkfs/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/networkfs/MAINTAINERS --- docker.io-0.9.1~dfsg1/pkg/networkfs/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/networkfs/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +Victor Vieux (@vieux) diff -Nru docker.io-0.9.1~dfsg1/pkg/networkfs/resolvconf/resolvconf.go docker.io-1.3.2~dfsg1/pkg/networkfs/resolvconf/resolvconf.go --- docker.io-0.9.1~dfsg1/pkg/networkfs/resolvconf/resolvconf.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/networkfs/resolvconf/resolvconf.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,92 @@ +package resolvconf + +import ( + "bytes" + "io/ioutil" + "regexp" + "strings" +) + +var ( + nsRegexp = regexp.MustCompile(`^\s*nameserver\s*(([0-9]+\.){3}([0-9]+))\s*$`) + searchRegexp = regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`) +) + +func Get() ([]byte, error) { + resolv, err := ioutil.ReadFile("/etc/resolv.conf") + if err != nil { + return nil, err + } + return resolv, nil +} + +// getLines parses input into lines and strips away comments. +func getLines(input []byte, commentMarker []byte) [][]byte { + lines := bytes.Split(input, []byte("\n")) + var output [][]byte + for _, currentLine := range lines { + var commentIndex = bytes.Index(currentLine, commentMarker) + if commentIndex == -1 { + output = append(output, currentLine) + } else { + output = append(output, currentLine[:commentIndex]) + } + } + return output +} + +// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf +func GetNameservers(resolvConf []byte) []string { + nameservers := []string{} + for _, line := range getLines(resolvConf, []byte("#")) { + var ns = nsRegexp.FindSubmatch(line) + if len(ns) > 0 { + nameservers = append(nameservers, string(ns[1])) + } + } + return nameservers +} + +// GetNameserversAsCIDR returns nameservers (if any) listed in +// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32") +// This function's output is intended for net.ParseCIDR +func GetNameserversAsCIDR(resolvConf []byte) []string { + nameservers := []string{} + for _, nameserver := range GetNameservers(resolvConf) { + nameservers = append(nameservers, nameserver+"/32") + } + return nameservers +} + +// GetSearchDomains returns search domains (if any) listed in /etc/resolv.conf +// If more than one search line is encountered, only the contents of the last +// one are returned. +func GetSearchDomains(resolvConf []byte) []string { + domains := []string{} + for _, line := range getLines(resolvConf, []byte("#")) { + match := searchRegexp.FindSubmatch(line) + if match == nil { + continue + } + domains = strings.Fields(string(match[1])) + } + return domains +} + +func Build(path string, dns, dnsSearch []string) error { + content := bytes.NewBuffer(nil) + for _, dns := range dns { + if _, err := content.WriteString("nameserver " + dns + "\n"); err != nil { + return err + } + } + if len(dnsSearch) > 0 { + if searchString := strings.Join(dnsSearch, " "); strings.Trim(searchString, " ") != "." 
{ + if _, err := content.WriteString("search " + searchString + "\n"); err != nil { + return err + } + } + } + + return ioutil.WriteFile(path, content.Bytes(), 0644) +} diff -Nru docker.io-0.9.1~dfsg1/pkg/networkfs/resolvconf/resolvconf_test.go docker.io-1.3.2~dfsg1/pkg/networkfs/resolvconf/resolvconf_test.go --- docker.io-0.9.1~dfsg1/pkg/networkfs/resolvconf/resolvconf_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/networkfs/resolvconf/resolvconf_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,158 @@ +package resolvconf + +import ( + "bytes" + "io/ioutil" + "os" + "testing" +) + +func TestGet(t *testing.T) { + resolvConfUtils, err := Get() + if err != nil { + t.Fatal(err) + } + resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") + if err != nil { + t.Fatal(err) + } + if string(resolvConfUtils) != string(resolvConfSystem) { + t.Fatalf("/etc/resolv.conf and GetResolvConf have different content.") + } +} + +func TestGetNameservers(t *testing.T) { + for resolv, result := range map[string][]string{` +nameserver 1.2.3.4 +nameserver 40.3.200.10 +search example.com`: {"1.2.3.4", "40.3.200.10"}, + `search example.com`: {}, + `nameserver 1.2.3.4 +search example.com +nameserver 4.30.20.100`: {"1.2.3.4", "4.30.20.100"}, + ``: {}, + ` nameserver 1.2.3.4 `: {"1.2.3.4"}, + `search example.com +nameserver 1.2.3.4 +#nameserver 4.3.2.1`: {"1.2.3.4"}, + `search example.com +nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4"}, + } { + test := GetNameservers([]byte(resolv)) + if !strSlicesEqual(test, result) { + t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv) + } + } +} + +func TestGetNameserversAsCIDR(t *testing.T) { + for resolv, result := range map[string][]string{` +nameserver 1.2.3.4 +nameserver 40.3.200.10 +search example.com`: {"1.2.3.4/32", "40.3.200.10/32"}, + `search example.com`: {}, + `nameserver 1.2.3.4 +search example.com +nameserver 4.30.20.100`: {"1.2.3.4/32", "4.30.20.100/32"}, + ``: {}, + ` nameserver 1.2.3.4 `: {"1.2.3.4/32"}, + `search example.com +nameserver 1.2.3.4 +#nameserver 4.3.2.1`: {"1.2.3.4/32"}, + `search example.com +nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4/32"}, + } { + test := GetNameserversAsCIDR([]byte(resolv)) + if !strSlicesEqual(test, result) { + t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv) + } + } +} + +func TestGetSearchDomains(t *testing.T) { + for resolv, result := range map[string][]string{ + `search example.com`: {"example.com"}, + `search example.com # ignored`: {"example.com"}, + ` search example.com `: {"example.com"}, + ` search example.com # ignored`: {"example.com"}, + `search foo.example.com example.com`: {"foo.example.com", "example.com"}, + ` search foo.example.com example.com `: {"foo.example.com", "example.com"}, + ` search foo.example.com example.com # ignored`: {"foo.example.com", "example.com"}, + ``: {}, + `# ignored`: {}, + `nameserver 1.2.3.4 +search foo.example.com example.com`: {"foo.example.com", "example.com"}, + `nameserver 1.2.3.4 +search dup1.example.com dup2.example.com +search foo.example.com example.com`: {"foo.example.com", "example.com"}, + `nameserver 1.2.3.4 +search foo.example.com example.com +nameserver 4.30.20.100`: {"foo.example.com", "example.com"}, + } { + test := GetSearchDomains([]byte(resolv)) + if !strSlicesEqual(test, result) { + t.Fatalf("Wrong search domain string {%s} should be %v. 
Input: %s", test, result, resolv) + } + } +} + +func strSlicesEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + + for i, v := range a { + if v != b[i] { + return false + } + } + + return true +} + +func TestBuild(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + err = Build(file.Name(), []string{"ns1", "ns2", "ns3"}, []string{"search1"}) + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := "nameserver ns1\nnameserver ns2\nnameserver ns3\nsearch search1\n"; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } +} + +func TestBuildWithZeroLengthDomainSearch(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + err = Build(file.Name(), []string{"ns1", "ns2", "ns3"}, []string{"."}) + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := "nameserver ns1\nnameserver ns2\nnameserver ns3\n"; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } + if notExpected := "search ."; bytes.Contains(content, []byte(notExpected)) { + t.Fatalf("Expected to not find '%s' got '%s'", notExpected, content) + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/opts/opts.go docker.io-1.3.2~dfsg1/pkg/opts/opts.go --- docker.io-0.9.1~dfsg1/pkg/opts/opts.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/opts/opts.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,148 +0,0 @@ -package opts - -import ( - "fmt" - "github.com/dotcloud/docker/utils" - "os" - "path/filepath" - "regexp" - "strings" -) - -// ListOpts type -type ListOpts struct { - values []string - validator ValidatorFctType -} - -func NewListOpts(validator ValidatorFctType) ListOpts { - return ListOpts{ - validator: validator, - } -} - -func (opts *ListOpts) String() string { - return fmt.Sprintf("%v", []string(opts.values)) -} - -// Set validates if needed the input value and add it to the -// internal slice. -func (opts *ListOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - opts.values = append(opts.values, value) - return nil -} - -// Delete remove the given element from the slice. -func (opts *ListOpts) Delete(key string) { - for i, k := range opts.values { - if k == key { - opts.values = append(opts.values[:i], opts.values[i+1:]...) - return - } - } -} - -// GetMap returns the content of values in a map in order to avoid -// duplicates. -// FIXME: can we remove this? -func (opts *ListOpts) GetMap() map[string]struct{} { - ret := make(map[string]struct{}) - for _, k := range opts.values { - ret[k] = struct{}{} - } - return ret -} - -// GetAll returns the values' slice. -// FIXME: Can we remove this? -func (opts *ListOpts) GetAll() []string { - return opts.values -} - -// Get checks the existence of the given key. -func (opts *ListOpts) Get(key string) bool { - for _, k := range opts.values { - if k == key { - return true - } - } - return false -} - -// Len returns the amount of element in the slice. 
-func (opts *ListOpts) Len() int { - return len(opts.values) -} - -// Validators -type ValidatorFctType func(val string) (string, error) - -func ValidateAttach(val string) (string, error) { - if val != "stdin" && val != "stdout" && val != "stderr" { - return val, fmt.Errorf("Unsupported stream name: %s", val) - } - return val, nil -} - -func ValidateLink(val string) (string, error) { - if _, err := parseLink(val); err != nil { - return val, err - } - return val, nil -} - -// FIXME: this is a duplicate of docker.utils.parseLink. -// it can't be moved to a separate links/ package because -// links depends on Container which is defined in the core. -// -// Links come in the format of -// name:alias -func parseLink(rawLink string) (map[string]string, error) { - return utils.PartParser("name:alias", rawLink) -} - -func ValidatePath(val string) (string, error) { - var containerPath string - - if strings.Count(val, ":") > 2 { - return val, fmt.Errorf("bad format for volumes: %s", val) - } - - splited := strings.SplitN(val, ":", 2) - if len(splited) == 1 { - containerPath = splited[0] - val = filepath.Clean(splited[0]) - } else { - containerPath = splited[1] - val = fmt.Sprintf("%s:%s", splited[0], filepath.Clean(splited[1])) - } - - if !filepath.IsAbs(containerPath) { - return val, fmt.Errorf("%s is not an absolute path", containerPath) - } - return val, nil -} - -func ValidateEnv(val string) (string, error) { - arr := strings.Split(val, "=") - if len(arr) > 1 { - return val, nil - } - return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil -} - -func ValidateIp4Address(val string) (string, error) { - re := regexp.MustCompile(`^(([0-9]+\.){3}([0-9]+))\s*$`) - var ns = re.FindSubmatch([]byte(val)) - if len(ns) > 0 { - return string(ns[1]), nil - } - return "", fmt.Errorf("%s is not an ip4 address", val) -} diff -Nru docker.io-0.9.1~dfsg1/pkg/opts/opts_test.go docker.io-1.3.2~dfsg1/pkg/opts/opts_test.go --- docker.io-0.9.1~dfsg1/pkg/opts/opts_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/opts/opts_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -package opts - -import ( - "testing" -) - -func TestValidateIP4(t *testing.T) { - if ret, err := ValidateIp4Address(`1.2.3.4`); err != nil || ret == "" { - t.Fatalf("ValidateIp4Address(`1.2.3.4`) got %s %s", ret, err) - } - - if ret, err := ValidateIp4Address(`127.0.0.1`); err != nil || ret == "" { - t.Fatalf("ValidateIp4Address(`127.0.0.1`) got %s %s", ret, err) - } - - if ret, err := ValidateIp4Address(`127`); err == nil || ret != "" { - t.Fatalf("ValidateIp4Address(`127`) got %s %s", ret, err) - } - - if ret, err := ValidateIp4Address(`random invalid string`); err == nil || ret != "" { - t.Fatalf("ValidateIp4Address(`random invalid string`) got %s %s", ret, err) - } - -} diff -Nru docker.io-0.9.1~dfsg1/pkg/parsers/filters/parse.go docker.io-1.3.2~dfsg1/pkg/parsers/filters/parse.go --- docker.io-0.9.1~dfsg1/pkg/parsers/filters/parse.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/parsers/filters/parse.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,63 @@ +package filters + +import ( + "encoding/json" + "errors" + "strings" +) + +type Args map[string][]string + +// Parse the argument to the filter flag. Like +// +// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` +// +// If prev map is provided, then it is appended to, and returned. By default a new +// map is created. 
+func ParseFlag(arg string, prev Args) (Args, error) { + var filters Args = prev + if prev == nil { + filters = Args{} + } + if len(arg) == 0 { + return filters, nil + } + + if !strings.Contains(arg, "=") { + return filters, ErrorBadFormat + } + + f := strings.SplitN(arg, "=", 2) + filters[f[0]] = append(filters[f[0]], f[1]) + + return filters, nil +} + +var ErrorBadFormat = errors.New("bad format of filter (expected name=value)") + +// packs the Args into a string for easy transport from client to server +func ToParam(a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if len(a) == 0 { + return "", nil + } + + buf, err := json.Marshal(a) + if err != nil { + return "", err + } + return string(buf), nil +} + +// unpacks the filter Args +func FromParam(p string) (Args, error) { + args := Args{} + if len(p) == 0 { + return args, nil + } + err := json.Unmarshal([]byte(p), &args) + if err != nil { + return nil, err + } + return args, nil +} diff -Nru docker.io-0.9.1~dfsg1/pkg/parsers/filters/parse_test.go docker.io-1.3.2~dfsg1/pkg/parsers/filters/parse_test.go --- docker.io-0.9.1~dfsg1/pkg/parsers/filters/parse_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/parsers/filters/parse_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,78 @@ +package filters + +import ( + "sort" + "testing" +) + +func TestParseArgs(t *testing.T) { + // equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'` + flagArgs := []string{ + "created=today", + "image.name=ubuntu*", + "image.name=*untu", + } + var ( + args = Args{} + err error + ) + for i := range flagArgs { + args, err = ParseFlag(flagArgs[i], args) + if err != nil { + t.Errorf("failed to parse %s: %s", flagArgs[i], err) + } + } + if len(args["created"]) != 1 { + t.Errorf("failed to set this arg") + } + if len(args["image.name"]) != 2 { + t.Errorf("the args should have collapsed") + } +} + +func TestParam(t *testing.T) { + a := Args{ + "created": []string{"today"}, + "image.name": []string{"ubuntu*", "*untu"}, + } + + v, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } + v1, err := FromParam(v) + if err != nil { + t.Errorf("%s", err) + } + for key, vals := range v1 { + if _, ok := a[key]; !ok { + t.Errorf("could not find key %s in original set", key) + } + sort.Strings(vals) + sort.Strings(a[key]) + if len(vals) != len(a[key]) { + t.Errorf("value lengths ought to match") + continue + } + for i := range vals { + if vals[i] != a[key][i] { + t.Errorf("expected %s, but got %s", a[key][i], vals[i]) + } + } + } +} + +func TestEmpty(t *testing.T) { + a := Args{} + v, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } + v1, err := FromParam(v) + if err != nil { + t.Errorf("%s", err) + } + if len(a) != len(v1) { + t.Errorf("these should both be empty sets") + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/parsers/kernel/kernel.go docker.io-1.3.2~dfsg1/pkg/parsers/kernel/kernel.go --- docker.io-0.9.1~dfsg1/pkg/parsers/kernel/kernel.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/parsers/kernel/kernel.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,93 @@ +package kernel + +import ( + "bytes" + "errors" + "fmt" +) + +type KernelVersionInfo struct { + Kernel int + Major int + Minor int + Flavor string +} + +func (k *KernelVersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// Compare two KernelVersionInfo structs. 
+// Returns -1 if a < b, 0 if a == b, 1 if a > b +func CompareKernelVersion(a, b *KernelVersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +} + +func GetKernelVersion() (*KernelVersionInfo, error) { + var ( + err error + ) + + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} + +func ParseRelease(release string) (*KernelVersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &KernelVersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff -Nru docker.io-0.9.1~dfsg1/pkg/parsers/kernel/kernel_test.go docker.io-1.3.2~dfsg1/pkg/parsers/kernel/kernel_test.go --- docker.io-0.9.1~dfsg1/pkg/parsers/kernel/kernel_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/parsers/kernel/kernel_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,61 @@ +package kernel + +import ( + "testing" +) + +func assertParseRelease(t *testing.T, release string, b *KernelVersionInfo, result int) { + var ( + a *KernelVersionInfo + ) + a, _ = ParseRelease(release) + + if r := CompareKernelVersion(a, b); r != result { + t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result) + } + if a.Flavor != b.Flavor { + t.Fatalf("Unexpected parsed kernel flavor. Found %s, expected %s", a.Flavor, b.Flavor) + } +} + +func TestParseRelease(t *testing.T) { + assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0) + assertParseRelease(t, "3.12.8tag", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0) + assertParseRelease(t, "3.12-1-amd64", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0) +} + +func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) { + if r := CompareKernelVersion(a, b); r != result { + t.Fatalf("Unexpected kernel version comparison result. 
Found %d, expected %d", r, result) + } +} + +func TestCompareKernelVersion(t *testing.T) { + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, + 1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) +} diff -Nru docker.io-0.9.1~dfsg1/pkg/parsers/kernel/uname_linux.go docker.io-1.3.2~dfsg1/pkg/parsers/kernel/uname_linux.go --- docker.io-0.9.1~dfsg1/pkg/parsers/kernel/uname_linux.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/parsers/kernel/uname_linux.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,16 @@ +package kernel + +import ( + "syscall" +) + +type Utsname syscall.Utsname + +func uname() (*syscall.Utsname, error) { + uts := &syscall.Utsname{} + + if err := syscall.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff -Nru docker.io-0.9.1~dfsg1/pkg/parsers/kernel/uname_unsupported.go docker.io-1.3.2~dfsg1/pkg/parsers/kernel/uname_unsupported.go --- docker.io-0.9.1~dfsg1/pkg/parsers/kernel/uname_unsupported.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/parsers/kernel/uname_unsupported.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,15 @@ +// +build !linux + +package kernel + +import ( + "errors" +) + +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff -Nru docker.io-0.9.1~dfsg1/pkg/parsers/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/parsers/MAINTAINERS --- docker.io-0.9.1~dfsg1/pkg/parsers/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/parsers/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +Erik Hollensbe (@erikh) diff -Nru docker.io-0.9.1~dfsg1/pkg/parsers/operatingsystem/operatingsystem.go docker.io-1.3.2~dfsg1/pkg/parsers/operatingsystem/operatingsystem.go --- docker.io-0.9.1~dfsg1/pkg/parsers/operatingsystem/operatingsystem.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/parsers/operatingsystem/operatingsystem.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,40 @@ +package operatingsystem + +import ( + "bytes" + "errors" + "io/ioutil" +) + +var ( + // file to use to detect if the daemon is running in a container + proc1Cgroup = "/proc/1/cgroup" + + // file to check to determine Operating System + etcOsRelease = "/etc/os-release" +) + +func GetOperatingSystem() (string, error) { + b, err := ioutil.ReadFile(etcOsRelease) + if err != nil { + return "", err + } + if i := bytes.Index(b, []byte("PRETTY_NAME")); i >= 0 { + b = b[i+13:] + return string(b[:bytes.IndexByte(b, '"')]), nil + } + return "", errors.New("PRETTY_NAME not found") +} + +func IsContainerized() (bool, error) { + b, err := ioutil.ReadFile(proc1Cgroup) + if err != nil { + return false, err + } + for _, line := range bytes.Split(b, []byte{'\n'}) { + if len(line) > 0 && 
!bytes.HasSuffix(line, []byte{'/'}) { + return true, nil + } + } + return false, nil +} diff -Nru docker.io-0.9.1~dfsg1/pkg/parsers/operatingsystem/operatingsystem_test.go docker.io-1.3.2~dfsg1/pkg/parsers/operatingsystem/operatingsystem_test.go --- docker.io-0.9.1~dfsg1/pkg/parsers/operatingsystem/operatingsystem_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/parsers/operatingsystem/operatingsystem_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,123 @@ +package operatingsystem + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestGetOperatingSystem(t *testing.T) { + var ( + backup = etcOsRelease + ubuntuTrusty = []byte(`NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 14.04 LTS" +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) + gentoo = []byte(`NAME=Gentoo +ID=gentoo +PRETTY_NAME="Gentoo/Linux" +ANSI_COLOR="1;32" +HOME_URL="http://www.gentoo.org/" +SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" +BUG_REPORT_URL="https://bugs.gentoo.org/" +`) + noPrettyName = []byte(`NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) + ) + + dir := os.TempDir() + defer func() { + etcOsRelease = backup + os.RemoveAll(dir) + }() + + etcOsRelease = filepath.Join(dir, "etcOsRelease") + for expect, osRelease := range map[string][]byte{ + "Ubuntu 14.04 LTS": ubuntuTrusty, + "Gentoo/Linux": gentoo, + "": noPrettyName, + } { + if err := ioutil.WriteFile(etcOsRelease, osRelease, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if s != expect { + if expect == "" { + t.Fatalf("Expected error 'PRETTY_NAME not found', but got %v", err) + } else { + t.Fatalf("Expected '%s', but got '%s'. 
Err=%v", expect, s, err) + } + } + } +} + +func TestIsContainerized(t *testing.T) { + var ( + backup = proc1Cgroup + nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/ +13:hugetlb:/ +12:net_prio:/ +11:perf_event:/ +10:bfqio:/ +9:blkio:/ +8:net_cls:/ +7:freezer:/ +6:devices:/ +5:memory:/ +4:cpuacct:/ +3:cpu:/ +2:cpuset:/ +`) + containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +7:net_cls:/ +6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +1:cpuset:/`) + ) + + dir := os.TempDir() + defer func() { + proc1Cgroup = backup + os.RemoveAll(dir) + }() + + proc1Cgroup = filepath.Join(dir, "proc1Cgroup") + + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err := IsContainerized() + if err != nil { + t.Fatal(err) + } + if inContainer { + t.Fatal("Wrongly assuming containerized") + } + + if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err = IsContainerized() + if err != nil { + t.Fatal(err) + } + if !inContainer { + t.Fatal("Wrongly assuming non-containerized") + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/parsers/parsers.go docker.io-1.3.2~dfsg1/pkg/parsers/parsers.go --- docker.io-0.9.1~dfsg1/pkg/parsers/parsers.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/parsers/parsers.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,110 @@ +package parsers + +import ( + "fmt" + "strconv" + "strings" +) + +// FIXME: Change this not to receive default value as parameter +func ParseHost(defaultHost string, defaultUnix, addr string) (string, error) { + var ( + proto string + host string + port int + ) + addr = strings.TrimSpace(addr) + switch { + case addr == "tcp://": + return "", fmt.Errorf("Invalid bind address format: %s", addr) + case strings.HasPrefix(addr, "unix://"): + proto = "unix" + addr = strings.TrimPrefix(addr, "unix://") + if addr == "" { + addr = defaultUnix + } + case strings.HasPrefix(addr, "tcp://"): + proto = "tcp" + addr = strings.TrimPrefix(addr, "tcp://") + case strings.HasPrefix(addr, "fd://"): + return addr, nil + case addr == "": + proto = "unix" + addr = defaultUnix + default: + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid bind address protocol: %s", addr) + } + proto = "tcp" + } + + if proto != "unix" && strings.Contains(addr, ":") { + hostParts := strings.Split(addr, ":") + if len(hostParts) != 2 { + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } + if hostParts[0] != "" { + host = hostParts[0] + } else { + host = defaultHost + } + + if p, err := strconv.Atoi(hostParts[1]); err == nil && p != 0 { + port = p + } else { + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } + + } else if proto == "tcp" && !strings.Contains(addr, ":") { + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } else { + host = addr + } + if proto == "unix" { + return 
fmt.Sprintf("%s://%s", proto, host), nil + } + return fmt.Sprintf("%s://%s:%d", proto, host, port), nil +} + +// Get a repos name and returns the right reposName + tag +// The tag can be confusing because of a port in a repository name. +// Ex: localhost.localdomain:5000/samalba/hipache:latest +func ParseRepositoryTag(repos string) (string, string) { + n := strings.LastIndex(repos, ":") + if n < 0 { + return repos, "" + } + if tag := repos[n+1:]; !strings.Contains(tag, "/") { + return repos[:n], tag + } + return repos, "" +} + +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template) + } + + for i, t := range templateParts { + value := "" + if len(parts) > i { + value = parts[i] + } + out[t] = value + } + return out, nil +} + +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} diff -Nru docker.io-0.9.1~dfsg1/pkg/parsers/parsers_test.go docker.io-1.3.2~dfsg1/pkg/parsers/parsers_test.go --- docker.io-0.9.1~dfsg1/pkg/parsers/parsers_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/parsers/parsers_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,83 @@ +package parsers + +import ( + "testing" +) + +func TestParseHost(t *testing.T) { + var ( + defaultHttpHost = "127.0.0.1" + defaultUnix = "/var/run/docker.sock" + ) + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.0"); err == nil { + t.Errorf("tcp 0.0.0.0 address expected error return, but err == nil, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://"); err == nil { + t.Errorf("default tcp:// address expected error return, but err == nil, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.1:5555"); err != nil || addr != "tcp://0.0.0.1:5555" { + t.Errorf("0.0.0.1:5555 -> expected tcp://0.0.0.1:5555, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, ":6666"); err != nil || addr != "tcp://127.0.0.1:6666" { + t.Errorf(":6666 -> expected tcp://127.0.0.1:6666, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://:7777"); err != nil || addr != "tcp://127.0.0.1:7777" { + t.Errorf("tcp://:7777 -> expected tcp://127.0.0.1:7777, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, ""); err != nil || addr != "unix:///var/run/docker.sock" { + t.Errorf("empty argument -> expected unix:///var/run/docker.sock, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix:///var/run/docker.sock"); err != nil || addr != "unix:///var/run/docker.sock" { + t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix://"); err != nil || addr != "unix:///var/run/docker.sock" { + t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1"); err == nil { + t.Errorf("udp protocol address expected error return, but err 
== nil. Got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1:2375"); err == nil { + t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr) + } +} + +func TestParseRepositoryTag(t *testing.T) { + if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag) + } + if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag) + } + if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag) + } +} + +func TestParsePortMapping(t *testing.T) { + data, err := PartParser("ip:public:private", "192.168.1.1:80:8080") + if err != nil { + t.Fatal(err) + } + + if len(data) != 3 { + t.FailNow() + } + if data["ip"] != "192.168.1.1" { + t.Fail() + } + if data["public"] != "80" { + t.Fail() + } + if data["private"] != "8080" { + t.Fail() + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/pools/pools.go docker.io-1.3.2~dfsg1/pkg/pools/pools.go --- docker.io-0.9.1~dfsg1/pkg/pools/pools.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/pools/pools.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,111 @@ +// +build go1.3 + +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // Pool which returns bufio.Reader with a 32K buffer + BufioReader32KPool *BufioReaderPool + // Pool which returns bufio.Writer with a 32K buffer + BufioWriter32KPool *BufioWriterPool +) + +const buffer32K = 32 * 1024 + +type BufioReaderPool struct { + pool sync.Pool +} + +func init() { + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + pool := sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + } + return &BufioReaderPool{pool: pool} +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. 
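+// A typical round-trip, shown as a hypothetical sketch (src being any io.Reader):
+//
+//	br := BufioReader32KPool.Get(src)
+//	defer BufioReader32KPool.Put(br)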
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +type BufioWriterPool struct { + pool sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + pool := sync.Pool{ + New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + } + return &BufioWriterPool{pool: pool} +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.Writecloser. +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} diff -Nru docker.io-0.9.1~dfsg1/pkg/pools/pools_nopool.go docker.io-1.3.2~dfsg1/pkg/pools/pools_nopool.go --- docker.io-0.9.1~dfsg1/pkg/pools/pools_nopool.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/pools/pools_nopool.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,73 @@ +// +build !go1.3 + +package pools + +import ( + "bufio" + "io" + + "github.com/docker/docker/pkg/ioutils" +) + +var ( + BufioReader32KPool *BufioReaderPool + BufioWriter32KPool *BufioWriterPool +) + +const buffer32K = 32 * 1024 + +type BufioReaderPool struct { + size int +} + +func init() { + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +} + +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + return &BufioReaderPool{size: size} +} + +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + return bufio.NewReaderSize(r, bufPool.size) +} + +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) +} + +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + return readCloser.Close() + } + return nil + }) +} + +type BufioWriterPool struct { + size int +} + +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + return &BufioWriterPool{size: size} +} + +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + return bufio.NewWriterSize(w, bufPool.size) +} + +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) +} + +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + return 
writeCloser.Close() + } + return nil + }) +} diff -Nru docker.io-0.9.1~dfsg1/pkg/promise/promise.go docker.io-1.3.2~dfsg1/pkg/promise/promise.go --- docker.io-0.9.1~dfsg1/pkg/promise/promise.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/promise/promise.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,11 @@ +package promise + +// Go is a basic promise implementation: it wraps calls a function in a goroutine, +// and returns a channel which will later return the function's return value. +func Go(f func() error) chan error { + ch := make(chan error, 1) + go func() { + ch <- f() + }() + return ch +} diff -Nru docker.io-0.9.1~dfsg1/pkg/proxy/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/proxy/MAINTAINERS --- docker.io-0.9.1~dfsg1/pkg/proxy/MAINTAINERS 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/proxy/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -1 +1 @@ -Michael Crosby (@crosbymichael) +Erik Hollensbe (@erikh) diff -Nru docker.io-0.9.1~dfsg1/pkg/proxy/udp_proxy.go docker.io-1.3.2~dfsg1/pkg/proxy/udp_proxy.go --- docker.io-0.9.1~dfsg1/pkg/proxy/udp_proxy.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/proxy/udp_proxy.go 2014-11-24 17:38:01.000000000 +0000 @@ -116,6 +116,7 @@ proxyConn, err = net.DialUDP("udp", nil, proxy.backendAddr) if err != nil { log.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err) + proxy.connTrackLock.Unlock() continue } proxy.connTrackTable[*fromKey] = proxyConn diff -Nru docker.io-0.9.1~dfsg1/pkg/reexec/command_linux.go docker.io-1.3.2~dfsg1/pkg/reexec/command_linux.go --- docker.io-0.9.1~dfsg1/pkg/reexec/command_linux.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/reexec/command_linux.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,18 @@ +// +build linux + +package reexec + +import ( + "os/exec" + "syscall" +) + +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGTERM, + }, + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/reexec/command_unsupported.go docker.io-1.3.2~dfsg1/pkg/reexec/command_unsupported.go --- docker.io-0.9.1~dfsg1/pkg/reexec/command_unsupported.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/reexec/command_unsupported.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,11 @@ +// +build !linux + +package reexec + +import ( + "os/exec" +) + +func Command(args ...string) *exec.Cmd { + return nil +} diff -Nru docker.io-0.9.1~dfsg1/pkg/reexec/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/reexec/MAINTAINERS --- docker.io-0.9.1~dfsg1/pkg/reexec/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/reexec/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +Michael Crosby (@crosbymichael) diff -Nru docker.io-0.9.1~dfsg1/pkg/reexec/README.md docker.io-1.3.2~dfsg1/pkg/reexec/README.md --- docker.io-0.9.1~dfsg1/pkg/reexec/README.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/reexec/README.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,5 @@ +## reexec + +The `reexec` package facilitates the busybox style reexec of the docker binary that we require because +of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of +the exec of the binary will be used to find and execute custom init paths. 
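To make the mechanism concrete, here is a minimal sketch of the pattern the README describes; it is not part of the patch. It assumes a Linux build (Command returns nil on other platforms), and the handler name "my-init" is hypothetical, while reexec.Register, reexec.Init and reexec.Command are the functions this diff introduces:

    package main

    import (
        "fmt"
        "os"

        "github.com/docker/docker/pkg/reexec"
    )

    func init() {
        // Invoked in the re-exec'd child: Init matches os.Args[0]
        // ("my-init") against the registered handlers and runs this one.
        reexec.Register("my-init", func() {
            fmt.Println("running as the my-init handler")
            os.Exit(0)
        })
    }

    func main() {
        if reexec.Init() {
            return // we were started as a handler and it has already run
        }
        // Re-exec the current binary with argv[0] set to the handler name.
        cmd := reexec.Command("my-init")
        cmd.Stdout = os.Stdout
        if err := cmd.Run(); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
    }

Register panics if the same name is registered twice, so handlers are normally registered exactly once from init() functions, before Init is consulted at the top of main().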
diff -Nru docker.io-0.9.1~dfsg1/pkg/reexec/reexec.go docker.io-1.3.2~dfsg1/pkg/reexec/reexec.go --- docker.io-0.9.1~dfsg1/pkg/reexec/reexec.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/reexec/reexec.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,42 @@ +package reexec + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +var registeredInitializers = make(map[string]func()) + +// Register adds an initialization func under the specified name +func Register(name string, initializer func()) { + if _, exists := registeredInitializers[name]; exists { + panic(fmt.Sprintf("reexec func already registred under name %q", name)) + } + + registeredInitializers[name] = initializer +} + +// Init is called as the first part of the exec process and returns true if an +// initialization function was called. +func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + if exists { + initializer() + + return true + } + return false +} + +// Self returns the path to the current processes binary +func Self() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + name = lp + } + } + return name +} diff -Nru docker.io-0.9.1~dfsg1/pkg/signal/signal_freebsd.go docker.io-1.3.2~dfsg1/pkg/signal/signal_freebsd.go --- docker.io-0.9.1~dfsg1/pkg/signal/signal_freebsd.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/signal/signal_freebsd.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,8 +1,6 @@ package signal import ( - "os" - "os/signal" "syscall" ) diff -Nru docker.io-0.9.1~dfsg1/pkg/signal/trap.go docker.io-1.3.2~dfsg1/pkg/signal/trap.go --- docker.io-0.9.1~dfsg1/pkg/signal/trap.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/signal/trap.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,54 @@ +package signal + +import ( + "log" + "os" + gosignal "os/signal" + "sync/atomic" + "syscall" +) + +// Trap sets up a simplified signal "trap", appropriate for common +// behavior expected from a vanilla unix command-line tool in general +// (and the Docker engine in particular). +// +// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated. +// * If SIGINT or SIGTERM are repeated 3 times before cleanup is complete, then cleanup is +// skipped and the process terminated directly. +// * If "DEBUG" is set in the environment, SIGQUIT causes an exit without cleanup. +// +func Trap(cleanup func()) { + c := make(chan os.Signal, 1) + signals := []os.Signal{os.Interrupt, syscall.SIGTERM} + if os.Getenv("DEBUG") == "" { + signals = append(signals, syscall.SIGQUIT) + } + gosignal.Notify(c, signals...) + go func() { + interruptCount := uint32(0) + for sig := range c { + go func(sig os.Signal) { + log.Printf("Received signal '%v', starting shutdown of docker...\n", sig) + switch sig { + case os.Interrupt, syscall.SIGTERM: + // If the user really wants to interrupt, let him do so. 
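+				// The first interrupt starts cleanup; repeated interrupts
+				// are counted so that an insistent user can eventually force
+				// an exit without waiting for cleanup to finish.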
+ if atomic.LoadUint32(&interruptCount) < 3 { + atomic.AddUint32(&interruptCount, 1) + // Initiate the cleanup only once + if atomic.LoadUint32(&interruptCount) == 1 { + // Call cleanup handler + cleanup() + os.Exit(0) + } else { + return + } + } else { + log.Printf("Force shutdown of docker, interrupting cleanup\n") + } + case syscall.SIGQUIT: + } + os.Exit(128 + int(sig.(syscall.Signal))) + }(sig) + } + }() +} diff -Nru docker.io-0.9.1~dfsg1/pkg/stdcopy/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/stdcopy/MAINTAINERS --- docker.io-0.9.1~dfsg1/pkg/stdcopy/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/stdcopy/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +Cristian Staretu (@unclejack) diff -Nru docker.io-0.9.1~dfsg1/pkg/stdcopy/stdcopy.go docker.io-1.3.2~dfsg1/pkg/stdcopy/stdcopy.go --- docker.io-0.9.1~dfsg1/pkg/stdcopy/stdcopy.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/stdcopy/stdcopy.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,172 @@ +package stdcopy + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/docker/docker/pkg/log" +) + +const ( + StdWriterPrefixLen = 8 + StdWriterFdIndex = 0 + StdWriterSizeIndex = 4 +) + +type StdType [StdWriterPrefixLen]byte + +var ( + Stdin StdType = StdType{0: 0} + Stdout StdType = StdType{0: 1} + Stderr StdType = StdType{0: 2} +) + +type StdWriter struct { + io.Writer + prefix StdType + sizeBuf []byte +} + +func (w *StdWriter) Write(buf []byte) (n int, err error) { + var n1, n2 int + if w == nil || w.Writer == nil { + return 0, errors.New("Writer not instanciated") + } + binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf))) + n1, err = w.Writer.Write(w.prefix[:]) + if err != nil { + n = n1 - StdWriterPrefixLen + } else { + n2, err = w.Writer.Write(buf) + n = n1 + n2 - StdWriterPrefixLen + } + if n < 0 { + n = 0 + } + return +} + +// NewStdWriter instanciates a new Writer. +// Everything written to it will be encapsulated using a custom format, +// and written to the underlying `w` stream. +// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. +// `t` indicates the id of the stream to encapsulate. +// It can be utils.Stdin, utils.Stdout, utils.Stderr. +func NewStdWriter(w io.Writer, t StdType) *StdWriter { + if len(t) != StdWriterPrefixLen { + return nil + } + + return &StdWriter{ + Writer: w, + prefix: t, + sizeBuf: make([]byte, 4), + } +} + +var ErrInvalidStdHeader = errors.New("Unrecognized input header") + +// StdCopy is a modified version of io.Copy. +// +// StdCopy will demultiplex `src`, assuming that it contains two streams, +// previously multiplexed together using a StdWriter instance. +// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. +// +// StdCopy will read until it hits EOF on `src`. It will then return a nil error. +// In other words: if `err` is non nil, it indicates a real underlying error. +// +// `written` will hold the total number of bytes written to `dstout` and `dsterr`. 
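+//
+// As a hypothetical illustration (conn being any io.ReadWriter), the sending
+// side muxes both streams into one connection with
+//
+//	outw := NewStdWriter(conn, Stdout)
+//	errw := NewStdWriter(conn, Stderr)
+//
+// and the receiving side splits them apart again with
+//
+//	written, err := StdCopy(os.Stdout, os.Stderr, conn)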
+func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { + var ( + buf = make([]byte, 32*1024+StdWriterPrefixLen+1) + bufLen = len(buf) + nr, nw int + er, ew error + out io.Writer + frameSize int + ) + + for { + // Make sure we have at least a full header + for nr < StdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < StdWriterPrefixLen { + log.Debugf("Corrupted prefix: %v", buf[:nr]) + return written, nil + } + break + } + if er != nil { + log.Debugf("Error reading header: %s", er) + return 0, er + } + } + + // Check the first byte to know where to write + switch buf[StdWriterFdIndex] { + case 0: + fallthrough + case 1: + // Write on stdout + out = dstout + case 2: + // Write on stderr + out = dsterr + default: + log.Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex]) + return 0, ErrInvalidStdHeader + } + + // Retrieve the size of the frame + frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4])) + log.Debugf("framesize: %d", frameSize) + + // Check if the buffer is big enough to read the frame. + // Extend it if necessary. + if frameSize+StdWriterPrefixLen > bufLen { + log.Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf)) + buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...) + bufLen = len(buf) + } + + // While the amount of bytes read is less than the size of the frame + header, we keep reading + for nr < frameSize+StdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < frameSize+StdWriterPrefixLen { + log.Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr]) + return written, nil + } + break + } + if er != nil { + log.Debugf("Error reading frame: %s", er) + return 0, er + } + } + + // Write the retrieved frame (without header) + nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen]) + if ew != nil { + log.Debugf("Error writing frame: %s", ew) + return 0, ew + } + // If the frame has not been fully written: error + if nw != frameSize { + log.Debugf("Error Short Write: (%d on %d)", nw, frameSize) + return 0, io.ErrShortWrite + } + written += int64(nw) + + // Move the rest of the buffer to the beginning + copy(buf, buf[frameSize+StdWriterPrefixLen:]) + // Move the index + nr -= frameSize + StdWriterPrefixLen + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/stdcopy/stdcopy_test.go docker.io-1.3.2~dfsg1/pkg/stdcopy/stdcopy_test.go --- docker.io-0.9.1~dfsg1/pkg/stdcopy/stdcopy_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/stdcopy/stdcopy_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,20 @@ +package stdcopy + +import ( + "bytes" + "io/ioutil" + "testing" +) + +func BenchmarkWrite(b *testing.B) { + w := NewStdWriter(ioutil.Discard, Stdout) + data := []byte("Test line for testing stdwriter performance\n") + data = bytes.Repeat(data, 100) + b.SetBytes(int64(len(data))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := w.Write(data); err != nil { + b.Fatal(err) + } + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/symlink/fs.go docker.io-1.3.2~dfsg1/pkg/symlink/fs.go --- docker.io-0.9.1~dfsg1/pkg/symlink/fs.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/symlink/fs.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,101 @@ +package symlink + +import ( + "fmt" + "os" + "path" + "path/filepath" + "strings" +) + +const maxLoopCounter = 100 + +// FollowSymlink will follow an 
existing link and scope it to the root +// path provided. +// The role of this function is to return an absolute path in the root +// or normalize to the root if the symlink leads to a path which is +// outside of the root. +// Errors encountered while attempting to follow the symlink in path +// will be reported. +// Normalizations to the root don't constitute errors. +func FollowSymlinkInScope(link, root string) (string, error) { + root, err := filepath.Abs(root) + if err != nil { + return "", err + } + + link, err = filepath.Abs(link) + if err != nil { + return "", err + } + + if link == root { + return root, nil + } + + if !strings.HasPrefix(filepath.Dir(link), root) { + return "", fmt.Errorf("%s is not within %s", link, root) + } + + prev := "/" + + for _, p := range strings.Split(link, "/") { + prev = filepath.Join(prev, p) + + loopCounter := 0 + for { + loopCounter++ + + if loopCounter >= maxLoopCounter { + return "", fmt.Errorf("loopCounter reached MAX: %v", loopCounter) + } + + if !strings.HasPrefix(prev, root) { + // Don't resolve symlinks outside of root. For example, + // we don't have to check /home in the below. + // + // /home -> usr/home + // FollowSymlinkInScope("/home/bob/foo/bar", "/home/bob/foo") + break + } + + stat, err := os.Lstat(prev) + if err != nil { + if os.IsNotExist(err) { + break + } + return "", err + } + + // let's break if we're not dealing with a symlink + if stat.Mode()&os.ModeSymlink != os.ModeSymlink { + break + } + + // process the symlink + dest, err := os.Readlink(prev) + if err != nil { + return "", err + } + + if path.IsAbs(dest) { + prev = filepath.Join(root, dest) + } else { + prev, _ = filepath.Abs(prev) + + dir := filepath.Dir(prev) + prev = filepath.Join(dir, dest) + if dir == root && !strings.HasPrefix(prev, root) { + prev = root + } + if len(prev) < len(root) || (len(prev) == len(root) && prev != root) { + prev = filepath.Join(root, filepath.Base(dest)) + } + } + } + } + if prev == "/" { + prev = root + } + return prev, nil +} diff -Nru docker.io-0.9.1~dfsg1/pkg/symlink/fs_test.go docker.io-1.3.2~dfsg1/pkg/symlink/fs_test.go --- docker.io-0.9.1~dfsg1/pkg/symlink/fs_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/symlink/fs_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,248 @@ +package symlink + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func abs(t *testing.T, p string) string { + o, err := filepath.Abs(p) + if err != nil { + t.Fatal(err) + } + return o +} + +func TestFollowSymLinkNormal(t *testing.T) { + link := "testdata/fs/a/d/c/data" + + rewrite, err := FollowSymlinkInScope(link, "testdata") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/b/c/data"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } +} + +func TestFollowSymLinkRelativePath(t *testing.T) { + link := "testdata/fs/i" + + rewrite, err := FollowSymlinkInScope(link, "testdata") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/fs/a"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } +} + +func TestFollowSymLinkUnderLinkedDir(t *testing.T) { + dir, err := ioutil.TempDir("", "docker-fs-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + os.Mkdir(filepath.Join(dir, "realdir"), 0700) + os.Symlink("realdir", filepath.Join(dir, "linkdir")) + + linkDir := filepath.Join(dir, "linkdir", "foo") + dirUnderLinkDir := filepath.Join(dir, "linkdir", "foo", "bar") + os.MkdirAll(dirUnderLinkDir, 0700) + + 
rewrite, err := FollowSymlinkInScope(dirUnderLinkDir, linkDir) + if err != nil { + t.Fatal(err) + } + + if rewrite != dirUnderLinkDir { + t.Fatalf("Expected %s got %s", dirUnderLinkDir, rewrite) + } +} + +func TestFollowSymLinkRandomString(t *testing.T) { + if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { + t.Fatal("Random string should fail but didn't") + } +} + +func TestFollowSymLinkLastLink(t *testing.T) { + link := "testdata/fs/a/d" + + rewrite, err := FollowSymlinkInScope(link, "testdata") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/b"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } +} + +func TestFollowSymLinkRelativeLink(t *testing.T) { + link := "testdata/fs/a/e/c/data" + + rewrite, err := FollowSymlinkInScope(link, "testdata") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/fs/b/c/data"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } +} + +func TestFollowSymLinkRelativeLinkScope(t *testing.T) { + // avoid letting symlink f lead us out of the "testdata" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + { + link := "testdata/fs/a/f" + + rewrite, err := FollowSymlinkInScope(link, "testdata") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/test"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } + } + + // avoid letting symlink f lead us out of the "testdata/fs" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + { + link := "testdata/fs/a/f" + + rewrite, err := FollowSymlinkInScope(link, "testdata/fs") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/fs/test"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } + } + + // avoid letting symlink g (pointed at by symlink h) take out of scope + // TODO: we should probably normalize to scope here because ../[....]/root + // is out of scope and we leak information + { + link := "testdata/fs/b/h" + + rewrite, err := FollowSymlinkInScope(link, "testdata") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/root"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } + } + + // avoid letting allowing symlink e lead us to ../b + // normalize to the "testdata/fs/a" + { + link := "testdata/fs/a/e" + + rewrite, err := FollowSymlinkInScope(link, "testdata/fs/a") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/fs/a"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } + } + + // avoid letting symlink -> ../directory/file escape from scope + // normalize to "testdata/fs/j" + { + link := "testdata/fs/j/k" + + rewrite, err := FollowSymlinkInScope(link, "testdata/fs/j") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/fs/j"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } + } + + // make sure we don't allow escaping to / + // normalize to dir + { + dir, err := ioutil.TempDir("", "docker-fs-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + linkFile := filepath.Join(dir, "foo") + os.Mkdir(filepath.Join(dir, ""), 0700) + os.Symlink("/", linkFile) + + rewrite, err := FollowSymlinkInScope(linkFile, dir) + if err != nil { + t.Fatal(err) + } + + if rewrite != dir { + t.Fatalf("Expected %s got %s", dir, rewrite) + } + } + + // make sure we don't allow 
escaping to / + // normalize to dir + { + dir, err := ioutil.TempDir("", "docker-fs-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + linkFile := filepath.Join(dir, "foo") + os.Mkdir(filepath.Join(dir, ""), 0700) + os.Symlink("/../../", linkFile) + + rewrite, err := FollowSymlinkInScope(linkFile, dir) + if err != nil { + t.Fatal(err) + } + + if rewrite != dir { + t.Fatalf("Expected %s got %s", dir, rewrite) + } + } + + // make sure we stay in scope without leaking information + // this also checks for escaping to / + // normalize to dir + { + dir, err := ioutil.TempDir("", "docker-fs-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + linkFile := filepath.Join(dir, "foo") + os.Mkdir(filepath.Join(dir, ""), 0700) + os.Symlink("../../", linkFile) + + rewrite, err := FollowSymlinkInScope(linkFile, dir) + if err != nil { + t.Fatal(err) + } + + if rewrite != dir { + t.Fatalf("Expected %s got %s", dir, rewrite) + } + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/symlink/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/symlink/MAINTAINERS --- docker.io-0.9.1~dfsg1/pkg/symlink/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/symlink/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Victor Vieux (@vieux) diff -Nru docker.io-0.9.1~dfsg1/pkg/sysinfo/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/sysinfo/MAINTAINERS --- docker.io-0.9.1~dfsg1/pkg/sysinfo/MAINTAINERS 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/sysinfo/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -1,2 +1,2 @@ Michael Crosby (@crosbymichael) -Guillaume J. Charmes (@creack) +Victor Vieux (@vieux) diff -Nru docker.io-0.9.1~dfsg1/pkg/sysinfo/sysinfo.go docker.io-1.3.2~dfsg1/pkg/sysinfo/sysinfo.go --- docker.io-0.9.1~dfsg1/pkg/sysinfo/sysinfo.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/sysinfo/sysinfo.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,11 +1,12 @@ package sysinfo import ( - "github.com/dotcloud/docker/pkg/cgroups" "io/ioutil" "log" "os" "path" + + "github.com/docker/libcontainer/cgroups" ) type SysInfo struct { diff -Nru docker.io-0.9.1~dfsg1/pkg/system/calls_linux.go docker.io-1.3.2~dfsg1/pkg/system/calls_linux.go --- docker.io-0.9.1~dfsg1/pkg/system/calls_linux.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/system/calls_linux.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,145 +0,0 @@ -package system - -import ( - "os/exec" - "syscall" -) - -func Chroot(dir string) error { - return syscall.Chroot(dir) -} - -func Chdir(dir string) error { - return syscall.Chdir(dir) -} - -func Exec(cmd string, args []string, env []string) error { - return syscall.Exec(cmd, args, env) -} - -func Execv(cmd string, args []string, env []string) error { - name, err := exec.LookPath(cmd) - if err != nil { - return err - } - return Exec(name, args, env) -} - -func Fork() (int, error) { - syscall.ForkLock.Lock() - pid, _, err := syscall.Syscall(syscall.SYS_FORK, 0, 0, 0) - syscall.ForkLock.Unlock() - if err != 0 { - return -1, err - } - return int(pid), nil -} - -func Mount(source, target, fstype string, flags uintptr, data string) error { - return syscall.Mount(source, target, fstype, flags, data) -} - -func Unmount(target string, flags int) error { - return syscall.Unmount(target, flags) -} - -func Pivotroot(newroot, putold string) error { - return syscall.PivotRoot(newroot, putold) -} - -func Unshare(flags int) error { - return syscall.Unshare(flags) -} - -func Clone(flags uintptr) 
(int, error) { - syscall.ForkLock.Lock() - pid, _, err := syscall.RawSyscall(syscall.SYS_CLONE, flags, 0, 0) - syscall.ForkLock.Unlock() - if err != 0 { - return -1, err - } - return int(pid), nil -} - -func UsetCloseOnExec(fd uintptr) error { - if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, fd, syscall.F_SETFD, 0); err != 0 { - return err - } - return nil -} - -func Setgroups(gids []int) error { - return syscall.Setgroups(gids) -} - -func Setresgid(rgid, egid, sgid int) error { - return syscall.Setresgid(rgid, egid, sgid) -} - -func Setresuid(ruid, euid, suid int) error { - return syscall.Setresuid(ruid, euid, suid) -} - -func Setgid(gid int) error { - return syscall.Setgid(gid) -} - -func Setuid(uid int) error { - return syscall.Setuid(uid) -} - -func Sethostname(name string) error { - return syscall.Sethostname([]byte(name)) -} - -func Setsid() (int, error) { - return syscall.Setsid() -} - -func Ioctl(fd uintptr, flag, data uintptr) error { - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, flag, data); err != 0 { - return err - } - return nil -} - -func Closefd(fd uintptr) error { - return syscall.Close(int(fd)) -} - -func Dup2(fd1, fd2 uintptr) error { - return syscall.Dup2(int(fd1), int(fd2)) -} - -func Mknod(path string, mode uint32, dev int) error { - return syscall.Mknod(path, mode, dev) -} - -func ParentDeathSignal(sig uintptr) error { - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 { - return err - } - return nil -} - -func Setctty() error { - if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 { - return err - } - return nil -} - -func Mkfifo(name string, mode uint32) error { - return syscall.Mkfifo(name, mode) -} - -func Umask(mask int) int { - return syscall.Umask(mask) -} - -func SetCloneFlags(cmd *exec.Cmd, flag uintptr) { - if cmd.SysProcAttr == nil { - cmd.SysProcAttr = &syscall.SysProcAttr{} - } - cmd.SysProcAttr.Cloneflags = flag -} diff -Nru docker.io-0.9.1~dfsg1/pkg/system/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/system/MAINTAINERS --- docker.io-0.9.1~dfsg1/pkg/system/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/system/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Victor Vieux (@vieux) diff -Nru docker.io-0.9.1~dfsg1/pkg/system/pty_linux.go docker.io-1.3.2~dfsg1/pkg/system/pty_linux.go --- docker.io-0.9.1~dfsg1/pkg/system/pty_linux.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/system/pty_linux.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,58 +0,0 @@ -package system - -import ( - "fmt" - "os" - "syscall" - "unsafe" -) - -// Unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// Unlockpt should be called before opening the slave side of a pseudoterminal. -func Unlockpt(f *os.File) error { - var u int - return Ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) -} - -// Ptsname retrieves the name of the first available pts for the given master. 
-func Ptsname(f *os.File) (string, error) { - var n int - - if err := Ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil { - return "", err - } - return fmt.Sprintf("/dev/pts/%d", n), nil -} - -// CreateMasterAndConsole will open /dev/ptmx on the host and retreive the -// pts name for use as the pty slave inside the container -func CreateMasterAndConsole() (*os.File, string, error) { - master, err := os.OpenFile("/dev/ptmx", syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0) - if err != nil { - return nil, "", err - } - console, err := Ptsname(master) - if err != nil { - return nil, "", err - } - if err := Unlockpt(master); err != nil { - return nil, "", err - } - return master, console, nil -} - -// OpenPtmx opens /dev/ptmx, i.e. the PTY master. -func OpenPtmx() (*os.File, error) { - // O_NOCTTY and O_CLOEXEC are not present in os package so we use the syscall's one for all. - return os.OpenFile("/dev/ptmx", syscall.O_RDONLY|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0) -} - -// OpenTerminal is a clone of os.OpenFile without the O_CLOEXEC -// used to open the pty slave inside the container namespace -func OpenTerminal(name string, flag int) (*os.File, error) { - r, e := syscall.Open(name, flag, 0) - if e != nil { - return nil, &os.PathError{"open", name, e} - } - return os.NewFile(uintptr(r), name), nil -} diff -Nru docker.io-0.9.1~dfsg1/pkg/system/setns_linux.go docker.io-1.3.2~dfsg1/pkg/system/setns_linux.go --- docker.io-0.9.1~dfsg1/pkg/system/setns_linux.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/system/setns_linux.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -package system - -import ( - "fmt" - "runtime" - "syscall" -) - -// Via http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=7b21fddd087678a70ad64afc0f632e0f1071b092 -// -// We need different setns values for the different platforms and arch -// We are declaring the macro here because the SETNS syscall does not exist in th stdlib -var setNsMap = map[string]uintptr{ - "linux/amd64": 308, -} - -func Setns(fd uintptr, flags uintptr) error { - ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)] - if !exists { - return ErrNotSupportedPlatform - } - _, _, err := syscall.RawSyscall(ns, fd, flags, 0) - if err != 0 { - return err - } - return nil -} diff -Nru docker.io-0.9.1~dfsg1/pkg/system/unsupported.go docker.io-1.3.2~dfsg1/pkg/system/unsupported.go --- docker.io-0.9.1~dfsg1/pkg/system/unsupported.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/system/unsupported.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ -// +build !linux - -package system - -import ( - "os/exec" -) - -func SetCloneFlags(cmd *exec.Cmd, flag uintptr) { - -} - -func UsetCloseOnExec(fd uintptr) error { - return ErrNotSupportedPlatform -} diff -Nru docker.io-0.9.1~dfsg1/pkg/system/utimes_darwin.go docker.io-1.3.2~dfsg1/pkg/system/utimes_darwin.go --- docker.io-0.9.1~dfsg1/pkg/system/utimes_darwin.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/system/utimes_darwin.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,11 @@ +package system + +import "syscall" + +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return syscall.UtimesNano(path, ts) +} diff -Nru docker.io-0.9.1~dfsg1/pkg/system/utimes_freebsd.go docker.io-1.3.2~dfsg1/pkg/system/utimes_freebsd.go --- docker.io-0.9.1~dfsg1/pkg/system/utimes_freebsd.go 
1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/system/utimes_freebsd.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,24 @@ +package system + +import ( + "syscall" + "unsafe" +) + +func LUtimesNano(path string, ts []syscall.Timespec) error { + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return syscall.UtimesNano(path, ts) +} diff -Nru docker.io-0.9.1~dfsg1/pkg/system/utimes_linux.go docker.io-1.3.2~dfsg1/pkg/system/utimes_linux.go --- docker.io-0.9.1~dfsg1/pkg/system/utimes_linux.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/system/utimes_linux.go 2014-11-24 17:38:01.000000000 +0000 @@ -24,8 +24,5 @@ } func UtimesNano(path string, ts []syscall.Timespec) error { - if err := syscall.UtimesNano(path, ts); err != nil { - return err - } - return nil + return syscall.UtimesNano(path, ts) } diff -Nru docker.io-0.9.1~dfsg1/pkg/system/utimes_test.go docker.io-1.3.2~dfsg1/pkg/system/utimes_test.go --- docker.io-0.9.1~dfsg1/pkg/system/utimes_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/system/utimes_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,64 @@ +package system + +import ( + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" +) + +func prepareFiles(t *testing.T) (string, string, string) { + dir, err := ioutil.TempDir("", "docker-system-test") + if err != nil { + t.Fatal(err) + } + + file := filepath.Join(dir, "exist") + if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + + invalid := filepath.Join(dir, "doesnt-exist") + + symlink := filepath.Join(dir, "symlink") + if err := os.Symlink(file, symlink); err != nil { + t.Fatal(err) + } + + return file, invalid, symlink +} + +func TestLUtimesNano(t *testing.T) { + file, invalid, symlink := prepareFiles(t) + + before, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + ts := []syscall.Timespec{{0, 0}, {0, 0}} + if err := LUtimesNano(symlink, ts); err != nil { + t.Fatal(err) + } + + symlinkInfo, err := os.Lstat(symlink) + if err != nil { + t.Fatal(err) + } + if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() { + t.Fatal("The modification time of the symlink should be different") + } + + fileInfo, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + if before.ModTime().Unix() != fileInfo.ModTime().Unix() { + t.Fatal("The modification time of the file should be same") + } + + if err := LUtimesNano(invalid, ts); err == nil { + t.Fatal("Doesn't return an error on a non-existing file") + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/system/utimes_unsupported.go docker.io-1.3.2~dfsg1/pkg/system/utimes_unsupported.go --- docker.io-0.9.1~dfsg1/pkg/system/utimes_unsupported.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/system/utimes_unsupported.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,4 +1,4 @@ -// +build !linux +// +build !linux,!freebsd,!darwin package system diff -Nru docker.io-0.9.1~dfsg1/pkg/systemd/activation/files.go docker.io-1.3.2~dfsg1/pkg/systemd/activation/files.go --- docker.io-0.9.1~dfsg1/pkg/systemd/activation/files.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/systemd/activation/files.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 
@@ -/* -Copyright 2013 CoreOS Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Package activation implements primitives for systemd socket activation. -package activation - -import ( - "os" - "strconv" - "syscall" -) - -// based on: https://gist.github.com/alberts/4640792 -const ( - listenFdsStart = 3 -) - -func Files(unsetEnv bool) []*os.File { - if unsetEnv { - // there is no way to unset env in golang os package for now - // https://code.google.com/p/go/issues/detail?id=6423 - defer os.Setenv("LISTEN_PID", "") - defer os.Setenv("LISTEN_FDS", "") - } - - pid, err := strconv.Atoi(os.Getenv("LISTEN_PID")) - if err != nil || pid != os.Getpid() { - return nil - } - - nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS")) - if err != nil || nfds == 0 { - return nil - } - - var files []*os.File - for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ { - syscall.CloseOnExec(fd) - files = append(files, os.NewFile(uintptr(fd), "LISTEN_FD_"+strconv.Itoa(fd))) - } - - return files -} diff -Nru docker.io-0.9.1~dfsg1/pkg/systemd/activation/listeners.go docker.io-1.3.2~dfsg1/pkg/systemd/activation/listeners.go --- docker.io-0.9.1~dfsg1/pkg/systemd/activation/listeners.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/systemd/activation/listeners.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -/* -Copyright 2014 CoreOS Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package activation - -import ( - "fmt" - "net" -) - -// Listeners returns net.Listeners for all socket activated fds passed to this process. 
-func Listeners(unsetEnv bool) ([]net.Listener, error) { - files := Files(unsetEnv) - listeners := make([]net.Listener, len(files)) - - for i, f := range files { - var err error - listeners[i], err = net.FileListener(f) - if err != nil { - return nil, fmt.Errorf("Error setting up FileListener for fd %d: %s", f.Fd(), err.Error()) - } - } - - return listeners, nil -} diff -Nru docker.io-0.9.1~dfsg1/pkg/systemd/booted.go docker.io-1.3.2~dfsg1/pkg/systemd/booted.go --- docker.io-0.9.1~dfsg1/pkg/systemd/booted.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/systemd/booted.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,15 @@ +package systemd + +import ( + "os" +) + +// Conversion to Go of systemd's sd_booted() +func SdBooted() bool { + s, err := os.Stat("/run/systemd/system") + if err != nil { + return false + } + + return s.IsDir() +} diff -Nru docker.io-0.9.1~dfsg1/pkg/systemd/listendfd.go docker.io-1.3.2~dfsg1/pkg/systemd/listendfd.go --- docker.io-0.9.1~dfsg1/pkg/systemd/listendfd.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/systemd/listendfd.go 2014-11-24 17:38:01.000000000 +0000 @@ -5,7 +5,7 @@ "net" "strconv" - "github.com/dotcloud/docker/pkg/systemd/activation" + "github.com/coreos/go-systemd/activation" ) // ListenFD returns the specified socket activated files as a slice of diff -Nru docker.io-0.9.1~dfsg1/pkg/tailfile/tailfile.go docker.io-1.3.2~dfsg1/pkg/tailfile/tailfile.go --- docker.io-0.9.1~dfsg1/pkg/tailfile/tailfile.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/tailfile/tailfile.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,61 @@ +package tailfile + +import ( + "bytes" + "errors" + "os" +) + +const blockSize = 1024 + +var eol = []byte("\n") +var ErrNonPositiveLinesNumber = errors.New("Lines number must be positive") + +//TailFile returns last n lines of file f +func TailFile(f *os.File, n int) ([][]byte, error) { + if n <= 0 { + return nil, ErrNonPositiveLinesNumber + } + size, err := f.Seek(0, os.SEEK_END) + if err != nil { + return nil, err + } + block := -1 + var data []byte + var cnt int + for { + var b []byte + step := int64(block * blockSize) + left := size + step // how many bytes to beginning + if left < 0 { + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + b = make([]byte, blockSize+left) + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + break + } else { + b = make([]byte, blockSize) + if _, err := f.Seek(step, os.SEEK_END); err != nil { + return nil, err + } + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) 
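+		// Tally the newlines read so far; once more than n have been seen,
+		// the buffer already covers the last n lines and we can stop.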
+ } + cnt += bytes.Count(b, eol) + if cnt > n { + break + } + block-- + } + lines := bytes.Split(data, eol) + if n < len(lines) { + return lines[len(lines)-n-1 : len(lines)-1], nil + } + return lines[:len(lines)-1], nil +} diff -Nru docker.io-0.9.1~dfsg1/pkg/tailfile/tailfile_test.go docker.io-1.3.2~dfsg1/pkg/tailfile/tailfile_test.go --- docker.io-0.9.1~dfsg1/pkg/tailfile/tailfile_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/tailfile/tailfile_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,148 @@ +package tailfile + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestTailFile(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +third line +fourth line +fifth line +next first line +next second line +next third line +next fourth line +next fifth line +last first line +next first line +next second line +next third line +next fourth line +next fifth line +next first line +next second line +next third line +next fourth line +next fifth line +last second line +last third line +last fourth line +last fifth line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + expected := []string{"last fourth line", "last fifth line"} + res, err := TailFile(f, 2) + if err != nil { + t.Fatal(err) + } + for i, l := range res { + t.Logf("%s", l) + if expected[i] != string(l) { + t.Fatalf("Expected line %s, got %s", expected[i], l) + } + } +} + +func TestTailFileManyLines(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + expected := []string{"first line", "second line"} + res, err := TailFile(f, 10000) + if err != nil { + t.Fatal(err) + } + for i, l := range res { + t.Logf("%s", l) + if expected[i] != string(l) { + t.Fatalf("Expected line %s, got %s", expected[i], l) + } + } +} + +func TestTailEmptyFile(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + res, err := TailFile(f, 10000) + if err != nil { + t.Fatal(err) + } + if len(res) != 0 { + t.Fatal("Must be empty slice from empty file") + } +} + +func TestTailNegativeN(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + if _, err := TailFile(f, -1); err != ErrNonPositiveLinesNumber { + t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) + } + if _, err := TailFile(f, 0); err != ErrNonPositiveLinesNumber { + t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) + } +} + +func BenchmarkTail(b *testing.B) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + b.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + for i := 0; i < 10000; i++ { + if _, err := f.Write([]byte("tailfile pretty interesting line\n")); err != nil { + b.Fatal(err) + } + } + 
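+	// Reset the timer so the one-off cost of writing the 10,000 sample lines
+	// above is excluded from the measured iterations.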
b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		if _, err := TailFile(f, 1000); err != nil {
+			b.Fatal(err)
+		}
+	}
+}
diff -Nru docker.io-0.9.1~dfsg1/pkg/tarsum/fileinfosums.go docker.io-1.3.2~dfsg1/pkg/tarsum/fileinfosums.go
--- docker.io-0.9.1~dfsg1/pkg/tarsum/fileinfosums.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/pkg/tarsum/fileinfosums.go	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,125 @@
+package tarsum
+
+import "sort"
+
+// This info will be accessed through an interface so the actual name and sum cannot be meddled with
+type FileInfoSumInterface interface {
+	// File name
+	Name() string
+	// Checksum of this particular file and its headers
+	Sum() string
+	// Position of file in the tar
+	Pos() int64
+}
+
+type fileInfoSum struct {
+	name string
+	sum  string
+	pos  int64
+}
+
+func (fis fileInfoSum) Name() string {
+	return fis.name
+}
+func (fis fileInfoSum) Sum() string {
+	return fis.sum
+}
+func (fis fileInfoSum) Pos() int64 {
+	return fis.pos
+}
+
+type FileInfoSums []FileInfoSumInterface
+
+// GetFile returns the first FileInfoSumInterface with a matching name
+func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
+	for i := range fis {
+		if fis[i].Name() == name {
+			return fis[i]
+		}
+	}
+	return nil
+}
+
+// GetAllFile returns a FileInfoSums with all matching names
+func (fis FileInfoSums) GetAllFile(name string) FileInfoSums {
+	f := FileInfoSums{}
+	for i := range fis {
+		if fis[i].Name() == name {
+			f = append(f, fis[i])
+		}
+	}
+	return f
+}
+
+func contains(s []string, e string) bool {
+	for _, a := range s {
+		if a == e {
+			return true
+		}
+	}
+	return false
+}
+
+func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) {
+	seen := make(map[string]int, len(fis)) // allocate early; no need to grow this map.
+	for i := range fis {
+		f := fis[i]
+		if _, ok := seen[f.Name()]; ok {
+			dups = append(dups, f)
+		} else {
+			seen[f.Name()] = 0
+		}
+	}
+	return dups
+}
+
+func (fis FileInfoSums) Len() int      { return len(fis) }
+func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] }
+
+func (fis FileInfoSums) SortByPos() {
+	sort.Sort(byPos{fis})
+}
+
+func (fis FileInfoSums) SortByNames() {
+	sort.Sort(byName{fis})
+}
+
+func (fis FileInfoSums) SortBySums() {
+	dups := fis.GetDuplicatePaths()
+	if len(dups) > 0 {
+		sort.Sort(bySum{fis, dups})
+	} else {
+		sort.Sort(bySum{fis, nil})
+	}
+}
+
+// byName is a sort.Sort helper for sorting by file names.
+// If names are the same, order them by their appearance in the tar archive
+type byName struct{ FileInfoSums }

+func (bn byName) Less(i, j int) bool {
+	if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() {
+		return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos()
+	}
+	return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name()
+}

+// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive
+type bySum struct {
+	FileInfoSums
+	dups FileInfoSums
+}

+func (bs bySum) Less(i, j int) bool {
+	if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() {
+		return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos()
+	}
+	return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum()
+}

+// byPos is a sort.Sort helper for sorting the fileinfos by their original order (position in the archive)
+type byPos struct{ FileInfoSums }

+func (bp byPos) Less(i, j int) bool {
+	return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos()
+}
diff -Nru docker.io-0.9.1~dfsg1/pkg/tarsum/fileinfosums_test.go docker.io-1.3.2~dfsg1/pkg/tarsum/fileinfosums_test.go
--- docker.io-0.9.1~dfsg1/pkg/tarsum/fileinfosums_test.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/pkg/tarsum/fileinfosums_test.go	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,45 @@
+package tarsum

+import "testing"

+func newFileInfoSums() FileInfoSums {
+	return FileInfoSums{
+		fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2},
+		fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5},
+		fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0},
+		fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3},
+		fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4},
+		fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1},
+	}
+}

+func TestSortFileInfoSums(t *testing.T) {
+	dups := newFileInfoSums().GetAllFile("dup1")
+	if len(dups) != 2 {
+		t.Errorf("expected length 2, got %d", len(dups))
+	}
+	dups.SortByNames()
+	if dups[0].Pos() != 4 {
+		t.Errorf("sorted dups should be ordered by position.
Expected 4, got %d", dups[0].Pos())
+	}

+	fis := newFileInfoSums()
+	expected := "0abcdef1234567890"
+	fis.SortBySums()
+	got := fis[0].Sum()
+	if got != expected {
+		t.Errorf("Expected %q, got %q", expected, got)
+	}

+	fis = newFileInfoSums()
+	expected = "dup1"
+	fis.SortByNames()
+	gotFis := fis[0]
+	if gotFis.Name() != expected {
+		t.Errorf("Expected %q, got %q", expected, gotFis.Name())
+	}
+	// since a duplicate is first, ensure it is ordered first by position too
+	if gotFis.Pos() != 4 {
+		t.Errorf("Expected %d, got %d", 4, gotFis.Pos())
+	}
+}
diff -Nru docker.io-0.9.1~dfsg1/pkg/tarsum/MAINTAINER docker.io-1.3.2~dfsg1/pkg/tarsum/MAINTAINER
--- docker.io-0.9.1~dfsg1/pkg/tarsum/MAINTAINER	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/pkg/tarsum/MAINTAINER	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1 @@
+Eric Windisch (@ewindisch)
diff -Nru docker.io-0.9.1~dfsg1/pkg/tarsum/tarsum.go docker.io-1.3.2~dfsg1/pkg/tarsum/tarsum.go
--- docker.io-0.9.1~dfsg1/pkg/tarsum/tarsum.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/pkg/tarsum/tarsum.go	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,285 @@
+package tarsum

+import (
+	"bytes"
+	"compress/gzip"
+	"crypto/sha256"
+	"encoding/hex"
+	"hash"
+	"io"
+	"sort"
+	"strconv"
+	"strings"

+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"

+	"github.com/docker/docker/pkg/log"
+)

+const (
+	buf8K  = 8 * 1024
+	buf16K = 16 * 1024
+	buf32K = 32 * 1024
+)

+// NewTarSum creates a new interface for calculating a fixed-time checksum of a
+// tar archive.
+//
+// This is used for calculating checksums of layers of an image, in some cases
+// including the byte payload of the image's json metadata as well, and for
+// calculating the checksums for buildcache.
+func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) {
+	if _, ok := tarSumVersions[v]; !ok {
+		return nil, ErrVersionNotImplemented
+	}
+	return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v}, nil
+}

+// NewTarSumHash creates a new TarSum, using the provided THash rather than the DefaultTHash
+func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) {
+	if _, ok := tarSumVersions[v]; !ok {
+		return nil, ErrVersionNotImplemented
+	}
+	return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, tHash: tHash}, nil
+}

+// TarSum is the generic interface for calculating fixed-time
+// checksums of a tar archive
+type TarSum interface {
+	io.Reader
+	GetSums() FileInfoSums
+	Sum([]byte) string
+	Version() Version
+	Hash() THash
+}

+// tarSum struct is the structure for a Version0 checksum calculation
+type tarSum struct {
+	io.Reader
+	tarR               *tar.Reader
+	tarW               *tar.Writer
+	writer             writeCloseFlusher
+	bufTar             *bytes.Buffer
+	bufWriter          *bytes.Buffer
+	bufData            []byte
+	h                  hash.Hash
+	tHash              THash
+	sums               FileInfoSums
+	fileCounter        int64
+	currentFile        string
+	finished           bool
+	first              bool
+	DisableCompression bool // false by default. When false, the output is gzip compressed.
+ tarSumVersion Version // this field is not exported so it can not be mutated during use +} + +func (ts tarSum) Hash() THash { + return ts.tHash +} + +func (ts tarSum) Version() Version { + return ts.tarSumVersion +} + +// A hash.Hash type generator and its name +type THash interface { + Hash() hash.Hash + Name() string +} + +// Convenience method for creating a THash +func NewTHash(name string, h func() hash.Hash) THash { + return simpleTHash{n: name, h: h} +} + +// TarSum default is "sha256" +var DefaultTHash = NewTHash("sha256", sha256.New) + +type simpleTHash struct { + n string + h func() hash.Hash +} + +func (sth simpleTHash) Name() string { return sth.n } +func (sth simpleTHash) Hash() hash.Hash { return sth.h() } + +func (ts tarSum) selectHeaders(h *tar.Header, v Version) (set [][2]string) { + for _, elem := range [][2]string{ + {"name", h.Name}, + {"mode", strconv.Itoa(int(h.Mode))}, + {"uid", strconv.Itoa(h.Uid)}, + {"gid", strconv.Itoa(h.Gid)}, + {"size", strconv.Itoa(int(h.Size))}, + {"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))}, + {"typeflag", string([]byte{h.Typeflag})}, + {"linkname", h.Linkname}, + {"uname", h.Uname}, + {"gname", h.Gname}, + {"devmajor", strconv.Itoa(int(h.Devmajor))}, + {"devminor", strconv.Itoa(int(h.Devminor))}, + } { + if v >= VersionDev && elem[0] == "mtime" { + continue + } + set = append(set, elem) + } + return +} + +func (ts *tarSum) encodeHeader(h *tar.Header) error { + for _, elem := range ts.selectHeaders(h, ts.Version()) { + if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { + return err + } + } + + // include the additional pax headers, from an ordered list + if ts.Version() >= VersionDev { + var keys []string + for k := range h.Xattrs { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + if _, err := ts.h.Write([]byte(k + h.Xattrs[k])); err != nil { + return err + } + } + } + return nil +} + +func (ts *tarSum) initTarSum() error { + ts.bufTar = bytes.NewBuffer([]byte{}) + ts.bufWriter = bytes.NewBuffer([]byte{}) + ts.tarR = tar.NewReader(ts.Reader) + ts.tarW = tar.NewWriter(ts.bufTar) + if !ts.DisableCompression { + ts.writer = gzip.NewWriter(ts.bufWriter) + } else { + ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} + } + if ts.tHash == nil { + ts.tHash = DefaultTHash + } + ts.h = ts.tHash.Hash() + ts.h.Reset() + ts.first = true + ts.sums = FileInfoSums{} + return nil +} + +func (ts *tarSum) Read(buf []byte) (int, error) { + if ts.writer == nil { + if err := ts.initTarSum(); err != nil { + return 0, err + } + } + + if ts.finished { + return ts.bufWriter.Read(buf) + } + if len(ts.bufData) < len(buf) { + switch { + case len(buf) <= buf8K: + ts.bufData = make([]byte, buf8K) + case len(buf) <= buf16K: + ts.bufData = make([]byte, buf16K) + case len(buf) <= buf32K: + ts.bufData = make([]byte, buf32K) + default: + ts.bufData = make([]byte, len(buf)) + } + } + buf2 := ts.bufData[:len(buf)] + + n, err := ts.tarR.Read(buf2) + if err != nil { + if err == io.EOF { + if _, err := ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + if !ts.first { + ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) + ts.fileCounter++ + ts.h.Reset() + } else { + ts.first = false + } + + currentHeader, err := ts.tarR.Next() + if err != nil { + if err == io.EOF { + if err := ts.tarW.Close(); err != nil { + return 0, err + } + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + if err := ts.writer.Close(); err != nil { + 
return 0, err + } + ts.finished = true + return n, nil + } + return n, err + } + ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/") + if err := ts.encodeHeader(currentHeader); err != nil { + return 0, err + } + if err := ts.tarW.WriteHeader(currentHeader); err != nil { + return 0, err + } + if _, err := ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) + } + return n, err + } + + // Filling the hash buffer + if _, err = ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + + // Filling the tar writter + if _, err = ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + + // Filling the output writer + if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) +} + +func (ts *tarSum) Sum(extra []byte) string { + ts.sums.SortBySums() + h := ts.tHash.Hash() + if extra != nil { + h.Write(extra) + } + for _, fis := range ts.sums { + log.Debugf("-->%s<--", fis.Sum()) + h.Write([]byte(fis.Sum())) + } + checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) + log.Debugf("checksum processed: %s", checksum) + return checksum +} + +func (ts *tarSum) GetSums() FileInfoSums { + return ts.sums +} diff -Nru docker.io-0.9.1~dfsg1/pkg/tarsum/tarsum_test.go docker.io-1.3.2~dfsg1/pkg/tarsum/tarsum_test.go --- docker.io-0.9.1~dfsg1/pkg/tarsum/tarsum_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/tarsum/tarsum_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,408 @@ +package tarsum + +import ( + "bytes" + "compress/gzip" + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +type testLayer struct { + filename string + options *sizedOptions + jsonfile string + gzip bool + tarsum string + version Version + hash THash +} + +var testLayers = []testLayer{ + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: Version0, + tarsum: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:486b86e25c4db4551228154848bc4663b15dd95784b1588980f4ba1cb42e83e9"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + gzip: true, + tarsum: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"}, + { + // Tests existing version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: Version0, + tarsum: "tarsum+sha256:e86f81a4d552f13039b1396ed03ca968ea9717581f9577ef1876ea6ff9b38c98"}, + { + // Tests next version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: VersionDev, + 
tarsum: "tarsum.dev+sha256:6235cd3a2afb7501bac541772a3d61a3634e95bc90bb39a4676e2cb98d08390d"}, + { + filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", + jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", + tarsum: "tarsum+sha256:ac672ee85da9ab7f9667ae3c32841d3e42f33cc52c273c23341dabba1c8b0c8b"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, + { + // this tar has two files with the same path + filename: "testdata/collision/collision-0.tar", + tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"}, + { + // this tar has the same two files (with the same path), but reversed order. ensuring is has different hash than above + filename: "testdata/collision/collision-1.tar", + tarsum: "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"}, + { + // this tar has newer of collider-0.tar, ensuring is has different hash + filename: "testdata/collision/collision-2.tar", + tarsum: "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"}, + { + // this tar has newer of collider-1.tar, ensuring is has different hash + filename: "testdata/collision/collision-3.tar", + tarsum: "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+md5:0d7529ec7a8360155b48134b8e599f53", + hash: md5THash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df", + hash: sha1Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c", + hash: sha224Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636", + hash: sha384Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855", + hash: sha512Hash, + }, +} + +type sizedOptions struct { + num int64 + size int64 + isRand bool + realFile bool +} + +// make a tar: +// * num is the number of files the tar should have +// * size is the bytes per file +// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros) +// * realFile will write to a TempFile, instead of an in memory buffer +func sizedTar(opts sizedOptions) io.Reader { + var ( + fh io.ReadWriter + err error + ) + if opts.realFile { + fh, err = ioutil.TempFile("", "tarsum") + if err != nil { + return nil + } + } else { + fh = bytes.NewBuffer([]byte{}) + } + tarW := tar.NewWriter(fh) + for i := int64(0); i < opts.num; i++ { + err := tarW.WriteHeader(&tar.Header{ + Name: fmt.Sprintf("/testdata%d", i), + Mode: 0755, + Uid: 0, + Gid: 0, + Size: opts.size, + }) + if err != nil { + return nil + } + var rBuf []byte + if opts.isRand { + rBuf = make([]byte, 8) + _, err = rand.Read(rBuf) + if err != nil { + return nil + } + } else { + rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0} + } + + for i := int64(0); i 
< opts.size/int64(8); i++ { + tarW.Write(rBuf) + } + } + return fh +} + +func emptyTarSum(gzip bool) (TarSum, error) { + reader, writer := io.Pipe() + tarWriter := tar.NewWriter(writer) + + // Immediately close tarWriter and write-end of the + // Pipe in a separate goroutine so we don't block. + go func() { + tarWriter.Close() + writer.Close() + }() + + return NewTarSum(reader, !gzip, Version0) +} + +// TestEmptyTar tests that tarsum does not fail to read an empty tar +// and correctly returns the hex digest of an empty hash. +func TestEmptyTar(t *testing.T) { + // Test without gzip. + ts, err := emptyTarSum(false) + if err != nil { + t.Fatal(err) + } + + zeroBlock := make([]byte, 1024) + buf := new(bytes.Buffer) + + n, err := io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { + t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) + } + + expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil)) + resultSum := ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test with gzip. + ts, err = emptyTarSum(true) + if err != nil { + t.Fatal(err) + } + buf.Reset() + + n, err = io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + bufgz := new(bytes.Buffer) + gz := gzip.NewWriter(bufgz) + n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) + gz.Close() + gzBytes := bufgz.Bytes() + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) { + t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } +} + +var ( + md5THash = NewTHash("md5", md5.New) + sha1Hash = NewTHash("sha1", sha1.New) + sha224Hash = NewTHash("sha224", sha256.New224) + sha384Hash = NewTHash("sha384", sha512.New384) + sha512Hash = NewTHash("sha512", sha512.New) +) + +func TestTarSums(t *testing.T) { + for _, layer := range testLayers { + var ( + fh io.Reader + err error + ) + if len(layer.filename) > 0 { + fh, err = os.Open(layer.filename) + if err != nil { + t.Errorf("failed to open %s: %s", layer.filename, err) + continue + } + } else if layer.options != nil { + fh = sizedTar(*layer.options) + } else { + // What else is there to test? + t.Errorf("what to do with %#v", layer) + continue + } + if file, ok := fh.(*os.File); ok { + defer file.Close() + } + + var ts TarSum + if layer.hash == nil { + // double negatives! 
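+			// NewTarSum's second argument means "disable compression", so a
+			// gzip'd test layer must pass false here, hence the negation.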
+ ts, err = NewTarSum(fh, !layer.gzip, layer.version) + } else { + ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash) + } + if err != nil { + t.Errorf("%q :: %q", err, layer.filename) + continue + } + + // Read variable number of bytes to test dynamic buffer + dBuf := make([]byte, 1) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 1B from %s: %s", layer.filename, err) + continue + } + dBuf = make([]byte, 16*1024) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 16KB from %s: %s", layer.filename, err) + continue + } + + // Read and discard remaining bytes + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to copy from %s: %s", layer.filename, err) + continue + } + var gotSum string + if len(layer.jsonfile) > 0 { + jfh, err := os.Open(layer.jsonfile) + if err != nil { + t.Errorf("failed to open %s: %s", layer.jsonfile, err) + continue + } + buf, err := ioutil.ReadAll(jfh) + if err != nil { + t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) + continue + } + gotSum = ts.Sum(buf) + } else { + gotSum = ts.Sum(nil) + } + + if layer.tarsum != gotSum { + t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) + } + } +} + +func Benchmark9kTar(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + n, err := io.Copy(buf, fh) + fh.Close() + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts, err := NewTarSum(buf, true, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +func Benchmark9kTarGzip(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + n, err := io.Copy(buf, fh) + fh.Close() + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts, err := NewTarSum(buf, false, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) +} + +func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { + var fh *os.File + tarReader := sizedTar(opts) + if br, ok := tarReader.(*os.File); ok { + fh = br + } + defer os.Remove(fh.Name()) + defer fh.Close() + + b.SetBytes(opts.size * opts.num) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts, err := NewTarSum(fh, !isGzip, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + fh.Seek(0, 0) + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json docker.io-1.3.2~dfsg1/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json --- 
docker.io-0.9.1~dfsg1/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' /etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425} \ No newline at end of file Binary files /tmp/xlUMNaLkBE/docker.io-0.9.1~dfsg1/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar and /tmp/0HY7LDwBQ9/docker.io-1.3.2~dfsg1/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar differ diff -Nru docker.io-0.9.1~dfsg1/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json docker.io-1.3.2~dfsg1/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json --- docker.io-0.9.1~dfsg1/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0} \ No newline at end of file Binary files /tmp/xlUMNaLkBE/docker.io-0.9.1~dfsg1/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar and /tmp/0HY7LDwBQ9/docker.io-1.3.2~dfsg1/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar differ Binary files 
/tmp/xlUMNaLkBE/docker.io-0.9.1~dfsg1/pkg/tarsum/testdata/collision/collision-0.tar and /tmp/0HY7LDwBQ9/docker.io-1.3.2~dfsg1/pkg/tarsum/testdata/collision/collision-0.tar differ Binary files /tmp/xlUMNaLkBE/docker.io-0.9.1~dfsg1/pkg/tarsum/testdata/collision/collision-1.tar and /tmp/0HY7LDwBQ9/docker.io-1.3.2~dfsg1/pkg/tarsum/testdata/collision/collision-1.tar differ Binary files /tmp/xlUMNaLkBE/docker.io-0.9.1~dfsg1/pkg/tarsum/testdata/collision/collision-2.tar and /tmp/0HY7LDwBQ9/docker.io-1.3.2~dfsg1/pkg/tarsum/testdata/collision/collision-2.tar differ Binary files /tmp/xlUMNaLkBE/docker.io-0.9.1~dfsg1/pkg/tarsum/testdata/collision/collision-3.tar and /tmp/0HY7LDwBQ9/docker.io-1.3.2~dfsg1/pkg/tarsum/testdata/collision/collision-3.tar differ diff -Nru docker.io-0.9.1~dfsg1/pkg/tarsum/testdata/xattr/json docker.io-1.3.2~dfsg1/pkg/tarsum/testdata/xattr/json --- docker.io-0.9.1~dfsg1/pkg/tarsum/testdata/xattr/json 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/tarsum/testdata/xattr/json 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1 @@ +{"id":"4439c3c7f847954100b42b267e7e5529cac1d6934db082f65795c5ca2e594d93","parent":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","created":"2014-05-16T17:19:44.091534414Z","container":"5f92fb06cc58f357f0cde41394e2bbbb664e663974b2ac1693ab07b7a306749b","container_config":{"Hostname":"9565c6517a0e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","setcap 'cap_setgid,cap_setuid+ep' ./file \u0026\u0026 getcap ./file"],"Image":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.11.1-dev","config":{"Hostname":"9565c6517a0e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":0} \ No newline at end of file Binary files /tmp/xlUMNaLkBE/docker.io-0.9.1~dfsg1/pkg/tarsum/testdata/xattr/layer.tar and /tmp/0HY7LDwBQ9/docker.io-1.3.2~dfsg1/pkg/tarsum/testdata/xattr/layer.tar differ diff -Nru docker.io-0.9.1~dfsg1/pkg/tarsum/versioning.go docker.io-1.3.2~dfsg1/pkg/tarsum/versioning.go --- docker.io-0.9.1~dfsg1/pkg/tarsum/versioning.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/tarsum/versioning.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,56 @@ +package tarsum + +import ( + "errors" + "strings" +) + +// versioning of the TarSum algorithm +// based on the prefix of the hash used +// i.e. 
"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" +type Version int + +const ( + // Prefix of "tarsum" + Version0 Version = iota + // Prefix of "tarsum.dev" + // NOTE: this variable will be of an unsettled next-version of the TarSum calculation + VersionDev +) + +// Get a list of all known tarsum Version +func GetVersions() []Version { + v := []Version{} + for k := range tarSumVersions { + v = append(v, k) + } + return v +} + +var tarSumVersions = map[Version]string{ + 0: "tarsum", + 1: "tarsum.dev", +} + +func (tsv Version) String() string { + return tarSumVersions[tsv] +} + +// GetVersionFromTarsum returns the Version from the provided string +func GetVersionFromTarsum(tarsum string) (Version, error) { + tsv := tarsum + if strings.Contains(tarsum, "+") { + tsv = strings.SplitN(tarsum, "+", 2)[0] + } + for v, s := range tarSumVersions { + if s == tsv { + return v, nil + } + } + return -1, ErrNotVersion +} + +var ( + ErrNotVersion = errors.New("string does not include a TarSum Version") + ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") +) diff -Nru docker.io-0.9.1~dfsg1/pkg/tarsum/versioning_test.go docker.io-1.3.2~dfsg1/pkg/tarsum/versioning_test.go --- docker.io-0.9.1~dfsg1/pkg/tarsum/versioning_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/tarsum/versioning_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,49 @@ +package tarsum + +import ( + "testing" +) + +func TestVersion(t *testing.T) { + expected := "tarsum" + var v Version + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.dev" + v = 1 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } +} + +func TestGetVersion(t *testing.T) { + testSet := []struct { + Str string + Expected Version + }{ + {"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0}, + {"tarsum+sha256", Version0}, + {"tarsum", Version0}, + {"tarsum.dev", VersionDev}, + {"tarsum.dev+sha256:deadbeef", VersionDev}, + } + + for _, ts := range testSet { + v, err := GetVersionFromTarsum(ts.Str) + if err != nil { + t.Fatalf("%q : %s", err, ts.Str) + } + if v != ts.Expected { + t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v) + } + } + + // test one that does not exist, to ensure it errors + str := "weak+md5:abcdeabcde" + _, err := GetVersionFromTarsum(str) + if err != ErrNotVersion { + t.Fatalf("%q : %s", err, str) + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/tarsum/writercloser.go docker.io-1.3.2~dfsg1/pkg/tarsum/writercloser.go --- docker.io-0.9.1~dfsg1/pkg/tarsum/writercloser.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/tarsum/writercloser.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,22 @@ +package tarsum + +import ( + "io" +) + +type writeCloseFlusher interface { + io.WriteCloser + Flush() error +} + +type nopCloseFlusher struct { + io.Writer +} + +func (n *nopCloseFlusher) Close() error { + return nil +} + +func (n *nopCloseFlusher) Flush() error { + return nil +} diff -Nru docker.io-0.9.1~dfsg1/pkg/term/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/term/MAINTAINERS --- docker.io-0.9.1~dfsg1/pkg/term/MAINTAINERS 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/term/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -1,2 +1 @@ -Guillaume Charmes (@creack) -Solomon Hykes (@shykes) +Solomon Hykes (@shykes) diff -Nru docker.io-0.9.1~dfsg1/pkg/term/termios_freebsd.go 
docker.io-1.3.2~dfsg1/pkg/term/termios_freebsd.go --- docker.io-0.9.1~dfsg1/pkg/term/termios_freebsd.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/term/termios_freebsd.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,65 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA + + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff -Nru docker.io-0.9.1~dfsg1/pkg/testutils/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/testutils/MAINTAINERS --- docker.io-0.9.1~dfsg1/pkg/testutils/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/testutils/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,2 @@ +Solomon Hykes (@shykes) +Cristian Staretu (@unclejack) diff -Nru docker.io-0.9.1~dfsg1/pkg/testutils/README.md docker.io-1.3.2~dfsg1/pkg/testutils/README.md --- docker.io-0.9.1~dfsg1/pkg/testutils/README.md 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/testutils/README.md 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,2 @@ +`testutils` is a collection of utility functions to facilitate the writing +of tests. It is used in various places by the Docker test suite. diff -Nru docker.io-0.9.1~dfsg1/pkg/testutils/utils.go docker.io-1.3.2~dfsg1/pkg/testutils/utils.go --- docker.io-0.9.1~dfsg1/pkg/testutils/utils.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/testutils/utils.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,37 @@ +package testutils + +import ( + "math/rand" + "testing" + "time" +) + +const chars = "abcdefghijklmnopqrstuvwxyz" + + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " + +// Timeout calls f and waits for 100ms for it to complete. +// If it doesn't, it causes the tests to fail. +// t must be a valid testing context. 
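+//
+// A minimal usage sketch (names hypothetical):
+//
+//	func TestSomething(t *testing.T) {
+//		testutils.Timeout(t, func() {
+//			workThatShouldFinishWithin100ms()
+//		})
+//	}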
+func Timeout(t *testing.T, f func()) {
+	onTimeout := time.After(100 * time.Millisecond)
+	onDone := make(chan bool)
+	go func() {
+		f()
+		close(onDone)
+	}()
+	select {
+	case <-onTimeout:
+		t.Fatalf("timeout")
+	case <-onDone:
+	}
+}

+// RandomString returns a random string of the specified length
+func RandomString(length int) string {
+	res := make([]byte, length)
+	for i := 0; i < length; i++ {
+		res[i] = chars[rand.Intn(len(chars))]
+	}
+	return string(res)
+}
diff -Nru docker.io-0.9.1~dfsg1/pkg/timeutils/json.go docker.io-1.3.2~dfsg1/pkg/timeutils/json.go
--- docker.io-0.9.1~dfsg1/pkg/timeutils/json.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/pkg/timeutils/json.go	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,23 @@
+package timeutils

+import (
+	"errors"
+	"time"
+)

+const (
+	// Define our own version of RFC3339Nano because we want one
+	// that pads the nanoseconds part with zeros to ensure
+	// the timestamps are aligned in the logs.
+	RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+	JSONFormat       = `"` + time.RFC3339Nano + `"`
+)

+func FastMarshalJSON(t time.Time) (string, error) {
+	if y := t.Year(); y < 0 || y >= 10000 {
+		// RFC 3339 is clear that years are 4 digits exactly.
+		// See golang.org/issue/4556#c15 for more discussion.
+		return "", errors.New("Time.MarshalJSON: year outside of range [0,9999]")
+	}
+	return t.Format(JSONFormat), nil
+}
diff -Nru docker.io-0.9.1~dfsg1/pkg/timeutils/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/timeutils/MAINTAINERS
--- docker.io-0.9.1~dfsg1/pkg/timeutils/MAINTAINERS	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/pkg/timeutils/MAINTAINERS	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1 @@
+Cristian Staretu (@unclejack)
diff -Nru docker.io-0.9.1~dfsg1/pkg/truncindex/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/truncindex/MAINTAINERS
--- docker.io-0.9.1~dfsg1/pkg/truncindex/MAINTAINERS	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/pkg/truncindex/MAINTAINERS	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1 @@
+Cristian Staretu (@unclejack)
diff -Nru docker.io-0.9.1~dfsg1/pkg/truncindex/truncindex.go docker.io-1.3.2~dfsg1/pkg/truncindex/truncindex.go
--- docker.io-0.9.1~dfsg1/pkg/truncindex/truncindex.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/pkg/truncindex/truncindex.go	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,106 @@
+package truncindex

+import (
+	"errors"
+	"fmt"
+	"strings"
+	"sync"

+	"github.com/tchap/go-patricia/patricia"
+)

+var (
+	ErrNoID = errors.New("prefix can't be empty")
+)

+func init() {
+	// Change the patricia max prefix per node length,
+	// because our len(ID) is always 64
+	patricia.MaxPrefixPerNode = 64
+}

+// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
+// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
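+// For example (hypothetical IDs): after Add-ing a single ID beginning with
+// "4e5", Get("4e5") resolves it to the full ID; Get errors if the prefix is
+// empty, unknown, or shared by more than one stored ID.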
+type TruncIndex struct { + sync.RWMutex + trie *patricia.Trie + ids map[string]struct{} +} + +func NewTruncIndex(ids []string) (idx *TruncIndex) { + idx = &TruncIndex{ + ids: make(map[string]struct{}), + trie: patricia.NewTrie(), + } + for _, id := range ids { + idx.addId(id) + } + return +} + +func (idx *TruncIndex) addId(id string) error { + if strings.Contains(id, " ") { + return fmt.Errorf("Illegal character: ' '") + } + if id == "" { + return ErrNoID + } + if _, exists := idx.ids[id]; exists { + return fmt.Errorf("Id already exists: '%s'", id) + } + idx.ids[id] = struct{}{} + if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted { + return fmt.Errorf("Failed to insert id: %s", id) + } + return nil +} + +func (idx *TruncIndex) Add(id string) error { + idx.Lock() + defer idx.Unlock() + if err := idx.addId(id); err != nil { + return err + } + return nil +} + +func (idx *TruncIndex) Delete(id string) error { + idx.Lock() + defer idx.Unlock() + if _, exists := idx.ids[id]; !exists || id == "" { + return fmt.Errorf("No such id: '%s'", id) + } + delete(idx.ids, id) + if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted { + return fmt.Errorf("No such id: '%s'", id) + } + return nil +} + +func (idx *TruncIndex) Get(s string) (string, error) { + idx.RLock() + defer idx.RUnlock() + var ( + id string + ) + if s == "" { + return "", ErrNoID + } + subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error { + if id != "" { + // we haven't found the ID if there are two or more IDs + id = "" + return fmt.Errorf("we've found two entries") + } + id = string(prefix) + return nil + } + + if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { + return "", fmt.Errorf("No such id: %s", s) + } + if id != "" { + return id, nil + } + return "", fmt.Errorf("No such id: %s", s) +} diff -Nru docker.io-0.9.1~dfsg1/pkg/truncindex/truncindex_test.go docker.io-1.3.2~dfsg1/pkg/truncindex/truncindex_test.go --- docker.io-0.9.1~dfsg1/pkg/truncindex/truncindex_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/truncindex/truncindex_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,401 @@ +package truncindex + +import ( + "math/rand" + "testing" + + "github.com/docker/docker/utils" +) + +// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix. 
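+// The assertions lean on assertIndexGet (defined further down in this file),
+// which checks both the resolved ID and whether an error was expected.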
+func TestTruncIndex(t *testing.T) { + ids := []string{} + index := NewTruncIndex(ids) + // Get on an empty index + if _, err := index.Get("foobar"); err == nil { + t.Fatal("Get on an empty index should return an error") + } + + // Spaces should be illegal in an id + if err := index.Add("I have a space"); err == nil { + t.Fatalf("Adding an id with ' ' should return an error") + } + + id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96" + // Add an id + if err := index.Add(id); err != nil { + t.Fatal(err) + } + + // Add an empty id (should fail) + if err := index.Add(""); err == nil { + t.Fatalf("Adding an empty id should return an error") + } + + // Get a non-existing id + assertIndexGet(t, index, "abracadabra", "", true) + // Get an empty id + assertIndexGet(t, index, "", "", true) + // Get the exact id + assertIndexGet(t, index, id, id, false) + // The first letter should match + assertIndexGet(t, index, id[:1], id, false) + // The first half should match + assertIndexGet(t, index, id[:len(id)/2], id, false) + // The second half should NOT match + assertIndexGet(t, index, id[len(id)/2:], "", true) + + id2 := id[:6] + "blabla" + // Add an id + if err := index.Add(id2); err != nil { + t.Fatal(err) + } + // Both exact IDs should work + assertIndexGet(t, index, id, id, false) + assertIndexGet(t, index, id2, id2, false) + + // 6 characters or less should conflict + assertIndexGet(t, index, id[:6], "", true) + assertIndexGet(t, index, id[:4], "", true) + assertIndexGet(t, index, id[:1], "", true) + + // 7 characters should NOT conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id2[:7], id2, false) + + // Deleting a non-existing id should return an error + if err := index.Delete("non-existing"); err == nil { + t.Fatalf("Deleting a non-existing id should return an error") + } + + // Deleting an empty id should return an error + if err := index.Delete(""); err == nil { + t.Fatal("Deleting an empty id should return an error") + } + + // Deleting id2 should remove conflicts + if err := index.Delete(id2); err != nil { + t.Fatal(err) + } + // id2 should no longer work + assertIndexGet(t, index, id2, "", true) + assertIndexGet(t, index, id2[:7], "", true) + assertIndexGet(t, index, id2[:11], "", true) + + // conflicts between id and id2 should be gone + assertIndexGet(t, index, id[:6], id, false) + assertIndexGet(t, index, id[:4], id, false) + assertIndexGet(t, index, id[:1], id, false) + + // non-conflicting substrings should still not conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id[:15], id, false) + assertIndexGet(t, index, id, id, false) +} + +func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) { + if result, err := index.Get(input); err != nil && !expectError { + t.Fatalf("Unexpected error getting '%s': %s", input, err) + } else if err == nil && expectError { + t.Fatalf("Getting '%s' should return an error, not '%s'", input, result) + } else if result != expectedResult { + t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult) + } +} + +func BenchmarkTruncIndexAdd100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexAdd250(b *testing.B) { + var testSet []string + for i := 
0; i < 250; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexAdd500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexGet100(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 100; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexGet250(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 250; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexGet500(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexDelete100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexDelete250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexDelete500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id 
:= range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexNew100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexNew250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexNew500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexAddGet100(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := utils.GenerateRandomID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexAddGet250(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := utils.GenerateRandomID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexAddGet500(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := utils.GenerateRandomID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/units/duration.go docker.io-1.3.2~dfsg1/pkg/units/duration.go --- docker.io-0.9.1~dfsg1/pkg/units/duration.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/units/duration.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,31 @@ +package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +// (eg. "About a minute", "4 hours ago", etc.) 
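+//
+// For instance (from the accompanying tests): 47*time.Second yields
+// "47 seconds", 1*time.Hour+45*time.Minute yields "About an hour", and
+// 48*time.Hour yields "2 days".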
+func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 60 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours()); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*3 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%f years", d.Hours()/24/365) +} diff -Nru docker.io-0.9.1~dfsg1/pkg/units/duration_test.go docker.io-1.3.2~dfsg1/pkg/units/duration_test.go --- docker.io-0.9.1~dfsg1/pkg/units/duration_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/units/duration_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,46 @@ +package units + +import ( + "testing" + "time" +) + +func TestHumanDuration(t *testing.T) { + // Useful duration abstractions + day := 24 * time.Hour + week := 7 * day + month := 30 * day + year := 365 * day + + assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond)) + assertEquals(t, "47 seconds", HumanDuration(47*time.Second)) + assertEquals(t, "About a minute", HumanDuration(1*time.Minute)) + assertEquals(t, "3 minutes", HumanDuration(3*time.Minute)) + assertEquals(t, "35 minutes", HumanDuration(35*time.Minute)) + assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second)) + assertEquals(t, "About an hour", HumanDuration(1*time.Hour)) + assertEquals(t, "About an hour", HumanDuration(1*time.Hour+45*time.Minute)) + assertEquals(t, "3 hours", HumanDuration(3*time.Hour)) + assertEquals(t, "3 hours", HumanDuration(3*time.Hour+59*time.Minute)) + assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute)) + assertEquals(t, "24 hours", HumanDuration(24*time.Hour)) + assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour)) + assertEquals(t, "2 days", HumanDuration(2*day)) + assertEquals(t, "7 days", HumanDuration(7*day)) + assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour)) + assertEquals(t, "2 weeks", HumanDuration(2*week)) + assertEquals(t, "2 weeks", HumanDuration(2*week+4*day)) + assertEquals(t, "3 weeks", HumanDuration(3*week)) + assertEquals(t, "4 weeks", HumanDuration(4*week)) + assertEquals(t, "4 weeks", HumanDuration(4*week+3*day)) + assertEquals(t, "4 weeks", HumanDuration(1*month)) + assertEquals(t, "6 weeks", HumanDuration(1*month+2*week)) + assertEquals(t, "8 weeks", HumanDuration(2*month)) + assertEquals(t, "3 months", HumanDuration(3*month+1*week)) + assertEquals(t, "5 months", HumanDuration(5*month+2*week)) + assertEquals(t, "13 months", HumanDuration(13*month)) + assertEquals(t, "23 months", HumanDuration(23*month)) + assertEquals(t, "24 months", HumanDuration(24*month)) + assertEquals(t, "2.010959 years", HumanDuration(24*month+2*week)) + assertEquals(t, "3.164384 years", HumanDuration(3*year+2*month)) +} diff -Nru docker.io-0.9.1~dfsg1/pkg/units/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/units/MAINTAINERS --- docker.io-0.9.1~dfsg1/pkg/units/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/units/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Victor Vieux (@vieux) diff -Nru 
docker.io-0.9.1~dfsg1/pkg/units/size.go docker.io-1.3.2~dfsg1/pkg/units/size.go --- docker.io-0.9.1~dfsg1/pkg/units/size.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/units/size.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,81 @@ +package units + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// See: http://en.wikipedia.org/wiki/Binary_prefix +const ( + // Decimal + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB + + // Binary + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + TiB = 1024 * GiB + PiB = 1024 * TiB +) + +type unitMap map[string]int64 + +var ( + decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} + binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} + sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) +) + +var unitAbbrs = [...]string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + +// HumanSize returns a human-readable approximation of a size +// using SI standard (eg. "44kB", "17MB") +func HumanSize(size int64) string { + i := 0 + sizef := float64(size) + for sizef >= 1000.0 { + sizef = sizef / 1000.0 + i++ + } + return fmt.Sprintf("%.4g %s", sizef, unitAbbrs[i]) +} + +// FromHumanSize returns an integer from a human-readable specification of a +// size using SI standard (eg. "44kB", "17MB") +func FromHumanSize(size string) (int64, error) { + return parseSize(size, decimalMap) +} + +// Parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and +// returns the number of bytes, or -1 if the string is unparseable. +// Units are case-insensitive, and the 'b' suffix is optional. +func RAMInBytes(size string) (int64, error) { + return parseSize(size, binaryMap) +} + +// Parses the human-readable size string into the amount it represents +func parseSize(sizeStr string, uMap unitMap) (int64, error) { + matches := sizeRegex.FindStringSubmatch(sizeStr) + if len(matches) != 3 { + return -1, fmt.Errorf("Invalid size: '%s'", sizeStr) + } + + size, err := strconv.ParseInt(matches[1], 10, 0) + if err != nil { + return -1, err + } + + unitPrefix := strings.ToLower(matches[2]) + if mul, ok := uMap[unitPrefix]; ok { + size *= mul + } + + return size, nil +} diff -Nru docker.io-0.9.1~dfsg1/pkg/units/size_test.go docker.io-1.3.2~dfsg1/pkg/units/size_test.go --- docker.io-0.9.1~dfsg1/pkg/units/size_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/units/size_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,98 @@ +package units + +import ( + "reflect" + "runtime" + "strings" + "testing" +) + +func TestHumanSize(t *testing.T) { + assertEquals(t, "1 kB", HumanSize(1000)) + assertEquals(t, "1.024 kB", HumanSize(1024)) + assertEquals(t, "1 MB", HumanSize(1000000)) + assertEquals(t, "1.049 MB", HumanSize(1048576)) + assertEquals(t, "2 MB", HumanSize(2*MB)) + assertEquals(t, "3.42 GB", HumanSize(3.42*GB)) + assertEquals(t, "5.372 TB", HumanSize(5.372*TB)) + assertEquals(t, "2.22 PB", HumanSize(2.22*PB)) +} + +func TestFromHumanSize(t *testing.T) { + assertSuccessEquals(t, 32, FromHumanSize, "32") + assertSuccessEquals(t, 32, FromHumanSize, "32b") + assertSuccessEquals(t, 32, FromHumanSize, "32B") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32k") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32K") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb") + assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb") + assertSuccessEquals(t, 
32*GB, FromHumanSize, "32Gb") + assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb") + assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb") + + assertError(t, FromHumanSize, "") + assertError(t, FromHumanSize, "hello") + assertError(t, FromHumanSize, "-32") + assertError(t, FromHumanSize, "32.3") + assertError(t, FromHumanSize, " 32 ") + assertError(t, FromHumanSize, "32.3Kb") + assertError(t, FromHumanSize, "32 mb") + assertError(t, FromHumanSize, "32m b") + assertError(t, FromHumanSize, "32bm") +} + +func TestRAMInBytes(t *testing.T) { + assertSuccessEquals(t, 32, RAMInBytes, "32") + assertSuccessEquals(t, 32, RAMInBytes, "32b") + assertSuccessEquals(t, 32, RAMInBytes, "32B") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb") + assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb") + assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb") + assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb") + assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb") + assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB") + assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P") + + assertError(t, RAMInBytes, "") + assertError(t, RAMInBytes, "hello") + assertError(t, RAMInBytes, "-32") + assertError(t, RAMInBytes, "32.3") + assertError(t, RAMInBytes, " 32 ") + assertError(t, RAMInBytes, "32.3Kb") + assertError(t, RAMInBytes, "32 mb") + assertError(t, RAMInBytes, "32m b") + assertError(t, RAMInBytes, "32bm") +} + +func assertEquals(t *testing.T, expected, actual interface{}) { + if expected != actual { + t.Errorf("Expected '%v' but got '%v'", expected, actual) + } +} + +// func that maps to the parse function signatures as testing abstraction +type parseFn func(string) (int64, error) + +// Define 'String()' for pretty-print +func (fn parseFn) String() string { + fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() + return fnName[strings.LastIndex(fnName, ".")+1:] +} + +func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) { + res, err := fn(arg) + if err != nil || res != expected { + t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err) + } +} + +func assertError(t *testing.T, fn parseFn, arg string) { + res, err := fn(arg) + if err == nil && res != -1 { + t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res) + } +} diff -Nru docker.io-0.9.1~dfsg1/pkg/user/MAINTAINERS docker.io-1.3.2~dfsg1/pkg/user/MAINTAINERS --- docker.io-0.9.1~dfsg1/pkg/user/MAINTAINERS 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/user/MAINTAINERS 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -Tianon Gravi (@tianon) diff -Nru docker.io-0.9.1~dfsg1/pkg/user/user.go docker.io-1.3.2~dfsg1/pkg/user/user.go --- docker.io-0.9.1~dfsg1/pkg/user/user.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/user/user.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,241 +0,0 @@ -package user - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -type User struct { - Name string - Pass string - Uid int - Gid int - Gecos string - Home string - Shell string -} - -type Group struct { - Name string - Pass string - Gid int - List []string -} - -func parseLine(line string, v ...interface{}) { - if line == "" { - return - } - - parts := strings.Split(line, ":") - for i, p := range parts { - if len(v) <= i { - // if we have more "parts" than we have places to put them, bail for 
great "tolerance" of naughty configuration files - break - } - - switch e := v[i].(type) { - case *string: - // "root", "adm", "/bin/bash" - *e = p - case *int: - // "0", "4", "1000" - // ignore string to int conversion errors, for great "tolerance" of naughty configuration files - *e, _ = strconv.Atoi(p) - case *[]string: - // "", "root", "root,adm,daemon" - if p != "" { - *e = strings.Split(p, ",") - } else { - *e = []string{} - } - default: - // panic, because this is a programming/logic error, not a runtime one - panic("parseLine expects only pointers! argument " + strconv.Itoa(i) + " is not a pointer!") - } - } -} - -func ParsePasswd() ([]*User, error) { - return ParsePasswdFilter(nil) -} - -func ParsePasswdFilter(filter func(*User) bool) ([]*User, error) { - f, err := os.Open("/etc/passwd") - if err != nil { - return nil, err - } - defer f.Close() - return parsePasswdFile(f, filter) -} - -func parsePasswdFile(r io.Reader, filter func(*User) bool) ([]*User, error) { - var ( - s = bufio.NewScanner(r) - out = []*User{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - text := strings.TrimSpace(s.Text()) - if text == "" { - continue - } - - // see: man 5 passwd - // name:password:UID:GID:GECOS:directory:shell - // Name:Pass:Uid:Gid:Gecos:Home:Shell - // root:x:0:0:root:/root:/bin/bash - // adm:x:3:4:adm:/var/adm:/bin/false - p := &User{} - parseLine( - text, - &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell, - ) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - - return out, nil -} - -func ParseGroup() ([]*Group, error) { - return ParseGroupFilter(nil) -} - -func ParseGroupFilter(filter func(*Group) bool) ([]*Group, error) { - f, err := os.Open("/etc/group") - if err != nil { - return nil, err - } - defer f.Close() - return parseGroupFile(f, filter) -} - -func parseGroupFile(r io.Reader, filter func(*Group) bool) ([]*Group, error) { - var ( - s = bufio.NewScanner(r) - out = []*Group{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - text := s.Text() - if text == "" { - continue - } - - // see: man 5 group - // group_name:password:GID:user_list - // Name:Pass:Gid:List - // root:x:0:root - // adm:x:4:root,adm,daemon - p := &Group{} - parseLine( - text, - &p.Name, &p.Pass, &p.Gid, &p.List, - ) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - - return out, nil -} - -// Given a string like "user", "1000", "user:group", "1000:1000", returns the uid, gid, and list of supplementary group IDs, if possible. -func GetUserGroupSupplementary(userSpec string, defaultUid int, defaultGid int) (int, int, []int, error) { - var ( - uid = defaultUid - gid = defaultGid - suppGids = []int{} - - userArg, groupArg string - ) - - // allow for userArg to have either "user" syntax, or optionally "user:group" syntax - parseLine(userSpec, &userArg, &groupArg) - - users, err := ParsePasswdFilter(func(u *User) bool { - if userArg == "" { - return u.Uid == uid - } - return u.Name == userArg || strconv.Itoa(u.Uid) == userArg - }) - if err != nil && !os.IsNotExist(err) { - if userArg == "" { - userArg = strconv.Itoa(uid) - } - return 0, 0, nil, fmt.Errorf("Unable to find user %v: %v", userArg, err) - } - - haveUser := users != nil && len(users) > 0 - if haveUser { - // if we found any user entries that matched our filter, let's take the first one as "correct" - uid = users[0].Uid - gid = users[0].Gid - } else if userArg != "" { - // we asked for a user but didn't find them... 
let's check to see if we wanted a numeric user - uid, err = strconv.Atoi(userArg) - if err != nil { - // not numeric - we have to bail - return 0, 0, nil, fmt.Errorf("Unable to find user %v", userArg) - } - - // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit - } - - if groupArg != "" || (haveUser && users[0].Name != "") { - groups, err := ParseGroupFilter(func(g *Group) bool { - if groupArg != "" { - return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg - } - for _, u := range g.List { - if u == users[0].Name { - return true - } - } - return false - }) - if err != nil && !os.IsNotExist(err) { - return 0, 0, nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err) - } - - haveGroup := groups != nil && len(groups) > 0 - if groupArg != "" { - if haveGroup { - // if we found any group entries that matched our filter, let's take the first one as "correct" - gid = groups[0].Gid - } else { - // we asked for a group but didn't find id... let's check to see if we wanted a numeric group - gid, err = strconv.Atoi(groupArg) - if err != nil { - // not numeric - we have to bail - return 0, 0, nil, fmt.Errorf("Unable to find group %v", groupArg) - } - - // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit - } - } else if haveGroup { - suppGids = make([]int, len(groups)) - for i, group := range groups { - suppGids[i] = group.Gid - } - } - } - - return uid, gid, suppGids, nil -} diff -Nru docker.io-0.9.1~dfsg1/pkg/user/user_test.go docker.io-1.3.2~dfsg1/pkg/user/user_test.go --- docker.io-0.9.1~dfsg1/pkg/user/user_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/user/user_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,94 +0,0 @@ -package user - -import ( - "strings" - "testing" -) - -func TestUserParseLine(t *testing.T) { - var ( - a, b string - c []string - d int - ) - - parseLine("", &a, &b) - if a != "" || b != "" { - t.Fatalf("a and b should be empty ('%v', '%v')", a, b) - } - - parseLine("a", &a, &b) - if a != "a" || b != "" { - t.Fatalf("a should be 'a' and b should be empty ('%v', '%v')", a, b) - } - - parseLine("bad boys:corny cows", &a, &b) - if a != "bad boys" || b != "corny cows" { - t.Fatalf("a should be 'bad boys' and b should be 'corny cows' ('%v', '%v')", a, b) - } - - parseLine("", &c) - if len(c) != 0 { - t.Fatalf("c should be empty (%#v)", c) - } - - parseLine("d,e,f:g:h:i,j,k", &c, &a, &b, &c) - if a != "g" || b != "h" || len(c) != 3 || c[0] != "i" || c[1] != "j" || c[2] != "k" { - t.Fatalf("a should be 'g', b should be 'h', and c should be ['i','j','k'] ('%v', '%v', '%#v')", a, b, c) - } - - parseLine("::::::::::", &a, &b, &c) - if a != "" || b != "" || len(c) != 0 { - t.Fatalf("a, b, and c should all be empty ('%v', '%v', '%#v')", a, b, c) - } - - parseLine("not a number", &d) - if d != 0 { - t.Fatalf("d should be 0 (%v)", d) - } - - parseLine("b:12:c", &a, &d, &b) - if a != "b" || b != "c" || d != 12 { - t.Fatalf("a should be 'b' and b should be 'c', and d should be 12 ('%v', '%v', %v)", a, b, d) - } -} - -func TestUserParsePasswd(t *testing.T) { - users, err := parsePasswdFile(strings.NewReader(` -root:x:0:0:root:/root:/bin/bash -adm:x:3:4:adm:/var/adm:/bin/false -this is just some garbage data -`), nil) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if len(users) != 3 { - t.Fatalf("Expected 3 users, got %v", len(users)) - } - if users[0].Uid != 0 || users[0].Name != "root" { - t.Fatalf("Expected users[0] to be 0 - root, 
got %v - %v", users[0].Uid, users[0].Name) - } - if users[1].Uid != 3 || users[1].Name != "adm" { - t.Fatalf("Expected users[1] to be 3 - adm, got %v - %v", users[1].Uid, users[1].Name) - } -} - -func TestUserParseGroup(t *testing.T) { - groups, err := parseGroupFile(strings.NewReader(` -root:x:0:root -adm:x:4:root,adm,daemon -this is just some garbage data -`), nil) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if len(groups) != 3 { - t.Fatalf("Expected 3 groups, got %v", len(groups)) - } - if groups[0].Gid != 0 || groups[0].Name != "root" || len(groups[0].List) != 1 { - t.Fatalf("Expected groups[0] to be 0 - root - 1 member, got %v - %v - %v", groups[0].Gid, groups[0].Name, len(groups[0].List)) - } - if groups[1].Gid != 4 || groups[1].Name != "adm" || len(groups[1].List) != 3 { - t.Fatalf("Expected groups[1] to be 4 - adm - 3 members, got %v - %v - %v", groups[1].Gid, groups[1].Name, len(groups[1].List)) - } -} diff -Nru docker.io-0.9.1~dfsg1/pkg/version/version.go docker.io-1.3.2~dfsg1/pkg/version/version.go --- docker.io-0.9.1~dfsg1/pkg/version/version.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/version/version.go 2014-11-24 17:38:01.000000000 +0000 @@ -7,14 +7,22 @@ type Version string -func (me Version) compareTo(other string) int { +func (me Version) compareTo(other Version) int { var ( meTab = strings.Split(string(me), ".") - otherTab = strings.Split(other, ".") + otherTab = strings.Split(string(other), ".") ) - for i, s := range meTab { + + max := len(meTab) + if len(otherTab) > max { + max = len(otherTab) + } + for i := 0; i < max; i++ { var meInt, otherInt int - meInt, _ = strconv.Atoi(s) + + if len(meTab) > i { + meInt, _ = strconv.Atoi(meTab[i]) + } if len(otherTab) > i { otherInt, _ = strconv.Atoi(otherTab[i]) } @@ -25,28 +33,25 @@ return -1 } } - if len(otherTab) > len(meTab) { - return -1 - } return 0 } -func (me Version) LessThan(other string) bool { +func (me Version) LessThan(other Version) bool { return me.compareTo(other) == -1 } -func (me Version) LessThanOrEqualTo(other string) bool { +func (me Version) LessThanOrEqualTo(other Version) bool { return me.compareTo(other) <= 0 } -func (me Version) GreaterThan(other string) bool { +func (me Version) GreaterThan(other Version) bool { return me.compareTo(other) == 1 } -func (me Version) GreaterThanOrEqualTo(other string) bool { +func (me Version) GreaterThanOrEqualTo(other Version) bool { return me.compareTo(other) >= 0 } -func (me Version) Equal(other string) bool { +func (me Version) Equal(other Version) bool { return me.compareTo(other) == 0 } diff -Nru docker.io-0.9.1~dfsg1/pkg/version/version_test.go docker.io-1.3.2~dfsg1/pkg/version/version_test.go --- docker.io-0.9.1~dfsg1/pkg/version/version_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/pkg/version/version_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -5,13 +5,15 @@ ) func assertVersion(t *testing.T, a, b string, result int) { - if r := Version(a).compareTo(b); r != result { + if r := Version(a).compareTo(Version(b)); r != result { t.Fatalf("Unexpected version comparison result. 
Found %d, expected %d", r, result) } } func TestCompareVersion(t *testing.T) { assertVersion(t, "1.12", "1.12", 0) + assertVersion(t, "1.0.0", "1", 0) + assertVersion(t, "1", "1.0.0", 0) assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) assertVersion(t, "1", "1.0.1", -1) assertVersion(t, "1.0.1", "1", 1) diff -Nru docker.io-0.9.1~dfsg1/README.md docker.io-1.3.2~dfsg1/README.md --- docker.io-0.9.1~dfsg1/README.md 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/README.md 2014-11-24 17:38:01.000000000 +0000 @@ -18,7 +18,13 @@ of large-scale operation and support of hundreds of thousands of applications and databases. -![Docker L](docs/theme/docker/static/img/dockerlogo-h.png "Docker") +![Docker L](docs/theme/mkdocs/images/docker-logo-compressed.png "Docker") + +## Security Disclosure + +Security is very important to us. If you have any issue regarding security, +please disclose the information responsibly by sending an email to +security@docker.com and not by creating a github issue. ## Better than VMs @@ -125,9 +131,8 @@ ```bash FROM ubuntu:12.04 -RUN apt-get update -RUN apt-get install -q -y python python-pip curl -RUN curl -L https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv +RUN apt-get update && apt-get install -y python python-pip curl +RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv RUN cd helloflask-master && pip install -r requirements.txt ``` @@ -142,11 +147,10 @@ bare metal and virtualized. It is available as a binary on most modern Linux systems, or as a VM on Windows, Mac and other systems. -We also offer an interactive tutorial for quickly learning the basics of -using Docker. +We also offer an [interactive tutorial](http://www.docker.com/tryit/) +for quickly learning the basics of using Docker. -For up-to-date install instructions and online tutorials, see the -[Getting Started page](http://www.docker.io/gettingstarted/). +For up-to-date install instructions, see the [Docs](http://docs.docker.com). Usage examples ============== @@ -155,7 +159,7 @@ (app servers, databases etc.), interactive shell sessions, etc. You can find a [list of real-world -examples](http://docs.docker.io/en/latest/examples/) in the +examples](http://docs.docker.com/examples/) in the documentation. Under the hood @@ -173,6 +177,9 @@ Contributing to Docker ====================== +[![GoDoc](https://godoc.org/github.com/docker/docker?status.png)](https://godoc.org/github.com/docker/docker) +[![Travis](https://travis-ci.org/docker/docker.svg?branch=master)](https://travis-ci.org/docker/docker) + Want to hack on Docker? Awesome! There are instructions to get you started [here](CONTRIBUTING.md). @@ -190,3 +197,9 @@ violate applicable laws. For more information, please see http://www.bis.doc.gov + + +Licensing +========= +Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text. 
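The compareTo rewrite in pkg/version above pads missing components with zeros, so "1" and "1.0.0" now compare as equal instead of the shorter string sorting lower. A short sketch of the resulting semantics, assuming the github.com/docker/docker/pkg/version import path:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/version"
)

func main() {
	// Missing components count as zero under the new compareTo.
	fmt.Println(version.Version("1").Equal(version.Version("1.0.0"))) // true
	// Components compare numerically, not lexically: "05" parses as 5.
	fmt.Println(version.Version("1.05").LessThan(version.Version("1.5"))) // false
	fmt.Println(version.Version("1.3.2").GreaterThanOrEqualTo(version.Version("1.3"))) // true
}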
+ diff -Nru docker.io-0.9.1~dfsg1/registry/auth.go docker.io-1.3.2~dfsg1/registry/auth.go --- docker.io-0.9.1~dfsg1/registry/auth.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/registry/auth.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,310 @@ +package registry + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "path" + "strings" + + "github.com/docker/docker/utils" +) + +const ( + // Where we store the config file + CONFIGFILE = ".dockercfg" + + // Only used for user auth + account creation + INDEXSERVER = "https://index.docker.io/v1/" + REGISTRYSERVER = "https://registry-1.docker.io/v1/" + + // INDEXSERVER = "https://registry-stage.hub.docker.com/v1/" +) + +var ( + ErrConfigFileMissing = errors.New("The Auth config file is missing") + IndexServerURL *url.URL +) + +func init() { + url, err := url.Parse(INDEXSERVER) + if err != nil { + panic(err) + } + IndexServerURL = url +} + +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth"` + Email string `json:"email"` + ServerAddress string `json:"serveraddress,omitempty"` +} + +type ConfigFile struct { + Configs map[string]AuthConfig `json:"configs,omitempty"` + rootPath string +} + +func IndexServerAddress() string { + return INDEXSERVER +} + +// create a base64 encoded auth string to store in config +func encodeAuth(authConfig *AuthConfig) string { + authStr := authConfig.Username + ":" + authConfig.Password + msg := []byte(authStr) + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) + base64.StdEncoding.Encode(encoded, msg) + return string(encoded) +} + +// decode the auth string +func decodeAuth(authStr string) (string, string, error) { + decLen := base64.StdEncoding.DecodedLen(len(authStr)) + decoded := make([]byte, decLen) + authByte := []byte(authStr) + n, err := base64.StdEncoding.Decode(decoded, authByte) + if err != nil { + return "", "", err + } + if n > decLen { + return "", "", fmt.Errorf("Something went wrong decoding auth config") + } + arr := strings.SplitN(string(decoded), ":", 2) + if len(arr) != 2 { + return "", "", fmt.Errorf("Invalid auth configuration file") + } + password := strings.Trim(arr[1], "\x00") + return arr[0], password, nil +} + +// load up the auth config information and return values +// FIXME: use the internal golang config parser +func LoadConfig(rootPath string) (*ConfigFile, error) { + configFile := ConfigFile{Configs: make(map[string]AuthConfig), rootPath: rootPath} + confFile := path.Join(rootPath, CONFIGFILE) + if _, err := os.Stat(confFile); err != nil { + return &configFile, nil //missing file is not an error + } + b, err := ioutil.ReadFile(confFile) + if err != nil { + return &configFile, err + } + + if err := json.Unmarshal(b, &configFile.Configs); err != nil { + arr := strings.Split(string(b), "\n") + if len(arr) < 2 { + return &configFile, fmt.Errorf("The Auth config file is empty") + } + authConfig := AuthConfig{} + origAuth := strings.Split(arr[0], " = ") + if len(origAuth) != 2 { + return &configFile, fmt.Errorf("Invalid Auth config file") + } + authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) + if err != nil { + return &configFile, err + } + origEmail := strings.Split(arr[1], " = ") + if len(origEmail) != 2 { + return &configFile, fmt.Errorf("Invalid Auth config file") + } + authConfig.Email = origEmail[1] + authConfig.ServerAddress = IndexServerAddress() + 
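// the legacy two-line config format (auth/email pairs) only ever
+		// described credentials for the official index, so the parsed
+		// entry is keyed by IndexServerAddress()
+		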
configFile.Configs[IndexServerAddress()] = authConfig + } else { + for k, authConfig := range configFile.Configs { + authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) + if err != nil { + return &configFile, err + } + authConfig.Auth = "" + configFile.Configs[k] = authConfig + authConfig.ServerAddress = k + } + } + return &configFile, nil +} + +// save the auth config +func SaveConfig(configFile *ConfigFile) error { + confFile := path.Join(configFile.rootPath, CONFIGFILE) + if len(configFile.Configs) == 0 { + os.Remove(confFile) + return nil + } + + configs := make(map[string]AuthConfig, len(configFile.Configs)) + for k, authConfig := range configFile.Configs { + authCopy := authConfig + + authCopy.Auth = encodeAuth(&authCopy) + authCopy.Username = "" + authCopy.Password = "" + authCopy.ServerAddress = "" + configs[k] = authCopy + } + + b, err := json.Marshal(configs) + if err != nil { + return err + } + err = ioutil.WriteFile(confFile, b, 0600) + if err != nil { + return err + } + return nil +} + +// try to register/login to the registry server +func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, error) { + var ( + status string + reqBody []byte + err error + client = &http.Client{ + Transport: &http.Transport{ + DisableKeepAlives: true, + Proxy: http.ProxyFromEnvironment, + }, + CheckRedirect: AddRequiredHeadersToRedirectedRequests, + } + reqStatusCode = 0 + serverAddress = authConfig.ServerAddress + ) + + if serverAddress == "" { + serverAddress = IndexServerAddress() + } + + loginAgainstOfficialIndex := serverAddress == IndexServerAddress() + + // to avoid sending the server address to the server it should be removed before being marshalled + authCopy := *authConfig + authCopy.ServerAddress = "" + + jsonBody, err := json.Marshal(authCopy) + if err != nil { + return "", fmt.Errorf("Config Error: %s", err) + } + + // using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status. + b := strings.NewReader(string(jsonBody)) + req1, err := http.Post(serverAddress+"users/", "application/json; charset=utf-8", b) + if err != nil { + return "", fmt.Errorf("Server Error: %s", err) + } + reqStatusCode = req1.StatusCode + defer req1.Body.Close() + reqBody, err = ioutil.ReadAll(req1.Body) + if err != nil { + return "", fmt.Errorf("Server Error: [%#v] %s", reqStatusCode, err) + } + + if reqStatusCode == 201 { + if loginAgainstOfficialIndex { + status = "Account created. Please use the confirmation link we sent" + + " to your e-mail to activate it." + } else { + status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions how to activate it." + } + } else if reqStatusCode == 400 { + if string(reqBody) == "\"Username or email already exists\"" { + req, err := factory.NewRequest("GET", serverAddress+"users/", nil) + req.SetBasicAuth(authConfig.Username, authConfig.Password) + resp, err := client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + if resp.StatusCode == 200 { + status = "Login Succeeded" + } else if resp.StatusCode == 401 { + return "", fmt.Errorf("Wrong login/password, please try again") + } else if resp.StatusCode == 403 { + if loginAgainstOfficialIndex { + return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.") + } + return "", fmt.Errorf("Login: Account is not Active. 
Please see the documentation of the registry %s for instructions how to activate it.", serverAddress)
+			} else {
+				return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header)
+			}
+		} else {
+			return "", fmt.Errorf("Registration: %s", reqBody)
+		}
+	} else if reqStatusCode == 401 {
+		// This case would happen with private registries where /v1/users is
+		// protected, so people can use `docker login` as an auth check.
+		req, err := factory.NewRequest("GET", serverAddress+"users/", nil)
+		req.SetBasicAuth(authConfig.Username, authConfig.Password)
+		resp, err := client.Do(req)
+		if err != nil {
+			return "", err
+		}
+		defer resp.Body.Close()
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return "", err
+		}
+		if resp.StatusCode == 200 {
+			status = "Login Succeeded"
+		} else if resp.StatusCode == 401 {
+			return "", fmt.Errorf("Wrong login/password, please try again")
+		} else {
+			return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
+				resp.StatusCode, resp.Header)
+		}
+	} else {
+		return "", fmt.Errorf("Unexpected status code [%d] : %s", reqStatusCode, reqBody)
+	}
+	return status, nil
+}
+
+// this method matches an auth configuration to a server address or a URL
+func (config *ConfigFile) ResolveAuthConfig(hostname string) AuthConfig {
+	if hostname == IndexServerAddress() || len(hostname) == 0 {
+		// default to the index server
+		return config.Configs[IndexServerAddress()]
+	}
+
+	// First try the happy case
+	if c, found := config.Configs[hostname]; found {
+		return c
+	}
+
+	convertToHostname := func(url string) string {
+		stripped := url
+		if strings.HasPrefix(url, "http://") {
+			stripped = strings.Replace(url, "http://", "", 1)
+		} else if strings.HasPrefix(url, "https://") {
+			stripped = strings.Replace(url, "https://", "", 1)
+		}
+
+		nameParts := strings.SplitN(stripped, "/", 2)
+
+		return nameParts[0]
+	}
+
+	// Maybe they have a legacy config file: iterate the keys, converting
+	// them to the new format and testing each one
+	normalizedHostname := convertToHostname(hostname)
+	for registry, config := range config.Configs {
+		if registryHostname := convertToHostname(registry); registryHostname == normalizedHostname {
+			return config
+		}
+	}
+
+	// When all else fails, return an empty auth config
+	return AuthConfig{}
+}
diff -Nru docker.io-0.9.1~dfsg1/registry/auth_test.go docker.io-1.3.2~dfsg1/registry/auth_test.go
--- docker.io-0.9.1~dfsg1/registry/auth_test.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/registry/auth_test.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,149 @@
+package registry
+
+import (
+	"io/ioutil"
+	"os"
+	"testing"
+)
+
+func TestEncodeAuth(t *testing.T) {
+	newAuthConfig := &AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"}
+	authStr := encodeAuth(newAuthConfig)
+	decAuthConfig := &AuthConfig{}
+	var err error
+	decAuthConfig.Username, decAuthConfig.Password, err = decodeAuth(authStr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if newAuthConfig.Username != decAuthConfig.Username {
+		t.Fatal("Encode Username doesn't match decoded Username")
+	}
+	if newAuthConfig.Password != decAuthConfig.Password {
+		t.Fatal("Encode Password doesn't match decoded Password")
+	}
+	if authStr != "a2VuOnRlc3Q=" {
+		t.Fatal("AuthString encoding isn't correct.")
+	}
+}
+
+func setupTempConfigFile() (*ConfigFile, error) {
+	root, err := ioutil.TempDir("", "docker-test-auth")
+	if err != nil {
+		return nil, err
+	}
+	configFile := &ConfigFile{
+		rootPath: root,
+		Configs: 
make(map[string]AuthConfig), + } + + for _, registry := range []string{"testIndex", IndexServerAddress()} { + configFile.Configs[registry] = AuthConfig{ + Username: "docker-user", + Password: "docker-pass", + Email: "docker@docker.io", + } + } + + return configFile, nil +} + +func TestSameAuthDataPostSave(t *testing.T) { + configFile, err := setupTempConfigFile() + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(configFile.rootPath) + + err = SaveConfig(configFile) + if err != nil { + t.Fatal(err) + } + + authConfig := configFile.Configs["testIndex"] + if authConfig.Username != "docker-user" { + t.Fail() + } + if authConfig.Password != "docker-pass" { + t.Fail() + } + if authConfig.Email != "docker@docker.io" { + t.Fail() + } + if authConfig.Auth != "" { + t.Fail() + } +} + +func TestResolveAuthConfigIndexServer(t *testing.T) { + configFile, err := setupTempConfigFile() + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(configFile.rootPath) + + for _, registry := range []string{"", IndexServerAddress()} { + resolved := configFile.ResolveAuthConfig(registry) + if resolved != configFile.Configs[IndexServerAddress()] { + t.Fail() + } + } +} + +func TestResolveAuthConfigFullURL(t *testing.T) { + configFile, err := setupTempConfigFile() + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(configFile.rootPath) + + registryAuth := AuthConfig{ + Username: "foo-user", + Password: "foo-pass", + Email: "foo@example.com", + } + localAuth := AuthConfig{ + Username: "bar-user", + Password: "bar-pass", + Email: "bar@example.com", + } + configFile.Configs["https://registry.example.com/v1/"] = registryAuth + configFile.Configs["http://localhost:8000/v1/"] = localAuth + configFile.Configs["registry.com"] = registryAuth + + validRegistries := map[string][]string{ + "https://registry.example.com/v1/": { + "https://registry.example.com/v1/", + "http://registry.example.com/v1/", + "registry.example.com", + "registry.example.com/v1/", + }, + "http://localhost:8000/v1/": { + "https://localhost:8000/v1/", + "http://localhost:8000/v1/", + "localhost:8000", + "localhost:8000/v1/", + }, + "registry.com": { + "https://registry.com/v1/", + "http://registry.com/v1/", + "registry.com", + "registry.com/v1/", + }, + } + + for configKey, registries := range validRegistries { + for _, registry := range registries { + var ( + configured AuthConfig + ok bool + ) + resolved := configFile.ResolveAuthConfig(registry) + if configured, ok = configFile.Configs[configKey]; !ok { + t.Fail() + } + if resolved.Email != configured.Email { + t.Errorf("%s -> %q != %q\n", registry, resolved.Email, configured.Email) + } + } + } +} diff -Nru docker.io-0.9.1~dfsg1/registry/endpoint.go docker.io-1.3.2~dfsg1/registry/endpoint.go --- docker.io-0.9.1~dfsg1/registry/endpoint.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/registry/endpoint.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,213 @@ +package registry + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + + "github.com/docker/docker/pkg/log" +) + +// for mocking in unit tests +var lookupIP = net.LookupIP + +// scans string for api version in the URL path. returns the trimmed hostname, if version found, string and API version. 
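To make the behaviour of the function below concrete, here is a sketch of the intended mapping, assuming the apiVersions table defined elsewhere in this package maps APIVersion1 to "v1" (hypothetical in-package example):

package registry

import "fmt"

func exampleScanForApiVersion() {
	// A trailing version component is stripped from the address and reported:
	// "localhost:5000/v1/" -> ("localhost:5000", APIVersion1)
	host, v := scanForApiVersion("localhost:5000/v1/")
	fmt.Println(host, v)

	// No version component: the address comes back unchanged with DefaultAPIVersion.
	host, v = scanForApiVersion("localhost:5000")
	fmt.Println(host, v)
}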
+func scanForApiVersion(hostname string) (string, APIVersion) { + var ( + chunks []string + apiVersionStr string + ) + if strings.HasSuffix(hostname, "/") { + chunks = strings.Split(hostname[:len(hostname)-1], "/") + apiVersionStr = chunks[len(chunks)-1] + } else { + chunks = strings.Split(hostname, "/") + apiVersionStr = chunks[len(chunks)-1] + } + for k, v := range apiVersions { + if apiVersionStr == v { + hostname = strings.Join(chunks[:len(chunks)-1], "/") + return hostname, k + } + } + return hostname, DefaultAPIVersion +} + +func NewEndpoint(hostname string, insecureRegistries []string) (*Endpoint, error) { + endpoint, err := newEndpoint(hostname, insecureRegistries) + if err != nil { + return nil, err + } + + // Try HTTPS ping to registry + endpoint.URL.Scheme = "https" + if _, err := endpoint.Ping(); err != nil { + + //TODO: triggering highland build can be done there without "failing" + + if endpoint.secure { + // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` + // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. + return nil, fmt.Errorf("Invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) + } + + // If registry is insecure and HTTPS failed, fallback to HTTP. + log.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) + endpoint.URL.Scheme = "http" + _, err2 := endpoint.Ping() + if err2 == nil { + return endpoint, nil + } + + return nil, fmt.Errorf("Invalid registry endpoint %q. HTTPS attempt: %v. 
HTTP attempt: %v", endpoint, err, err2) + } + + return endpoint, nil +} +func newEndpoint(hostname string, insecureRegistries []string) (*Endpoint, error) { + var ( + endpoint = Endpoint{} + trimmedHostname string + err error + ) + if !strings.HasPrefix(hostname, "http") { + hostname = "https://" + hostname + } + trimmedHostname, endpoint.Version = scanForApiVersion(hostname) + endpoint.URL, err = url.Parse(trimmedHostname) + if err != nil { + return nil, err + } + endpoint.secure, err = isSecure(endpoint.URL.Host, insecureRegistries) + if err != nil { + return nil, err + } + return &endpoint, nil +} + +type Endpoint struct { + URL *url.URL + Version APIVersion + secure bool +} + +// Get the formated URL for the root of this registry Endpoint +func (e Endpoint) String() string { + return fmt.Sprintf("%s/v%d/", e.URL.String(), e.Version) +} + +func (e Endpoint) VersionString(version APIVersion) string { + return fmt.Sprintf("%s/v%d/", e.URL.String(), version) +} + +func (e Endpoint) Ping() (RegistryInfo, error) { + if e.String() == IndexServerAddress() { + // Skip the check, we now this one is valid + // (and we never want to fallback to http in case of error) + return RegistryInfo{Standalone: false}, nil + } + + req, err := http.NewRequest("GET", e.String()+"_ping", nil) + if err != nil { + return RegistryInfo{Standalone: false}, err + } + + resp, _, err := doRequest(req, nil, ConnectTimeout, e.secure) + if err != nil { + return RegistryInfo{Standalone: false}, err + } + + defer resp.Body.Close() + + jsonString, err := ioutil.ReadAll(resp.Body) + if err != nil { + return RegistryInfo{Standalone: false}, fmt.Errorf("Error while reading the http response: %s", err) + } + + // If the header is absent, we assume true for compatibility with earlier + // versions of the registry. default to true + info := RegistryInfo{ + Standalone: true, + } + if err := json.Unmarshal(jsonString, &info); err != nil { + log.Debugf("Error unmarshalling the _ping RegistryInfo: %s", err) + // don't stop here. Just assume sane defaults + } + if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { + log.Debugf("Registry version header: '%s'", hdr) + info.Version = hdr + } + log.Debugf("RegistryInfo.Version: %q", info.Version) + + standalone := resp.Header.Get("X-Docker-Registry-Standalone") + log.Debugf("Registry standalone header: '%s'", standalone) + // Accepted values are "true" (case-insensitive) and "1". + if strings.EqualFold(standalone, "true") || standalone == "1" { + info.Standalone = true + } else if len(standalone) > 0 { + // there is a header set, and it is not "true" or "1", so assume fails + info.Standalone = false + } + log.Debugf("RegistryInfo.Standalone: %t", info.Standalone) + return info, nil +} + +// isSecure returns false if the provided hostname is part of the list of insecure registries. +// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. +// +// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. +// If the subnet contains one of the IPs of the registry specified by hostname, the latter is considered +// insecure. +// +// hostname should be a URL.Host (`host:port` or `host`) +func isSecure(hostname string, insecureRegistries []string) (bool, error) { + if hostname == IndexServerURL.Host { + return true, nil + } + + host, _, err := net.SplitHostPort(hostname) + if err != nil { + // assume hostname is of the form `host` without the port and go on. 
+ host = hostname + } + addrs, err := lookupIP(host) + if err != nil { + ip := net.ParseIP(host) + if ip == nil { + // if resolving `host` fails, error out, since host is to be net.Dial-ed anyway + return true, fmt.Errorf("issecure: could not resolve %q: %v", host, err) + } + addrs = []net.IP{ip} + } + if len(addrs) == 0 { + return true, fmt.Errorf("issecure: could not resolve %q", host) + } + + for _, addr := range addrs { + for _, r := range insecureRegistries { + // hostname matches insecure registry + if hostname == r { + return false, nil + } + + // now assume a CIDR was passed to --insecure-registry + _, ipnet, err := net.ParseCIDR(r) + if err != nil { + // if could not parse it as a CIDR, even after removing + // assume it's not a CIDR and go on with the next candidate + continue + } + + // check if the addr falls in the subnet + if ipnet.Contains(addr) { + return false, nil + } + } + } + + return true, nil +} diff -Nru docker.io-0.9.1~dfsg1/registry/endpoint_test.go docker.io-1.3.2~dfsg1/registry/endpoint_test.go --- docker.io-0.9.1~dfsg1/registry/endpoint_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/registry/endpoint_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,27 @@ +package registry + +import "testing" + +func TestEndpointParse(t *testing.T) { + testData := []struct { + str string + expected string + }{ + {IndexServerAddress(), IndexServerAddress()}, + {"http://0.0.0.0:5000", "http://0.0.0.0:5000/v1/"}, + {"0.0.0.0:5000", "https://0.0.0.0:5000/v1/"}, + } + for _, td := range testData { + e, err := newEndpoint(td.str, insecureRegistries) + if err != nil { + t.Errorf("%q: %s", td.str, err) + } + if e == nil { + t.Logf("something's fishy, endpoint for %q is nil", td.str) + continue + } + if e.String() != td.expected { + t.Errorf("expected %q, got %q", td.expected, e.String()) + } + } +} diff -Nru docker.io-0.9.1~dfsg1/registry/httpfactory.go docker.io-1.3.2~dfsg1/registry/httpfactory.go --- docker.io-0.9.1~dfsg1/registry/httpfactory.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/registry/httpfactory.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,46 @@ +package registry + +import ( + "runtime" + + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/utils" +) + +func HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory { + // FIXME: this replicates the 'info' job. + httpVersion := make([]utils.VersionInfo, 0, 4) + httpVersion = append(httpVersion, &simpleVersionInfo{"docker", dockerversion.VERSION}) + httpVersion = append(httpVersion, &simpleVersionInfo{"go", runtime.Version()}) + httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", dockerversion.GITCOMMIT}) + if kernelVersion, err := kernel.GetKernelVersion(); err == nil { + httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", kernelVersion.String()}) + } + httpVersion = append(httpVersion, &simpleVersionInfo{"os", runtime.GOOS}) + httpVersion = append(httpVersion, &simpleVersionInfo{"arch", runtime.GOARCH}) + ud := utils.NewHTTPUserAgentDecorator(httpVersion...) + md := &utils.HTTPMetaHeadersDecorator{ + Headers: metaHeaders, + } + factory := utils.NewHTTPRequestFactory(ud, md) + return factory +} + +// simpleVersionInfo is a simple implementation of +// the interface VersionInfo, which is used +// to provide version information for some product, +// component, etc. 
It stores the product name and the version +// in string and returns them on calls to Name() and Version(). +type simpleVersionInfo struct { + name string + version string +} + +func (v *simpleVersionInfo) Name() string { + return v.name +} + +func (v *simpleVersionInfo) Version() string { + return v.version +} diff -Nru docker.io-0.9.1~dfsg1/registry/MAINTAINERS docker.io-1.3.2~dfsg1/registry/MAINTAINERS --- docker.io-0.9.1~dfsg1/registry/MAINTAINERS 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/registry/MAINTAINERS 2014-11-24 17:38:01.000000000 +0000 @@ -1,3 +1,5 @@ -Sam Alba (@samalba) -Joffrey Fuhrer (@shin-) -Ken Cochrane (@kencochrane) +Sam Alba (@samalba) +Joffrey Fuhrer (@shin-) +Ken Cochrane (@kencochrane) +Vincent Batts (@vbatts) +Olivier Gambier (@dmp42) diff -Nru docker.io-0.9.1~dfsg1/registry/registry.go docker.io-1.3.2~dfsg1/registry/registry.go --- docker.io-0.9.1~dfsg1/registry/registry.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/registry/registry.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,71 +1,170 @@ package registry import ( - "bytes" - "crypto/sha256" - "encoding/json" + "crypto/tls" + "crypto/x509" "errors" "fmt" - "github.com/dotcloud/docker/auth" - "github.com/dotcloud/docker/utils" - "io" "io/ioutil" "net" "net/http" - "net/http/cookiejar" - "net/url" + "os" + "path" "regexp" - "strconv" "strings" "time" + + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/utils" ) var ( ErrAlreadyExists = errors.New("Image already exists") ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") + ErrDoesNotExist = errors.New("Image does not exist") errLoginRequired = errors.New("Authentication is required.") + validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + validNamespace = regexp.MustCompile(`^([a-z0-9_]{4,30})$`) + validRepo = regexp.MustCompile(`^([a-z0-9-_.]+)$`) +) + +type TimeoutType uint32 + +const ( + NoTimeout TimeoutType = iota + ReceiveTimeout + ConnectTimeout ) -func pingRegistryEndpoint(endpoint string) (bool, error) { - if endpoint == auth.IndexServerAddress() { - // Skip the check, we now this one is valid - // (and we never want to fallback to http in case of error) - return false, nil - } - httpDial := func(proto string, addr string) (net.Conn, error) { - // Set the connect timeout to 5 seconds - conn, err := net.DialTimeout(proto, addr, time.Duration(5)*time.Second) +func newClient(jar http.CookieJar, roots *x509.CertPool, cert *tls.Certificate, timeout TimeoutType, secure bool) *http.Client { + tlsConfig := tls.Config{ + RootCAs: roots, + // Avoid fallback to SSL protocols < TLS1.0 + MinVersion: tls.VersionTLS10, + } + + if cert != nil { + tlsConfig.Certificates = append(tlsConfig.Certificates, *cert) + } + + if !secure { + tlsConfig.InsecureSkipVerify = true + } + + httpTransport := &http.Transport{ + DisableKeepAlives: true, + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: &tlsConfig, + } + + switch timeout { + case ConnectTimeout: + httpTransport.Dial = func(proto string, addr string) (net.Conn, error) { + // Set the connect timeout to 5 seconds + conn, err := net.DialTimeout(proto, addr, 5*time.Second) + if err != nil { + return nil, err + } + // Set the recv timeout to 10 seconds + conn.SetDeadline(time.Now().Add(10 * time.Second)) + return conn, nil + } + case ReceiveTimeout: + httpTransport.Dial = func(proto string, addr string) (net.Conn, error) { + conn, err := net.Dial(proto, addr) + if err != nil { + return nil, err + } + conn = 
utils.NewTimeoutConn(conn, 1*time.Minute) + return conn, nil + } + } + + return &http.Client{ + Transport: httpTransport, + CheckRedirect: AddRequiredHeadersToRedirectedRequests, + Jar: jar, + } +} + +func doRequest(req *http.Request, jar http.CookieJar, timeout TimeoutType, secure bool) (*http.Response, *http.Client, error) { + var ( + pool *x509.CertPool + certs []*tls.Certificate + ) + + if secure && req.URL.Scheme == "https" { + hasFile := func(files []os.FileInfo, name string) bool { + for _, f := range files { + if f.Name() == name { + return true + } + } + return false + } + + hostDir := path.Join("/etc/docker/certs.d", req.URL.Host) + log.Debugf("hostDir: %s", hostDir) + fs, err := ioutil.ReadDir(hostDir) + if err != nil && !os.IsNotExist(err) { + return nil, nil, err + } + + for _, f := range fs { + if strings.HasSuffix(f.Name(), ".crt") { + if pool == nil { + pool = x509.NewCertPool() + } + log.Debugf("crt: %s", hostDir+"/"+f.Name()) + data, err := ioutil.ReadFile(path.Join(hostDir, f.Name())) + if err != nil { + return nil, nil, err + } + pool.AppendCertsFromPEM(data) + } + if strings.HasSuffix(f.Name(), ".cert") { + certName := f.Name() + keyName := certName[:len(certName)-5] + ".key" + log.Debugf("cert: %s", hostDir+"/"+f.Name()) + if !hasFile(fs, keyName) { + return nil, nil, fmt.Errorf("Missing key %s for certificate %s", keyName, certName) + } + cert, err := tls.LoadX509KeyPair(path.Join(hostDir, certName), path.Join(hostDir, keyName)) + if err != nil { + return nil, nil, err + } + certs = append(certs, &cert) + } + if strings.HasSuffix(f.Name(), ".key") { + keyName := f.Name() + certName := keyName[:len(keyName)-4] + ".cert" + log.Debugf("key: %s", hostDir+"/"+f.Name()) + if !hasFile(fs, certName) { + return nil, nil, fmt.Errorf("Missing certificate %s for key %s", certName, keyName) + } + } + } + } + + if len(certs) == 0 { + client := newClient(jar, pool, nil, timeout, secure) + res, err := client.Do(req) if err != nil { - return nil, err + return nil, nil, err } - // Set the recv timeout to 10 seconds - conn.SetDeadline(time.Now().Add(time.Duration(10) * time.Second)) - return conn, nil - } - httpTransport := &http.Transport{Dial: httpDial} - client := &http.Client{Transport: httpTransport} - resp, err := client.Get(endpoint + "_ping") - if err != nil { - return false, err - } - defer resp.Body.Close() - - if resp.Header.Get("X-Docker-Registry-Version") == "" { - return false, errors.New("This does not look like a Registry server (\"X-Docker-Registry-Version\" header not found in the response)") - } - - standalone := resp.Header.Get("X-Docker-Registry-Standalone") - utils.Debugf("Registry standalone header: '%s'", standalone) - // If the header is absent, we assume true for compatibility with earlier - // versions of the registry - if standalone == "" { - return true, nil - // Accepted values are "true" (case-insensitive) and "1". 
- } else if strings.EqualFold(standalone, "true") || standalone == "1" { - return true, nil + return res, client, nil } - // Otherwise, not standalone - return false, nil + + for i, cert := range certs { + client := newClient(jar, pool, cert, timeout, secure) + res, err := client.Do(req) + // If this is the last cert, otherwise, continue to next cert if 403 or 5xx + if i == len(certs)-1 || err == nil && res.StatusCode != 403 && res.StatusCode < 500 { + return res, client, err + } + } + + return nil, nil, nil } func validateRepositoryName(repositoryName string) error { @@ -77,15 +176,17 @@ if len(nameParts) < 2 { namespace = "library" name = nameParts[0] + + if validHex.MatchString(name) { + return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) + } } else { namespace = nameParts[0] name = nameParts[1] } - validNamespace := regexp.MustCompile(`^([a-z0-9_]{4,30})$`) if !validNamespace.MatchString(namespace) { return fmt.Errorf("Invalid namespace name (%s), only [a-z0-9_] are allowed, size between 4 and 30", namespace) } - validRepo := regexp.MustCompile(`^([a-z0-9-_.]+)$`) if !validRepo.MatchString(name) { return fmt.Errorf("Invalid repository name (%s), only [a-z0-9-_.] are allowed", name) } @@ -99,16 +200,11 @@ return "", "", ErrInvalidRepositoryName } nameParts := strings.SplitN(reposName, "/", 2) - if !strings.Contains(nameParts[0], ".") && !strings.Contains(nameParts[0], ":") && - nameParts[0] != "localhost" { + if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && !strings.Contains(nameParts[0], ":") && + nameParts[0] != "localhost") { // This is a Docker Index repos (ex: samalba/hipache or ubuntu) err := validateRepositoryName(reposName) - return auth.IndexServerAddress(), reposName, err - } - if len(nameParts) < 2 { - // There is a dot in repos name (and no registry address) - // Is it a Registry address without repos name? - return "", "", ErrInvalidRepositoryName + return IndexServerAddress(), reposName, err } hostname := nameParts[0] reposName = nameParts[1] @@ -122,603 +218,36 @@ return hostname, reposName, nil } -// this method expands the registry name as used in the prefix of a repo -// to a full url. if it already is a url, there will be no change. -// The registry is pinged to test if it http or https -func ExpandAndVerifyRegistryUrl(hostname string) (string, error) { - if strings.HasPrefix(hostname, "http:") || strings.HasPrefix(hostname, "https:") { - // if there is no slash after https:// (8 characters) then we have no path in the url - if strings.LastIndex(hostname, "/") < 9 { - // there is no path given. 
Expand with default path - hostname = hostname + "/v1/" - } - if _, err := pingRegistryEndpoint(hostname); err != nil { - return "", errors.New("Invalid Registry endpoint: " + err.Error()) - } - return hostname, nil - } - endpoint := fmt.Sprintf("https://%s/v1/", hostname) - if _, err := pingRegistryEndpoint(endpoint); err != nil { - utils.Debugf("Registry %s does not work (%s), falling back to http", endpoint, err) - endpoint = fmt.Sprintf("http://%s/v1/", hostname) - if _, err = pingRegistryEndpoint(endpoint); err != nil { - //TODO: triggering highland build can be done there without "failing" - return "", errors.New("Invalid Registry endpoint: " + err.Error()) - } - } - return endpoint, nil -} - -func doWithCookies(c *http.Client, req *http.Request) (*http.Response, error) { - for _, cookie := range c.Jar.Cookies(req.URL) { - req.AddCookie(cookie) - } - res, err := c.Do(req) - if err != nil { - return nil, err - } - if len(res.Cookies()) > 0 { - c.Jar.SetCookies(req.URL, res.Cookies()) - } - return res, err -} - -func setTokenAuth(req *http.Request, token []string) { - if req.Header.Get("Authorization") == "" { // Don't override - req.Header.Set("Authorization", "Token "+strings.Join(token, ",")) - } -} - -// Retrieve the history of a given image from the Registry. -// Return a list of the parent's json (requested image included) -func (r *Registry) GetRemoteHistory(imgID, registry string, token []string) ([]string, error) { - req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/ancestry", nil) - if err != nil { - return nil, err - } - setTokenAuth(req, token) - res, err := doWithCookies(r.client, req) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != 200 { - if res.StatusCode == 401 { - return nil, errLoginRequired - } - return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) - } - - jsonString, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, fmt.Errorf("Error while reading the http response: %s", err) - } - - utils.Debugf("Ancestry: %s", jsonString) - history := new([]string) - if err := json.Unmarshal(jsonString, history); err != nil { - return nil, err - } - return *history, nil -} - -// Check if an image exists in the Registry -// TODO: This method should return the errors instead of masking them and returning false -func (r *Registry) LookupRemoteImage(imgID, registry string, token []string) bool { - - req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil) - if err != nil { - utils.Errorf("Error in LookupRemoteImage %s", err) - return false - } - setTokenAuth(req, token) - res, err := doWithCookies(r.client, req) - if err != nil { - utils.Errorf("Error in LookupRemoteImage %s", err) +func trustedLocation(req *http.Request) bool { + var ( + trusteds = []string{"docker.com", "docker.io"} + hostname = strings.SplitN(req.Host, ":", 2)[0] + ) + if req.URL.Scheme != "https" { return false } - res.Body.Close() - return res.StatusCode == 200 -} -// Retrieve an image from the Registry. 
-func (r *Registry) GetRemoteImageJSON(imgID, registry string, token []string) ([]byte, int, error) { - // Get the JSON - req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil) - if err != nil { - return nil, -1, fmt.Errorf("Failed to download json: %s", err) - } - setTokenAuth(req, token) - res, err := doWithCookies(r.client, req) - if err != nil { - return nil, -1, fmt.Errorf("Failed to download json: %s", err) - } - defer res.Body.Close() - if res.StatusCode != 200 { - return nil, -1, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) - } - - imageSize, err := strconv.Atoi(res.Header.Get("X-Docker-Size")) - if err != nil { - return nil, -1, err - } - - jsonString, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, -1, fmt.Errorf("Failed to parse downloaded json: %s (%s)", err, jsonString) - } - return jsonString, imageSize, nil -} - -func (r *Registry) GetRemoteImageLayer(imgID, registry string, token []string) (io.ReadCloser, error) { - req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/layer", nil) - if err != nil { - return nil, fmt.Errorf("Error while getting from the server: %s\n", err) - } - setTokenAuth(req, token) - res, err := doWithCookies(r.client, req) - if err != nil { - return nil, err - } - if res.StatusCode != 200 { - res.Body.Close() - return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", - res.StatusCode, imgID) - } - return res.Body, nil -} - -func (r *Registry) GetRemoteTags(registries []string, repository string, token []string) (map[string]string, error) { - if strings.Count(repository, "/") == 0 { - // This will be removed once the Registry supports auto-resolution on - // the "library" namespace - repository = "library/" + repository - } - for _, host := range registries { - endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) - req, err := r.reqFactory.NewRequest("GET", endpoint, nil) - - if err != nil { - return nil, err - } - setTokenAuth(req, token) - res, err := doWithCookies(r.client, req) - if err != nil { - return nil, err - } - - utils.Debugf("Got status code %d from %s", res.StatusCode, endpoint) - defer res.Body.Close() - - if res.StatusCode != 200 && res.StatusCode != 404 { - continue - } else if res.StatusCode == 404 { - return nil, fmt.Errorf("Repository not found") - } - - result := make(map[string]string) - rawJSON, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err + for _, trusted := range trusteds { + if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { + return true } - if err := json.Unmarshal(rawJSON, &result); err != nil { - return nil, err - } - return result, nil } - return nil, fmt.Errorf("Could not reach any registry endpoint") + return false } -func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) { - indexEp := r.indexEndpoint - repositoryTarget := fmt.Sprintf("%srepositories/%s/images", indexEp, remote) - - utils.Debugf("[registry] Calling GET %s", repositoryTarget) - - req, err := r.reqFactory.NewRequest("GET", repositoryTarget, nil) - if err != nil { - return nil, err - } - if r.authConfig != nil && len(r.authConfig.Username) > 0 { - req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) - } - req.Header.Set("X-Docker-Token", "true") - - res, err := r.client.Do(req) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode == 401 { - return nil, errLoginRequired - } - // TODO: Right now we're ignoring checksums in the 
response body. - // In the future, we need to use them to check image validity. - if res.StatusCode != 200 { - return nil, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) - } - - var tokens []string - if res.Header.Get("X-Docker-Token") != "" { - tokens = res.Header["X-Docker-Token"] - } - - var endpoints []string - var urlScheme = indexEp[:strings.Index(indexEp, ":")] - if res.Header.Get("X-Docker-Endpoints") != "" { - // The Registry's URL scheme has to match the Index' - for _, ep := range res.Header["X-Docker-Endpoints"] { - endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", urlScheme, ep)) - } - } else { - return nil, fmt.Errorf("Index response didn't contain any endpoints") - } - - checksumsJSON, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - remoteChecksums := []*ImgData{} - if err := json.Unmarshal(checksumsJSON, &remoteChecksums); err != nil { - return nil, err - } - - // Forge a better object from the retrieved data - imgsData := make(map[string]*ImgData) - for _, elem := range remoteChecksums { - imgsData[elem.ID] = elem - } - - return &RepositoryData{ - ImgList: imgsData, - Endpoints: endpoints, - Tokens: tokens, - }, nil -} - -func (r *Registry) PushImageChecksumRegistry(imgData *ImgData, registry string, token []string) error { - - utils.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/checksum") - - req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/checksum", nil) - if err != nil { - return err - } - setTokenAuth(req, token) - req.Header.Set("X-Docker-Checksum", imgData.Checksum) - req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) - - res, err := doWithCookies(r.client, req) - if err != nil { - return fmt.Errorf("Failed to upload metadata: %s", err) - } - defer res.Body.Close() - if len(res.Cookies()) > 0 { - r.client.Jar.SetCookies(req.URL, res.Cookies()) - } - if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) - } - var jsonBody map[string]string - if err := json.Unmarshal(errBody, &jsonBody); err != nil { - errBody = []byte(err.Error()) - } else if jsonBody["error"] == "Image already exists" { - return ErrAlreadyExists - } - return fmt.Errorf("HTTP code %d while uploading metadata: %s", res.StatusCode, errBody) - } - return nil -} - -// Push a local image to the registry -func (r *Registry) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string, token []string) error { - - utils.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/json") - - req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/json", bytes.NewReader(jsonRaw)) - if err != nil { - return err - } - req.Header.Add("Content-type", "application/json") - setTokenAuth(req, token) - - res, err := doWithCookies(r.client, req) - if err != nil { - return fmt.Errorf("Failed to upload metadata: %s", err) - } - defer res.Body.Close() - if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) - } - var jsonBody map[string]string - if err := json.Unmarshal(errBody, &jsonBody); err != nil { - errBody = []byte(err.Error()) - } else if jsonBody["error"] == "Image already exists" { - return 
ErrAlreadyExists
-		}
-		return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %s", res.StatusCode, errBody), res)
-	}
-	return nil
-}
-
-func (r *Registry) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, token []string, jsonRaw []byte) (checksum string, checksumPayload string, err error) {
-
-	utils.Debugf("[registry] Calling PUT %s", registry+"images/"+imgID+"/layer")
-
-	h := sha256.New()
-	checksumLayer := &utils.CheckSum{Reader: layer, Hash: h}
-	tarsumLayer := &utils.TarSum{Reader: checksumLayer}
-
-	req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgID+"/layer", tarsumLayer)
-	if err != nil {
-		return "", "", err
-	}
-	req.ContentLength = -1
-	req.TransferEncoding = []string{"chunked"}
-	setTokenAuth(req, token)
-	res, err := doWithCookies(r.client, req)
-	if err != nil {
-		return "", "", fmt.Errorf("Failed to upload layer: %s", err)
-	}
-	if rc, ok := layer.(io.Closer); ok {
-		if err := rc.Close(); err != nil {
-			return "", "", err
-		}
-	}
-	defer res.Body.Close()
-
-	if res.StatusCode != 200 {
-		errBody, err := ioutil.ReadAll(res.Body)
-		if err != nil {
-			return "", "", utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
-		}
-		return "", "", utils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %s", res.StatusCode, errBody), res)
-	}
-
-	checksumPayload = "sha256:" + checksumLayer.Sum()
-	return tarsumLayer.Sum(jsonRaw), checksumPayload, nil
-}
-
-// push a tag on the registry.
-// Remote has the format '<user>/<repo>'
-func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token []string) error {
-	// "jsonify" the string
-	revision = "\"" + revision + "\""
-	path := fmt.Sprintf("repositories/%s/tags/%s", remote, tag)
-
-	req, err := r.reqFactory.NewRequest("PUT", registry+path, strings.NewReader(revision))
-	if err != nil {
-		return err
-	}
-	req.Header.Add("Content-type", "application/json")
-	setTokenAuth(req, token)
-	req.ContentLength = int64(len(revision))
-	res, err := doWithCookies(r.client, req)
-	if err != nil {
-		return err
-	}
-	res.Body.Close()
-	if res.StatusCode != 200 && res.StatusCode != 201 {
-		return utils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote), res)
-	}
-	return nil
-}
-
-func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) {
-	cleanImgList := []*ImgData{}
-	indexEp := r.indexEndpoint
-
-	if validate {
-		for _, elem := range imgList {
-			if elem.Checksum != "" {
-				cleanImgList = append(cleanImgList, elem)
-			}
-		}
-	} else {
-		cleanImgList = imgList
-	}
-
-	imgListJSON, err := json.Marshal(cleanImgList)
-	if err != nil {
-		return nil, err
-	}
-	var suffix string
-	if validate {
-		suffix = "images"
-	}
-	u := fmt.Sprintf("%srepositories/%s/%s", indexEp, remote, suffix)
-	utils.Debugf("[registry] PUT %s", u)
-	utils.Debugf("Image list pushed to index:\n%s", imgListJSON)
-	req, err := r.reqFactory.NewRequest("PUT", u, bytes.NewReader(imgListJSON))
-	if err != nil {
-		return nil, err
-	}
-	req.Header.Add("Content-type", "application/json")
-	req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
-	req.ContentLength = int64(len(imgListJSON))
-	req.Header.Set("X-Docker-Token", "true")
-	if validate {
-		req.Header["X-Docker-Endpoints"] = regs
-	}
-
-	res, err := r.client.Do(req)
-	if err != nil {
-		return
nil, err - } - defer res.Body.Close() - - // Redirect if necessary - for res.StatusCode >= 300 && res.StatusCode < 400 { - utils.Debugf("Redirected to %s", res.Header.Get("Location")) - req, err = r.reqFactory.NewRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON)) - if err != nil { - return nil, err - } - req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) - req.ContentLength = int64(len(imgListJSON)) - req.Header.Set("X-Docker-Token", "true") - if validate { - req.Header["X-Docker-Endpoints"] = regs - } - res, err = r.client.Do(req) - if err != nil { - return nil, err +func AddRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { + if via != nil && via[0] != nil { + if trustedLocation(req) && trustedLocation(via[0]) { + req.Header = via[0].Header + return nil } - defer res.Body.Close() - } - - var tokens, endpoints []string - var urlScheme = indexEp[:strings.Index(indexEp, ":")] - if !validate { - if res.StatusCode != 200 && res.StatusCode != 201 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err + for k, v := range via[0].Header { + if k != "Authorization" { + for _, vv := range v { + req.Header.Add(k, vv) + } } - return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %s", res.StatusCode, remote, errBody), res) - } - if res.Header.Get("X-Docker-Token") != "" { - tokens = res.Header["X-Docker-Token"] - utils.Debugf("Auth token: %v", tokens) - } else { - return nil, fmt.Errorf("Index response didn't contain an access token") - } - - if res.Header.Get("X-Docker-Endpoints") != "" { - // The Registry's URL scheme has to match the Index' - for _, ep := range res.Header["X-Docker-Endpoints"] { - endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", urlScheme, ep)) - } - } else { - return nil, fmt.Errorf("Index response didn't contain any endpoints") } } - if validate { - if res.StatusCode != 204 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %s", res.StatusCode, remote, errBody), res) - } - } - - return &RepositoryData{ - Tokens: tokens, - Endpoints: endpoints, - }, nil -} - -func (r *Registry) SearchRepositories(term string) (*SearchResults, error) { - utils.Debugf("Index server: %s", r.indexEndpoint) - u := auth.IndexServerAddress() + "search?q=" + url.QueryEscape(term) - req, err := r.reqFactory.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - if r.authConfig != nil && len(r.authConfig.Username) > 0 { - req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) - } - req.Header.Set("X-Docker-Token", "true") - res, err := r.client.Do(req) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != 200 { - return nil, utils.NewHTTPRequestError(fmt.Sprintf("Unexepected status code %d", res.StatusCode), res) - } - rawData, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - result := new(SearchResults) - err = json.Unmarshal(rawData, result) - return result, err -} - -func (r *Registry) GetAuthConfig(withPasswd bool) *auth.AuthConfig { - password := "" - if withPasswd { - password = r.authConfig.Password - } - return &auth.AuthConfig{ - Username: r.authConfig.Username, - Password: password, - Email: r.authConfig.Email, - } -} - -type SearchResult struct { - StarCount int `json:"star_count"` - IsOfficial bool `json:"is_official"` - Name string 
`json:"name"` - IsTrusted bool `json:"is_trusted"` - Description string `json:"description"` -} - -type SearchResults struct { - Query string `json:"query"` - NumResults int `json:"num_results"` - Results []SearchResult `json:"results"` -} - -type RepositoryData struct { - ImgList map[string]*ImgData - Endpoints []string - Tokens []string -} - -type ImgData struct { - ID string `json:"id"` - Checksum string `json:"checksum,omitempty"` - ChecksumPayload string `json:"-"` - Tag string `json:",omitempty"` -} - -type Registry struct { - client *http.Client - authConfig *auth.AuthConfig - reqFactory *utils.HTTPRequestFactory - indexEndpoint string -} - -func NewRegistry(authConfig *auth.AuthConfig, factory *utils.HTTPRequestFactory, indexEndpoint string) (r *Registry, err error) { - httpTransport := &http.Transport{ - DisableKeepAlives: true, - Proxy: http.ProxyFromEnvironment, - } - - r = &Registry{ - authConfig: authConfig, - client: &http.Client{ - Transport: httpTransport, - }, - indexEndpoint: indexEndpoint, - } - r.client.Jar, err = cookiejar.New(nil) - if err != nil { - return nil, err - } - - // If we're working with a standalone private registry over HTTPS, send Basic Auth headers - // alongside our requests. - if indexEndpoint != auth.IndexServerAddress() && strings.HasPrefix(indexEndpoint, "https://") { - standalone, err := pingRegistryEndpoint(indexEndpoint) - if err != nil { - return nil, err - } - if standalone { - utils.Debugf("Endpoint %s is eligible for private registry auth. Enabling decorator.", indexEndpoint) - dec := utils.NewHTTPAuthDecorator(authConfig.Username, authConfig.Password) - factory.AddDecorator(dec) - } - } - - r.reqFactory = factory - return r, nil + return nil } diff -Nru docker.io-0.9.1~dfsg1/registry/registry_mock_test.go docker.io-1.3.2~dfsg1/registry/registry_mock_test.go --- docker.io-0.9.1~dfsg1/registry/registry_mock_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/registry/registry_mock_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -2,11 +2,11 @@ import ( "encoding/json" + "errors" "fmt" - "github.com/dotcloud/docker/utils" - "github.com/gorilla/mux" "io" "io/ioutil" + "net" "net/http" "net/http/httptest" "net/url" @@ -14,11 +14,16 @@ "strings" "testing" "time" + + "github.com/gorilla/mux" + + "github.com/docker/docker/pkg/log" ) var ( - testHttpServer *httptest.Server - testLayers = map[string]map[string]string{ + testHTTPServer *httptest.Server + insecureRegistries []string + testLayers = map[string]map[string]string{ "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20": { "json": `{"id":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", "comment":"test base image","created":"2013-03-23T12:53:11.10432-07:00", @@ -77,10 +82,17 @@ "latest": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", }, } + mockHosts = map[string][]net.IP{ + "": {net.ParseIP("0.0.0.0")}, + "localhost": {net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, + "example.com": {net.ParseIP("42.42.42.42")}, + } ) func init() { r := mux.NewRouter() + + // /v1/ r.HandleFunc("/v1/_ping", handlerGetPing).Methods("GET") r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|ancestry}", handlerGetImage).Methods("GET") r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|checksum}", handlerPutImage).Methods("PUT") @@ -91,19 +103,47 @@ r.HandleFunc("/v1/repositories/{repository:.+}{action:/images|/}", handlerImages).Methods("GET", "PUT", "DELETE") r.HandleFunc("/v1/repositories/{repository:.+}/auth", 
handlerAuth).Methods("PUT") r.HandleFunc("/v1/search", handlerSearch).Methods("GET") - testHttpServer = httptest.NewServer(handlerAccessLog(r)) + + // /v2/ + r.HandleFunc("/v2/version", handlerGetPing).Methods("GET") + + testHTTPServer = httptest.NewServer(handlerAccessLog(r)) + URL, err := url.Parse(testHTTPServer.URL) + if err != nil { + panic(err) + } + insecureRegistries = []string{URL.Host} + + // override net.LookupIP + lookupIP = func(host string) ([]net.IP, error) { + if host == "127.0.0.1" { + // I believe in future Go versions this will fail, so let's fix it later + return net.LookupIP(host) + } + for h, addrs := range mockHosts { + if host == h { + return addrs, nil + } + for _, addr := range addrs { + if addr.String() == host { + return []net.IP{addr}, nil + } + } + } + return nil, errors.New("lookup: no such host") + } } func handlerAccessLog(handler http.Handler) http.Handler { logHandler := func(w http.ResponseWriter, r *http.Request) { - utils.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL) + log.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL) handler.ServeHTTP(w, r) } return http.HandlerFunc(logHandler) } func makeURL(req string) string { - return testHttpServer.URL + req + return testHTTPServer.URL + req } func writeHeaders(w http.ResponseWriter) { @@ -234,6 +274,7 @@ tags, exists := testRepositories[repositoryName] if !exists { apiError(w, "Repository not found", 404) + return } if r.Method == "DELETE" { delete(testRepositories, repositoryName) @@ -253,10 +294,12 @@ tags, exists := testRepositories[repositoryName] if !exists { apiError(w, "Repository not found", 404) + return } tag, exists := tags[tagName] if !exists { apiError(w, "Tag not found", 404) + return } writeResponse(w, tag, 200) } @@ -290,8 +333,8 @@ } func handlerImages(w http.ResponseWriter, r *http.Request) { - u, _ := url.Parse(testHttpServer.URL) - w.Header().Add("X-Docker-Endpoints", u.Host) + u, _ := url.Parse(testHTTPServer.URL) + w.Header().Add("X-Docker-Endpoints", fmt.Sprintf("%s , %s ", u.Host, "test.example.com")) w.Header().Add("X-Docker-Token", fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano())) if r.Method == "PUT" { if strings.HasSuffix(r.URL.Path, "images") { @@ -321,7 +364,12 @@ } func handlerSearch(w http.ResponseWriter, r *http.Request) { - writeResponse(w, "{}", 200) + result := &SearchResults{ + Query: "fakequery", + NumResults: 1, + Results: []SearchResult{{Name: "fakeimage", StarCount: 42}}, + } + writeResponse(w, result, 200) } func TestPing(t *testing.T) { diff -Nru docker.io-0.9.1~dfsg1/registry/registry_test.go docker.io-1.3.2~dfsg1/registry/registry_test.go --- docker.io-0.9.1~dfsg1/registry/registry_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/registry/registry_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,10 +1,13 @@ package registry import ( - "github.com/dotcloud/docker/auth" - "github.com/dotcloud/docker/utils" + "fmt" + "net/http" + "net/url" "strings" "testing" + + "github.com/docker/docker/utils" ) var ( @@ -13,9 +16,13 @@ REPO = "foo42/bar" ) -func spawnTestRegistry(t *testing.T) *Registry { - authConfig := &auth.AuthConfig{} - r, err := NewRegistry(authConfig, utils.NewHTTPRequestFactory(), makeURL("/v1/")) +func spawnTestRegistrySession(t *testing.T) *Session { + authConfig := &AuthConfig{} + endpoint, err := NewEndpoint(makeURL("/v1/"), insecureRegistries) + if err != nil { + t.Fatal(err) + } + r, err := NewSession(authConfig, utils.NewHTTPRequestFactory(), endpoint, true) if err != nil { t.Fatal(err) } @@ -23,15 +30,19 @@ } 
 func TestPingRegistryEndpoint(t *testing.T) {
-	standalone, err := pingRegistryEndpoint(makeURL("/v1/"))
+	ep, err := NewEndpoint(makeURL("/v1/"), insecureRegistries)
+	if err != nil {
+		t.Fatal(err)
+	}
+	regInfo, err := ep.Ping()
 	if err != nil {
 		t.Fatal(err)
 	}
-	assertEqual(t, standalone, true, "Expected standalone to be true (default)")
+	assertEqual(t, regInfo.Standalone, true, "Expected standalone to be true (default)")
 }
 
 func TestGetRemoteHistory(t *testing.T) {
-	r := spawnTestRegistry(t)
+	r := spawnTestRegistrySession(t)
 	hist, err := r.GetRemoteHistory(IMAGE_ID, makeURL("/v1/"), TOKEN)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -43,7 +54,7 @@
 }
 
 func TestLookupRemoteImage(t *testing.T) {
-	r := spawnTestRegistry(t)
+	r := spawnTestRegistrySession(t)
 	found := r.LookupRemoteImage(IMAGE_ID, makeURL("/v1/"), TOKEN)
 	assertEqual(t, found, true, "Expected remote lookup to succeed")
 	found = r.LookupRemoteImage("abcdef", makeURL("/v1/"), TOKEN)
@@ -51,7 +62,7 @@
 }
 
 func TestGetRemoteImageJSON(t *testing.T) {
-	r := spawnTestRegistry(t)
+	r := spawnTestRegistrySession(t)
 	json, size, err := r.GetRemoteImageJSON(IMAGE_ID, makeURL("/v1/"), TOKEN)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -68,8 +79,8 @@
 }
 
 func TestGetRemoteImageLayer(t *testing.T) {
-	r := spawnTestRegistry(t)
-	data, err := r.GetRemoteImageLayer(IMAGE_ID, makeURL("/v1/"), TOKEN)
+	r := spawnTestRegistrySession(t)
+	data, err := r.GetRemoteImageLayer(IMAGE_ID, makeURL("/v1/"), TOKEN, 0)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -77,14 +88,14 @@
 		t.Fatal("Expected non-nil data result")
 	}
 
-	_, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), TOKEN)
+	_, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), TOKEN, 0)
 	if err == nil {
 		t.Fatal("Expected image not found error")
 	}
 }
 
 func TestGetRemoteTags(t *testing.T) {
-	r := spawnTestRegistry(t)
+	r := spawnTestRegistrySession(t)
 	tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, REPO, TOKEN)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -99,17 +110,28 @@
 }
 
 func TestGetRepositoryData(t *testing.T) {
-	r := spawnTestRegistry(t)
+	r := spawnTestRegistrySession(t)
+	parsedUrl, err := url.Parse(makeURL("/v1/"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	host := "http://" + parsedUrl.Host + "/v1/"
 	data, err := r.GetRepositoryData("foo42/bar")
 	if err != nil {
 		t.Fatal(err)
 	}
 	assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList")
-	assertEqual(t, len(data.Endpoints), 1, "Expected one endpoint in Endpoints")
+	assertEqual(t, len(data.Endpoints), 2,
+		fmt.Sprintf("Expected 2 endpoints in Endpoints, found %d instead", len(data.Endpoints)))
+	assertEqual(t, data.Endpoints[0], host,
+		fmt.Sprintf("Expected first endpoint to be %s but found %s instead", host, data.Endpoints[0]))
+	assertEqual(t, data.Endpoints[1], "http://test.example.com/v1/",
+		fmt.Sprintf("Expected second endpoint to be http://test.example.com/v1/ but found %s instead", data.Endpoints[1]))
+
 }
 
 func TestPushImageJSONRegistry(t *testing.T) {
-	r := spawnTestRegistry(t)
+	r := spawnTestRegistrySession(t)
 	imgData := &ImgData{
 		ID:       "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20",
 		Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37",
@@ -122,7 +144,7 @@
 }
 
 func TestPushImageLayerRegistry(t *testing.T) {
-	r := spawnTestRegistry(t)
+	r := spawnTestRegistrySession(t)
 	layer := strings.NewReader("")
 	_, _, err := r.PushImageLayerRegistry(IMAGE_ID, layer, makeURL("/v1/"), TOKEN, []byte{})
 	if err != nil {
@@ -131,13 +153,13 @@
 }
 
 func TestResolveRepositoryName(t *testing.T) {
-	_, _, err := ResolveRepositoryName("https://github.com/dotcloud/docker")
ResolveRepositoryName("https://github.com/dotcloud/docker") + _, _, err := ResolveRepositoryName("https://github.com/docker/docker") assertEqual(t, err, ErrInvalidRepositoryName, "Expected error invalid repo name") ep, repo, err := ResolveRepositoryName("fooo/bar") if err != nil { t.Fatal(err) } - assertEqual(t, ep, auth.IndexServerAddress(), "Expected endpoint to be index server address") + assertEqual(t, ep, IndexServerAddress(), "Expected endpoint to be index server address") assertEqual(t, repo, "fooo/bar", "Expected resolved repo to be foo/bar") u := makeURL("")[7:] @@ -147,10 +169,17 @@ } assertEqual(t, ep, u, "Expected endpoint to be "+u) assertEqual(t, repo, "private/moonbase", "Expected endpoint to be private/moonbase") + + ep, repo, err = ResolveRepositoryName("ubuntu-12.04-base") + if err != nil { + t.Fatal(err) + } + assertEqual(t, ep, IndexServerAddress(), "Expected endpoint to be "+IndexServerAddress()) + assertEqual(t, repo, "ubuntu-12.04-base", "Expected endpoint to be ubuntu-12.04-base") } func TestPushRegistryTag(t *testing.T) { - r := spawnTestRegistry(t) + r := spawnTestRegistrySession(t) err := r.PushRegistryTag("foo42/bar", IMAGE_ID, "stable", makeURL("/v1/"), TOKEN) if err != nil { t.Fatal(err) @@ -158,7 +187,7 @@ } func TestPushImageJSONIndex(t *testing.T) { - r := spawnTestRegistry(t) + r := spawnTestRegistrySession(t) imgData := []*ImgData{ { ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", @@ -176,7 +205,7 @@ if repoData == nil { t.Fatal("Expected RepositoryData object") } - repoData, err = r.PushImageJSONIndex("foo42/bar", imgData, true, []string{r.indexEndpoint}) + repoData, err = r.PushImageJSONIndex("foo42/bar", imgData, true, []string{r.indexEndpoint.String()}) if err != nil { t.Fatal(err) } @@ -186,23 +215,141 @@ } func TestSearchRepositories(t *testing.T) { - r := spawnTestRegistry(t) - results, err := r.SearchRepositories("supercalifragilisticepsialidocious") + r := spawnTestRegistrySession(t) + results, err := r.SearchRepositories("fakequery") if err != nil { t.Fatal(err) } if results == nil { t.Fatal("Expected non-nil SearchResults object") } - assertEqual(t, results.NumResults, 0, "Expected 0 search results") + assertEqual(t, results.NumResults, 1, "Expected 1 search results") + assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query") + assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' a ot hae 42 stars") } func TestValidRepositoryName(t *testing.T) { if err := validateRepositoryName("docker/docker"); err != nil { t.Fatal(err) } + // Support 64-byte non-hexadecimal names (hexadecimal names are forbidden) + if err := validateRepositoryName("thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev"); err != nil { + t.Fatal(err) + } if err := validateRepositoryName("docker/Docker"); err == nil { t.Log("Repository name should be invalid") t.Fail() } + if err := validateRepositoryName("docker///docker"); err == nil { + t.Log("Repository name should be invalid") + t.Fail() + } + if err := validateRepositoryName("1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a"); err == nil { + t.Log("Repository name should be invalid, 64-byte hexadecimal names forbidden") + t.Fail() + } +} + +func TestTrustedLocation(t *testing.T) { + for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} { + req, _ := http.NewRequest("GET", url, nil) + if trustedLocation(req) == true { + t.Fatalf("'%s' 
+func TestTrustedLocation(t *testing.T) {
+	for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} {
+		req, _ := http.NewRequest("GET", url, nil)
+		if trustedLocation(req) == true {
+			t.Fatalf("'%s' shouldn't be detected as a trusted location", url)
+		}
+	}
+
+	for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} {
+		req, _ := http.NewRequest("GET", url, nil)
+		if trustedLocation(req) == false {
+			t.Fatalf("'%s' should be detected as a trusted location", url)
+		}
+	}
+}
+
+func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) {
+	for _, urls := range [][]string{
+		{"http://docker.io", "https://docker.com"},
+		{"https://foo.docker.io:7777", "http://bar.docker.com"},
+		{"https://foo.docker.io", "https://example.com"},
+	} {
+		reqFrom, _ := http.NewRequest("GET", urls[0], nil)
+		reqFrom.Header.Add("Content-Type", "application/json")
+		reqFrom.Header.Add("Authorization", "super_secret")
+		reqTo, _ := http.NewRequest("GET", urls[1], nil)
+
+		AddRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom})
+
+		if len(reqTo.Header) != 1 {
+			t.Fatalf("Expected 1 header, got %d", len(reqTo.Header))
+		}
+
+		if reqTo.Header.Get("Content-Type") != "application/json" {
+			t.Fatal("'Content-Type' should be 'application/json'")
+		}
+
+		if reqTo.Header.Get("Authorization") != "" {
+			t.Fatal("'Authorization' should be empty")
+		}
+	}
+
+	for _, urls := range [][]string{
+		{"https://docker.io", "https://docker.com"},
+		{"https://foo.docker.io:7777", "https://bar.docker.com"},
+	} {
+		reqFrom, _ := http.NewRequest("GET", urls[0], nil)
+		reqFrom.Header.Add("Content-Type", "application/json")
+		reqFrom.Header.Add("Authorization", "super_secret")
+		reqTo, _ := http.NewRequest("GET", urls[1], nil)
+
+		AddRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom})
+
+		if len(reqTo.Header) != 2 {
+			t.Fatalf("Expected 2 headers, got %d", len(reqTo.Header))
+		}
+
+		if reqTo.Header.Get("Content-Type") != "application/json" {
+			t.Fatal("'Content-Type' should be 'application/json'")
+		}
+
+		if reqTo.Header.Get("Authorization") != "super_secret" {
+			t.Fatal("'Authorization' should be 'super_secret'")
+		}
+	}
+}
+
+func TestIsSecure(t *testing.T) {
+	tests := []struct {
+		addr               string
+		insecureRegistries []string
+		expected           bool
+	}{
+		{IndexServerURL.Host, nil, true},
+		{"example.com", []string{}, true},
+		{"example.com", []string{"example.com"}, false},
+		{"localhost", []string{"localhost:5000"}, false},
+		{"localhost:5000", []string{"localhost:5000"}, false},
+		{"localhost", []string{"example.com"}, false},
+		{"127.0.0.1:5000", []string{"127.0.0.1:5000"}, false},
+		{"localhost", nil, false},
+		{"localhost:5000", nil, false},
+		{"127.0.0.1", nil, false},
+		{"localhost", []string{"example.com"}, false},
+		{"127.0.0.1", []string{"example.com"}, false},
+		{"example.com", nil, true},
+		{"example.com", []string{"example.com"}, false},
+		{"127.0.0.1", []string{"example.com"}, false},
+		{"127.0.0.1:5000", []string{"example.com"}, false},
+		{"example.com:5000", []string{"42.42.0.0/16"}, false},
+		{"example.com", []string{"42.42.0.0/16"}, false},
+		{"example.com:5000", []string{"42.42.42.42/8"}, false},
+		{"127.0.0.1:5000", []string{"127.0.0.0/8"}, false},
+		{"42.42.42.42:5000", []string{"42.1.1.1/8"}, false},
+	}
+	for _, tt := range tests {
+		// TODO: remove this once we remove localhost insecure by default
+		insecureRegistries := append(tt.insecureRegistries, "127.0.0.0/8")
+		if sec, err := isSecure(tt.addr, insecureRegistries); err != nil || sec != tt.expected {
+			t.Fatalf("isSecure failed for %q %v, expected %v got %v. Error: %v", tt.addr, insecureRegistries, tt.expected, sec, err)
+		}
+	}
+}
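+
+// How the CIDR entries in the table above are expected to match (sketch for
+// illustration, not part of the upstream file): an entry such as
+// "42.42.0.0/16" is assumed to be parsed with net.ParseCIDR and tested
+// against the registry host's resolved addresses:
+//
+//	if _, ipnet, err := net.ParseCIDR(entry); err == nil {
+//		for _, ip := range addrs { // addrs via the overridable lookupIP
+//			if ipnet.Contains(ip) {
+//				return false, nil // insecure registry
+//			}
+//		}
+//	}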
Error: %v", tt.addr, insecureRegistries, tt.expected, sec, err) + } + } } diff -Nru docker.io-0.9.1~dfsg1/registry/service.go docker.io-1.3.2~dfsg1/registry/service.go --- docker.io-0.9.1~dfsg1/registry/service.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/registry/service.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,118 @@ +package registry + +import ( + "github.com/docker/docker/engine" +) + +// Service exposes registry capabilities in the standard Engine +// interface. Once installed, it extends the engine with the +// following calls: +// +// 'auth': Authenticate against the public registry +// 'search': Search for images on the public registry +// 'pull': Download images from any registry (TODO) +// 'push': Upload images to any registry (TODO) +type Service struct { + insecureRegistries []string +} + +// NewService returns a new instance of Service ready to be +// installed no an engine. +func NewService(insecureRegistries []string) *Service { + return &Service{ + insecureRegistries: insecureRegistries, + } +} + +// Install installs registry capabilities to eng. +func (s *Service) Install(eng *engine.Engine) error { + eng.Register("auth", s.Auth) + eng.Register("search", s.Search) + return nil +} + +// Auth contacts the public registry with the provided credentials, +// and returns OK if authentication was sucessful. +// It can be used to verify the validity of a client's credentials. +func (s *Service) Auth(job *engine.Job) engine.Status { + var authConfig = new(AuthConfig) + + job.GetenvJson("authConfig", authConfig) + + if addr := authConfig.ServerAddress; addr != "" && addr != IndexServerAddress() { + endpoint, err := NewEndpoint(addr, s.insecureRegistries) + if err != nil { + return job.Error(err) + } + if _, err := endpoint.Ping(); err != nil { + return job.Error(err) + } + authConfig.ServerAddress = endpoint.String() + } + + status, err := Login(authConfig, HTTPRequestFactory(nil)) + if err != nil { + return job.Error(err) + } + job.Printf("%s\n", status) + + return engine.StatusOK +} + +// Search queries the public registry for images matching the specified +// search terms, and returns the results. +// +// Argument syntax: search TERM +// +// Option environment: +// 'authConfig': json-encoded credentials to authenticate against the registry. +// The search extends to images only accessible via the credentials. +// +// 'metaHeaders': extra HTTP headers to include in the request to the registry. +// The headers should be passed as a json-encoded dictionary. +// +// Output: +// Results are sent as a collection of structured messages (using engine.Table). +// Each result is sent as a separate message. +// Results are ordered by number of stars on the public registry. 
+func (s *Service) Search(job *engine.Job) engine.Status {
+	if n := len(job.Args); n != 1 {
+		return job.Errorf("Usage: %s TERM", job.Name)
+	}
+	var (
+		term        = job.Args[0]
+		metaHeaders = map[string][]string{}
+		authConfig  = &AuthConfig{}
+	)
+	job.GetenvJson("authConfig", authConfig)
+	job.GetenvJson("metaHeaders", metaHeaders)
+
+	hostname, term, err := ResolveRepositoryName(term)
+	if err != nil {
+		return job.Error(err)
+	}
+
+	endpoint, err := NewEndpoint(hostname, s.insecureRegistries)
+	if err != nil {
+		return job.Error(err)
+	}
+	r, err := NewSession(authConfig, HTTPRequestFactory(metaHeaders), endpoint, true)
+	if err != nil {
+		return job.Error(err)
+	}
+	results, err := r.SearchRepositories(term)
+	if err != nil {
+		return job.Error(err)
+	}
+	outs := engine.NewTable("star_count", 0)
+	for _, result := range results.Results {
+		out := &engine.Env{}
+		out.Import(result)
+		outs.Add(out)
+	}
+	outs.ReverseSort()
+	if _, err := outs.WriteListTo(job.Stdout); err != nil {
+		return job.Error(err)
+	}
+	return engine.StatusOK
+}
diff -Nru docker.io-0.9.1~dfsg1/registry/session.go docker.io-1.3.2~dfsg1/registry/session.go
--- docker.io-0.9.1~dfsg1/registry/session.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/registry/session.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,617 @@
+package registry
+
+import (
+	"bytes"
+	"crypto/sha256"
+	_ "crypto/sha512"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/cookiejar"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/pkg/httputils"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/tarsum"
+	"github.com/docker/docker/utils"
+)
+
+type Session struct {
+	authConfig    *AuthConfig
+	reqFactory    *utils.HTTPRequestFactory
+	indexEndpoint *Endpoint
+	jar           *cookiejar.Jar
+	timeout       TimeoutType
+}
+
+func NewSession(authConfig *AuthConfig, factory *utils.HTTPRequestFactory, endpoint *Endpoint, timeout bool) (r *Session, err error) {
+	r = &Session{
+		authConfig:    authConfig,
+		indexEndpoint: endpoint,
+	}
+
+	if timeout {
+		r.timeout = ReceiveTimeout
+	}
+
+	r.jar, err = cookiejar.New(nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// If we're working with a standalone private registry over HTTPS, send Basic Auth headers
+	// alongside our requests.
+	if r.indexEndpoint.VersionString(1) != IndexServerAddress() && r.indexEndpoint.URL.Scheme == "https" {
+		info, err := r.indexEndpoint.Ping()
+		if err != nil {
+			return nil, err
+		}
+		if info.Standalone {
+			log.Debugf("Endpoint %s is eligible for private registry auth. Enabling decorator.", r.indexEndpoint.String())
+			dec := utils.NewHTTPAuthDecorator(authConfig.Username, authConfig.Password)
+			factory.AddDecorator(dec)
+		}
+	}
+
+	r.reqFactory = factory
+	return r, nil
+}
+
+func (r *Session) doRequest(req *http.Request) (*http.Response, *http.Client, error) {
+	return doRequest(req, r.jar, r.timeout, r.indexEndpoint.secure)
+}
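+
+// Illustrative construction of a Session (assumed; it mirrors the updated
+// tests in registry_test.go above; registryAddr stands for a v1 index URL):
+//
+//	endpoint, err := NewEndpoint(registryAddr, insecureRegistries)
+//	if err != nil {
+//		// handle the invalid endpoint
+//	}
+//	session, err := NewSession(&AuthConfig{}, utils.NewHTTPRequestFactory(), endpoint, true)
+
+// Retrieve the history of a given image from the Registry.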
+// Return a list of the parent's json (requested image included) +func (r *Session) GetRemoteHistory(imgID, registry string, token []string) ([]string, error) { + req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/ancestry", nil) + if err != nil { + return nil, err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return nil, errLoginRequired + } + return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) + } + + jsonString, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, fmt.Errorf("Error while reading the http response: %s", err) + } + + log.Debugf("Ancestry: %s", jsonString) + history := new([]string) + if err := json.Unmarshal(jsonString, history); err != nil { + return nil, err + } + return *history, nil +} + +// Check if an image exists in the Registry +// TODO: This method should return the errors instead of masking them and returning false +func (r *Session) LookupRemoteImage(imgID, registry string, token []string) bool { + + req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil) + if err != nil { + log.Errorf("Error in LookupRemoteImage %s", err) + return false + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + log.Errorf("Error in LookupRemoteImage %s", err) + return false + } + res.Body.Close() + return res.StatusCode == 200 +} + +// Retrieve an image from the Registry. +func (r *Session) GetRemoteImageJSON(imgID, registry string, token []string) ([]byte, int, error) { + // Get the JSON + req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil) + if err != nil { + return nil, -1, fmt.Errorf("Failed to download json: %s", err) + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return nil, -1, fmt.Errorf("Failed to download json: %s", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, -1, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + // if the size header is not present, then set it to '-1' + imageSize := -1 + if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { + imageSize, err = strconv.Atoi(hdr) + if err != nil { + return nil, -1, err + } + } + + jsonString, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, -1, fmt.Errorf("Failed to parse downloaded json: %s (%s)", err, jsonString) + } + return jsonString, imageSize, nil +} + +func (r *Session) GetRemoteImageLayer(imgID, registry string, token []string, imgSize int64) (io.ReadCloser, error) { + var ( + retries = 5 + statusCode = 0 + client *http.Client + res *http.Response + imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) + ) + + req, err := r.reqFactory.NewRequest("GET", imageURL, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %s\n", err) + } + setTokenAuth(req, token) + for i := 1; i <= retries; i++ { + statusCode = 0 + res, client, err = r.doRequest(req) + if err != nil { + log.Debugf("Error contacting registry: %s", err) + if res != nil { + if res.Body != nil { + res.Body.Close() + } + statusCode = res.StatusCode + } + if i == retries { + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + statusCode, imgID) + } + time.Sleep(time.Duration(i) * 5 * time.Second) + continue + } + break + } + + if 
res.StatusCode != 200 { + res.Body.Close() + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + res.StatusCode, imgID) + } + + if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { + log.Debugf("server supports resume") + return httputils.ResumableRequestReaderWithInitialResponse(client, req, 5, imgSize, res), nil + } + log.Debugf("server doesn't support resume") + return res.Body, nil +} + +func (r *Session) GetRemoteTags(registries []string, repository string, token []string) (map[string]string, error) { + if strings.Count(repository, "/") == 0 { + // This will be removed once the Registry supports auto-resolution on + // the "library" namespace + repository = "library/" + repository + } + for _, host := range registries { + endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) + req, err := r.reqFactory.NewRequest("GET", endpoint, nil) + + if err != nil { + return nil, err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + + log.Debugf("Got status code %d from %s", res.StatusCode, endpoint) + defer res.Body.Close() + + if res.StatusCode != 200 && res.StatusCode != 404 { + continue + } else if res.StatusCode == 404 { + return nil, fmt.Errorf("Repository not found") + } + + result := make(map[string]string) + rawJSON, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + if err := json.Unmarshal(rawJSON, &result); err != nil { + return nil, err + } + return result, nil + } + return nil, fmt.Errorf("Could not reach any registry endpoint") +} + +func buildEndpointsList(headers []string, indexEp string) ([]string, error) { + var endpoints []string + parsedUrl, err := url.Parse(indexEp) + if err != nil { + return nil, err + } + var urlScheme = parsedUrl.Scheme + // The Registry's URL scheme has to match the Index' + for _, ep := range headers { + epList := strings.Split(ep, ",") + for _, epListElement := range epList { + endpoints = append( + endpoints, + fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement))) + } + } + return endpoints, nil +} + +func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) { + repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.VersionString(1), remote) + + log.Debugf("[registry] Calling GET %s", repositoryTarget) + + req, err := r.reqFactory.NewRequest("GET", repositoryTarget, nil) + if err != nil { + return nil, err + } + if r.authConfig != nil && len(r.authConfig.Username) > 0 { + req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) + } + req.Header.Set("X-Docker-Token", "true") + + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode == 401 { + return nil, errLoginRequired + } + // TODO: Right now we're ignoring checksums in the response body. + // In the future, we need to use them to check image validity. 
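+	// For context (illustrative values only, not from the upstream source):
+	// the index reply parsed below is expected to look roughly like
+	//
+	//	X-Docker-Token: signature=123abc,repository="foo42/bar",access=write
+	//	X-Docker-Endpoints: registry-1.example.com , registry-2.example.com
+	//
+	// with the checksums of the repository's images as the JSON body.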
+ if res.StatusCode != 200 { + return nil, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) + } + + var tokens []string + if res.Header.Get("X-Docker-Token") != "" { + tokens = res.Header["X-Docker-Token"] + } + + var endpoints []string + if res.Header.Get("X-Docker-Endpoints") != "" { + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1)) + if err != nil { + return nil, err + } + } else { + // Assume the endpoint is on the same host + endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) + } + + checksumsJSON, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + remoteChecksums := []*ImgData{} + if err := json.Unmarshal(checksumsJSON, &remoteChecksums); err != nil { + return nil, err + } + + // Forge a better object from the retrieved data + imgsData := make(map[string]*ImgData) + for _, elem := range remoteChecksums { + imgsData[elem.ID] = elem + } + + return &RepositoryData{ + ImgList: imgsData, + Endpoints: endpoints, + Tokens: tokens, + }, nil +} + +func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string, token []string) error { + + log.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/checksum") + + req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/checksum", nil) + if err != nil { + return err + } + setTokenAuth(req, token) + req.Header.Set("X-Docker-Checksum", imgData.Checksum) + req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) + + res, _, err := r.doRequest(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %s", err) + } + defer res.Body.Close() + if len(res.Cookies()) > 0 { + r.jar.SetCookies(req.URL, res.Cookies()) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return fmt.Errorf("HTTP code %d while uploading metadata: %s", res.StatusCode, errBody) + } + return nil +} + +// Push a local image to the registry +func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string, token []string) error { + + log.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/json") + + req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/json", bytes.NewReader(jsonRaw)) + if err != nil { + return err + } + req.Header.Add("Content-type", "application/json") + setTokenAuth(req, token) + + res, _, err := r.doRequest(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %s", err) + } + defer res.Body.Close() + if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { + return utils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if 
jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %s", res.StatusCode, errBody), res) + } + return nil +} + +func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, token []string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { + + log.Debugf("[registry] Calling PUT %s", registry+"images/"+imgID+"/layer") + + tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) + if err != nil { + return "", "", err + } + h := sha256.New() + h.Write(jsonRaw) + h.Write([]byte{'\n'}) + checksumLayer := io.TeeReader(tarsumLayer, h) + + req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgID+"/layer", checksumLayer) + if err != nil { + return "", "", err + } + req.Header.Add("Content-Type", "application/octet-stream") + req.ContentLength = -1 + req.TransferEncoding = []string{"chunked"} + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return "", "", fmt.Errorf("Failed to upload layer: %s", err) + } + if rc, ok := layer.(io.Closer); ok { + if err := rc.Close(); err != nil { + return "", "", err + } + } + defer res.Body.Close() + + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + return "", "", utils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %s", res.StatusCode, errBody), res) + } + + checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) + return tarsumLayer.Sum(jsonRaw), checksumPayload, nil +} + +// push a tag on the registry. 
+// Remote has the format '<user>/<repo>'
+func (r *Session) PushRegistryTag(remote, revision, tag, registry string, token []string) error {
+	// "jsonify" the string
+	revision = "\"" + revision + "\""
+	path := fmt.Sprintf("repositories/%s/tags/%s", remote, tag)
+
+	req, err := r.reqFactory.NewRequest("PUT", registry+path, strings.NewReader(revision))
+	if err != nil {
+		return err
+	}
+	req.Header.Add("Content-type", "application/json")
+	setTokenAuth(req, token)
+	req.ContentLength = int64(len(revision))
+	res, _, err := r.doRequest(req)
+	if err != nil {
+		return err
+	}
+	res.Body.Close()
+	if res.StatusCode != 200 && res.StatusCode != 201 {
+		return utils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote), res)
+	}
+	return nil
+}
+
+func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) {
+	cleanImgList := []*ImgData{}
+
+	if validate {
+		for _, elem := range imgList {
+			if elem.Checksum != "" {
+				cleanImgList = append(cleanImgList, elem)
+			}
+		}
+	} else {
+		cleanImgList = imgList
+	}
+
+	imgListJSON, err := json.Marshal(cleanImgList)
+	if err != nil {
+		return nil, err
+	}
+	var suffix string
+	if validate {
+		suffix = "images"
+	}
+	u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.VersionString(1), remote, suffix)
+	log.Debugf("[registry] PUT %s", u)
+	log.Debugf("Image list pushed to index:\n%s", imgListJSON)
+	req, err := r.reqFactory.NewRequest("PUT", u, bytes.NewReader(imgListJSON))
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Add("Content-type", "application/json")
+	req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
+	req.ContentLength = int64(len(imgListJSON))
+	req.Header.Set("X-Docker-Token", "true")
+	if validate {
+		req.Header["X-Docker-Endpoints"] = regs
+	}
+
+	res, _, err := r.doRequest(req)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	// Redirect if necessary
+	for res.StatusCode >= 300 && res.StatusCode < 400 {
+		log.Debugf("Redirected to %s", res.Header.Get("Location"))
+		req, err = r.reqFactory.NewRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON))
+		if err != nil {
+			return nil, err
+		}
+		req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
+		req.ContentLength = int64(len(imgListJSON))
+		req.Header.Set("X-Docker-Token", "true")
+		if validate {
+			req.Header["X-Docker-Endpoints"] = regs
+		}
+		res, _, err = r.doRequest(req)
+		if err != nil {
+			return nil, err
+		}
+		defer res.Body.Close()
+	}
+
+	var tokens, endpoints []string
+	if !validate {
+		if res.StatusCode != 200 && res.StatusCode != 201 {
+			errBody, err := ioutil.ReadAll(res.Body)
+			if err != nil {
+				return nil, err
+			}
+			return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %s", res.StatusCode, remote, errBody), res)
+		}
+		if res.Header.Get("X-Docker-Token") != "" {
+			tokens = res.Header["X-Docker-Token"]
+			log.Debugf("Auth token: %v", tokens)
+		} else {
+			return nil, fmt.Errorf("Index response didn't contain an access token")
+		}
+
+		if res.Header.Get("X-Docker-Endpoints") != "" {
+			endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1))
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			return nil, fmt.Errorf("Index response didn't contain any endpoints")
+		}
+	}
+	if validate {
+		if res.StatusCode != 204 {
+			errBody, err := ioutil.ReadAll(res.Body)
+			if err != nil {
+				return nil, err
+			}
+			return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %s", res.StatusCode, remote, errBody), res)
+		}
+	}
+
+	return &RepositoryData{
+		Tokens:    tokens,
+		Endpoints: endpoints,
+	}, nil
+}
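+
+// The two-phase v1 push this method serves, as exercised by the tests above
+// (sketch for orientation, not a comment from the upstream file):
+//
+//	// phase 1: announce the image list, collect token and endpoints
+//	repoData, err := session.PushImageJSONIndex("foo42/bar", imgList, false, nil)
+//	// ...push json, layers and tags to repoData.Endpoints...
+//	// phase 2: resend the list with checksums so the index can validate
+//	_, err = session.PushImageJSONIndex("foo42/bar", imgList, true, repoData.Endpoints)
+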
+func (r *Session) SearchRepositories(term string) (*SearchResults, error) {
+	log.Debugf("Index server: %s", r.indexEndpoint)
+	u := r.indexEndpoint.VersionString(1) + "search?q=" + url.QueryEscape(term)
+	req, err := r.reqFactory.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, err
+	}
+	if r.authConfig != nil && len(r.authConfig.Username) > 0 {
+		req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
+	}
+	req.Header.Set("X-Docker-Token", "true")
+	res, _, err := r.doRequest(req)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	if res.StatusCode != 200 {
+		return nil, utils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res)
+	}
+	rawData, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, err
+	}
+	result := new(SearchResults)
+	err = json.Unmarshal(rawData, result)
+	return result, err
+}
+
+func (r *Session) GetAuthConfig(withPasswd bool) *AuthConfig {
+	password := ""
+	if withPasswd {
+		password = r.authConfig.Password
+	}
+	return &AuthConfig{
+		Username: r.authConfig.Username,
+		Password: password,
+		Email:    r.authConfig.Email,
+	}
+}
+
+func setTokenAuth(req *http.Request, token []string) {
+	if req.Header.Get("Authorization") == "" { // Don't override
+		req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
+	}
+}
diff -Nru docker.io-0.9.1~dfsg1/registry/session_v2.go docker.io-1.3.2~dfsg1/registry/session_v2.go
--- docker.io-0.9.1~dfsg1/registry/session_v2.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/registry/session_v2.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,390 @@
+package registry
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/url"
+	"strconv"
+
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/utils"
+	"github.com/gorilla/mux"
+)
+
+func newV2RegistryRouter() *mux.Router {
+	router := mux.NewRouter()
+
+	v2Router := router.PathPrefix("/v2/").Subrouter()
+
+	// Version Info
+	v2Router.Path("/version").Name("version")
+
+	// Image Manifests
+	v2Router.Path("/manifest/{imagename:[a-z0-9-._/]+}/{tagname:[a-zA-Z0-9-._]+}").Name("manifests")
+
+	// List Image Tags
+	v2Router.Path("/tags/{imagename:[a-z0-9-._/]+}").Name("tags")
+
+	// Download a blob
+	v2Router.Path("/blob/{imagename:[a-z0-9-._/]+}/{sumtype:[a-z0-9._+-]+}/{sum:[a-fA-F0-9]{4,}}").Name("downloadBlob")
+
+	// Upload a blob
+	v2Router.Path("/blob/{imagename:[a-z0-9-._/]+}/{sumtype:[a-z0-9._+-]+}").Name("uploadBlob")
+
+	// Mounting a blob in an image
+	v2Router.Path("/mountblob/{imagename:[a-z0-9-._/]+}/{sumtype:[a-z0-9._+-]+}/{sum:[a-fA-F0-9]{4,}}").Name("mountBlob")
+
+	return router
+}
+
+// APIVersion2 /v2/
+var v2HTTPRoutes = newV2RegistryRouter()
+
+func getV2URL(e *Endpoint, routeName string, vars map[string]string) (*url.URL, error) {
+	route := v2HTTPRoutes.Get(routeName)
+	if route == nil {
+		return nil, fmt.Errorf("unknown registry v2 route name: %q", routeName)
+	}
+
+	varReplace := make([]string, 0, len(vars)*2)
+	for key, val := range vars {
+		varReplace = append(varReplace, key, val)
+	}
+
+	routePath, err := route.URLPath(varReplace...)
+ if err != nil { + return nil, fmt.Errorf("unable to make registry route %q with vars %v: %s", routeName, vars, err) + } + u, err := url.Parse(REGISTRYSERVER) + if err != nil { + return nil, fmt.Errorf("invalid registry url: %s", err) + } + + return &url.URL{ + Scheme: u.Scheme, + Host: u.Host, + Path: routePath.Path, + }, nil +} + +// V2 Provenance POC + +func (r *Session) GetV2Version(token []string) (*RegistryInfo, error) { + routeURL, err := getV2URL(r.indexEndpoint, "version", nil) + if err != nil { + return nil, err + } + + method := "GET" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + + req, err := r.reqFactory.NewRequest(method, routeURL.String(), nil) + if err != nil { + return nil, err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d fetching Version", res.StatusCode), res) + } + + decoder := json.NewDecoder(res.Body) + versionInfo := new(RegistryInfo) + + err = decoder.Decode(versionInfo) + if err != nil { + return nil, fmt.Errorf("unable to decode GetV2Version JSON response: %s", err) + } + + return versionInfo, nil +} + +// +// 1) Check if TarSum of each layer exists /v2/ +// 1.a) if 200, continue +// 1.b) if 300, then push the +// 1.c) if anything else, err +// 2) PUT the created/signed manifest +// +func (r *Session) GetV2ImageManifest(imageName, tagName string, token []string) ([]byte, error) { + vars := map[string]string{ + "imagename": imageName, + "tagname": tagName, + } + + routeURL, err := getV2URL(r.indexEndpoint, "manifests", vars) + if err != nil { + return nil, err + } + + method := "GET" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + + req, err := r.reqFactory.NewRequest(method, routeURL.String(), nil) + if err != nil { + return nil, err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return nil, errLoginRequired + } else if res.StatusCode == 404 { + return nil, ErrDoesNotExist + } + return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s:%s", res.StatusCode, imageName, tagName), res) + } + + buf, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, fmt.Errorf("Error while reading the http response: %s", err) + } + return buf, nil +} + +// - Succeeded to mount for this image scope +// - Failed with no error (So continue to Push the Blob) +// - Failed with error +func (r *Session) PostV2ImageMountBlob(imageName, sumType, sum string, token []string) (bool, error) { + vars := map[string]string{ + "imagename": imageName, + "sumtype": sumType, + "sum": sum, + } + + routeURL, err := getV2URL(r.indexEndpoint, "mountBlob", vars) + if err != nil { + return false, err + } + + method := "POST" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + + req, err := r.reqFactory.NewRequest(method, routeURL.String(), nil) + if err != nil { + return false, err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return false, err + } + res.Body.Close() // close early, since we're not needing a body on this call .. yet? 
+	switch res.StatusCode {
+	case 200:
+		// return something indicating no push needed
+		return true, nil
+	case 300:
+		// return something indicating blob push needed
+		return false, nil
+	}
+	return false, fmt.Errorf("Failed to mount %q - %s:%s : %d", imageName, sumType, sum, res.StatusCode)
+}
+
+func (r *Session) GetV2ImageBlob(imageName, sumType, sum string, blobWrtr io.Writer, token []string) error {
+	vars := map[string]string{
+		"imagename": imageName,
+		"sumtype":   sumType,
+		"sum":       sum,
+	}
+
+	routeURL, err := getV2URL(r.indexEndpoint, "downloadBlob", vars)
+	if err != nil {
+		return err
+	}
+
+	method := "GET"
+	log.Debugf("[registry] Calling %q %s", method, routeURL.String())
+	req, err := r.reqFactory.NewRequest(method, routeURL.String(), nil)
+	if err != nil {
+		return err
+	}
+	setTokenAuth(req, token)
+	res, _, err := r.doRequest(req)
+	if err != nil {
+		return err
+	}
+	defer res.Body.Close()
+	if res.StatusCode != 200 {
+		if res.StatusCode == 401 {
+			return errLoginRequired
+		}
+		return utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob", res.StatusCode, imageName), res)
+	}
+
+	_, err = io.Copy(blobWrtr, res.Body)
+	return err
+}
+
+func (r *Session) GetV2ImageBlobReader(imageName, sumType, sum string, token []string) (io.ReadCloser, int64, error) {
+	vars := map[string]string{
+		"imagename": imageName,
+		"sumtype":   sumType,
+		"sum":       sum,
+	}
+
+	routeURL, err := getV2URL(r.indexEndpoint, "downloadBlob", vars)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	method := "GET"
+	log.Debugf("[registry] Calling %q %s", method, routeURL.String())
+	req, err := r.reqFactory.NewRequest(method, routeURL.String(), nil)
+	if err != nil {
+		return nil, 0, err
+	}
+	setTokenAuth(req, token)
+	res, _, err := r.doRequest(req)
+	if err != nil {
+		return nil, 0, err
+	}
+	if res.StatusCode != 200 {
+		if res.StatusCode == 401 {
+			return nil, 0, errLoginRequired
+		}
+		return nil, 0, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob", res.StatusCode, imageName), res)
+	}
+	lenStr := res.Header.Get("Content-Length")
+	l, err := strconv.ParseInt(lenStr, 10, 64)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	return res.Body, l, err
+}
+
+// Push the image to the server for storage.
+// 'layer' is an uncompressed reader of the blob to be pushed.
+// The server will generate its own checksum calculation.
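+//
+// Illustrative call (not from the upstream file; names are placeholders):
+//
+//	sum, err := session.PutV2ImageBlob("foo42/bar", "tarsum", layerReader, token)
+//	if err != nil {
+//		// handle the failed upload
+//	}
+//	// sum holds the server-side checksum to record in the manifest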
+func (r *Session) PutV2ImageBlob(imageName, sumType string, blobRdr io.Reader, token []string) (serverChecksum string, err error) { + vars := map[string]string{ + "imagename": imageName, + "sumtype": sumType, + } + + routeURL, err := getV2URL(r.indexEndpoint, "uploadBlob", vars) + if err != nil { + return "", err + } + + method := "PUT" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + req, err := r.reqFactory.NewRequest(method, routeURL.String(), blobRdr) + if err != nil { + return "", err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return "", err + } + defer res.Body.Close() + if res.StatusCode != 201 { + if res.StatusCode == 401 { + return "", errLoginRequired + } + return "", utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s blob", res.StatusCode, imageName), res) + } + + type sumReturn struct { + Checksum string `json:"checksum"` + } + + decoder := json.NewDecoder(res.Body) + var sumInfo sumReturn + + err = decoder.Decode(&sumInfo) + if err != nil { + return "", fmt.Errorf("unable to decode PutV2ImageBlob JSON response: %s", err) + } + + // XXX this is a json struct from the registry, with its checksum + return sumInfo.Checksum, nil +} + +// Finally Push the (signed) manifest of the blobs we've just pushed +func (r *Session) PutV2ImageManifest(imageName, tagName string, manifestRdr io.Reader, token []string) error { + vars := map[string]string{ + "imagename": imageName, + "tagname": tagName, + } + + routeURL, err := getV2URL(r.indexEndpoint, "manifests", vars) + if err != nil { + return err + } + + method := "PUT" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + req, err := r.reqFactory.NewRequest(method, routeURL.String(), manifestRdr) + if err != nil { + return err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return err + } + res.Body.Close() + if res.StatusCode != 201 { + if res.StatusCode == 401 { + return errLoginRequired + } + return utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s:%s manifest", res.StatusCode, imageName, tagName), res) + } + + return nil +} + +// Given a repository name, returns a json array of string tags +func (r *Session) GetV2RemoteTags(imageName string, token []string) ([]string, error) { + vars := map[string]string{ + "imagename": imageName, + } + + routeURL, err := getV2URL(r.indexEndpoint, "tags", vars) + if err != nil { + return nil, err + } + + method := "GET" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + + req, err := r.reqFactory.NewRequest(method, routeURL.String(), nil) + if err != nil { + return nil, err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return nil, errLoginRequired + } else if res.StatusCode == 404 { + return nil, ErrDoesNotExist + } + return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s", res.StatusCode, imageName), res) + } + + decoder := json.NewDecoder(res.Body) + var tags []string + err = decoder.Decode(&tags) + if err != nil { + return nil, fmt.Errorf("Error while decoding the http response: %s", err) + } + return tags, nil +} diff -Nru docker.io-0.9.1~dfsg1/registry/types.go docker.io-1.3.2~dfsg1/registry/types.go --- docker.io-0.9.1~dfsg1/registry/types.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/registry/types.go 2014-11-24 
17:38:01.000000000 +0000 @@ -0,0 +1,67 @@ +package registry + +type SearchResult struct { + StarCount int `json:"star_count"` + IsOfficial bool `json:"is_official"` + Name string `json:"name"` + IsTrusted bool `json:"is_trusted"` + Description string `json:"description"` +} + +type SearchResults struct { + Query string `json:"query"` + NumResults int `json:"num_results"` + Results []SearchResult `json:"results"` +} + +type RepositoryData struct { + ImgList map[string]*ImgData + Endpoints []string + Tokens []string +} + +type ImgData struct { + ID string `json:"id"` + Checksum string `json:"checksum,omitempty"` + ChecksumPayload string `json:"-"` + Tag string `json:",omitempty"` +} + +type RegistryInfo struct { + Version string `json:"version"` + Standalone bool `json:"standalone"` +} + +type FSLayer struct { + BlobSum string `json:"blobSum"` +} + +type ManifestHistory struct { + V1Compatibility string `json:"v1Compatibility"` +} + +type ManifestData struct { + Name string `json:"name"` + Tag string `json:"tag"` + Architecture string `json:"architecture"` + FSLayers []*FSLayer `json:"fsLayers"` + History []*ManifestHistory `json:"history"` + SchemaVersion int `json:"schemaVersion"` +} + +type APIVersion int + +func (av APIVersion) String() string { + return apiVersions[av] +} + +var DefaultAPIVersion APIVersion = APIVersion1 +var apiVersions = map[APIVersion]string{ + 1: "v1", + 2: "v2", +} + +const ( + APIVersion1 = iota + 1 + APIVersion2 +) diff -Nru docker.io-0.9.1~dfsg1/runconfig/compare.go docker.io-1.3.2~dfsg1/runconfig/compare.go --- docker.io-0.9.1~dfsg1/runconfig/compare.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/runconfig/compare.go 2014-11-24 17:38:01.000000000 +0000 @@ -14,12 +14,10 @@ a.MemorySwap != b.MemorySwap || a.CpuShares != b.CpuShares || a.OpenStdin != b.OpenStdin || - a.Tty != b.Tty || - a.VolumesFrom != b.VolumesFrom { + a.Tty != b.Tty { return false } if len(a.Cmd) != len(b.Cmd) || - len(a.Dns) != len(b.Dns) || len(a.Env) != len(b.Env) || len(a.PortSpecs) != len(b.PortSpecs) || len(a.ExposedPorts) != len(b.ExposedPorts) || @@ -33,11 +31,6 @@ return false } } - for i := 0; i < len(a.Dns); i++ { - if a.Dns[i] != b.Dns[i] { - return false - } - } for i := 0; i < len(a.Env); i++ { if a.Env[i] != b.Env[i] { return false diff -Nru docker.io-0.9.1~dfsg1/runconfig/config.go docker.io-1.3.2~dfsg1/runconfig/config.go --- docker.io-0.9.1~dfsg1/runconfig/config.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/runconfig/config.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,8 +1,8 @@ package runconfig import ( - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/nat" + "github.com/docker/docker/engine" + "github.com/docker/docker/nat" ) // Note: the Config structure should hold only portable information about the container. @@ -12,9 +12,10 @@ Hostname string Domainname string User string - Memory int64 // Memory limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap - CpuShares int64 // CPU shares (relative weight vs. other containers) + Memory int64 // Memory limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap + CpuShares int64 // CPU shares (relative weight vs. other containers) + Cpuset string // Cpuset 0-2, 0,1 AttachStdin bool AttachStdout bool AttachStderr bool @@ -25,10 +26,8 @@ StdinOnce bool // If true, close stdin after the 1 attached client disconnects. 
Env []string Cmd []string - Dns []string Image string // Name of the image as it was passed by the operator (eg. could be symbolic) Volumes map[string]struct{} - VolumesFrom string WorkingDir string Entrypoint []string NetworkDisabled bool @@ -43,6 +42,7 @@ Memory: job.GetenvInt64("Memory"), MemorySwap: job.GetenvInt64("MemorySwap"), CpuShares: job.GetenvInt64("CpuShares"), + Cpuset: job.Getenv("Cpuset"), AttachStdin: job.GetenvBool("AttachStdin"), AttachStdout: job.GetenvBool("AttachStdout"), AttachStderr: job.GetenvBool("AttachStderr"), @@ -50,7 +50,6 @@ OpenStdin: job.GetenvBool("OpenStdin"), StdinOnce: job.GetenvBool("StdinOnce"), Image: job.Getenv("Image"), - VolumesFrom: job.Getenv("VolumesFrom"), WorkingDir: job.Getenv("WorkingDir"), NetworkDisabled: job.GetenvBool("NetworkDisabled"), } @@ -65,12 +64,8 @@ if Cmd := job.GetenvList("Cmd"); Cmd != nil { config.Cmd = Cmd } - if Dns := job.GetenvList("Dns"); Dns != nil { - config.Dns = Dns - } if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil { config.Entrypoint = Entrypoint } - return config } diff -Nru docker.io-0.9.1~dfsg1/runconfig/config_test.go docker.io-1.3.2~dfsg1/runconfig/config_test.go --- docker.io-0.9.1~dfsg1/runconfig/config_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/runconfig/config_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,59 +1,189 @@ package runconfig import ( - "github.com/dotcloud/docker/nat" + "fmt" + "strings" "testing" + + "github.com/docker/docker/nat" ) +func parse(t *testing.T, args string) (*Config, *HostConfig, error) { + config, hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " "), nil) + return config, hostConfig, err +} + +func mustParse(t *testing.T, args string) (*Config, *HostConfig) { + config, hostConfig, err := parse(t, args) + if err != nil { + t.Fatal(err) + } + return config, hostConfig +} + +// check if (a == c && b == d) || (a == d && b == c) +// because maps are randomized +func compareRandomizedStrings(a, b, c, d string) error { + if a == c && b == d { + return nil + } + if a == d && b == c { + return nil + } + return fmt.Errorf("strings don't match") +} + +func TestParseRunLinks(t *testing.T) { + if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { + t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) + } + + if _, _, err := parse(t, "--link a"); err == nil { + t.Fatalf("Error parsing links. `--link a` should be an error but is not") + } + if _, _, err := parse(t, "--link"); err == nil { + t.Fatalf("Error parsing links. `--link` should be an error but is not") + } +} + +func TestParseRunAttach(t *testing.T) { + if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. 
Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } +
+ if _, _, err := parse(t, "-a"); err == nil { + t.Fatalf("Error parsing attach flags, `-a` should be an error but is not") + } + if _, _, err := parse(t, "-a invalid"); err == nil { + t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not") + } + if _, _, err := parse(t, "-a invalid -a stdout"); err == nil { + t.Fatalf("Error parsing attach flags, `-a invalid -a stdout` should be an error but is not") + } + if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stdin -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stdout -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stderr -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not") + } + if _, _, err := parse(t, "-d --rm"); err == nil { + t.Fatalf("Error parsing attach flags, `-d --rm` should be an error but is not") + } +} +
+func TestParseRunVolumes(t *testing.T) { + if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes["/tmp"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) + } +
+ if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes["/tmp"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) + } else if _, exists := config.Volumes["/var"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes) + } +
+ if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containerTmp. Received %v", hostConfig.Binds) + } +
+ if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp", "/hostVar:/containerVar") != nil { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containerTmp and /hostVar into /containerVar. Received %v", hostConfig.Binds) + } +
+ if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:ro", "/hostVar:/containerVar:rw") != nil { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containerTmp and /hostVar into /containerVar. Received %v", hostConfig.Binds) + } +
+ if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containerTmp. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes["/containerVar"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) + } +
+ if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, without volume, nothing should be bind-mounted. Received %v", hostConfig.Binds) + } else if len(config.Volumes) != 0 { + t.Fatalf("Error parsing volume flags, without volume, no volume should be present.
Received %v", config.Volumes) + } + + if _, _, err := parse(t, "-v /"); err == nil { + t.Fatalf("Expected error, but got none") + } + + if _, _, err := parse(t, "-v /:/"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't") + } + if _, _, err := parse(t, "-v"); err == nil { + t.Fatalf("Error parsing volume flags, `-v` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp:"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp:ro"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp:ro` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp::"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't") + } + if _, _, err := parse(t, "-v :"); err == nil { + t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't") + } + if _, _, err := parse(t, "-v ::"); err == nil { + t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't") + } +} + func TestCompare(t *testing.T) { volumes1 := make(map[string]struct{}) volumes1["/test1"] = struct{}{} config1 := Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - PortSpecs: []string{"1111:1111", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "11111111", - Volumes: volumes1, - } - config2 := Config{ - Dns: []string{"0.0.0.0", "2.2.2.2"}, - PortSpecs: []string{"1111:1111", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "11111111", - Volumes: volumes1, + PortSpecs: []string{"1111:1111", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumes1, } config3 := Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - PortSpecs: []string{"0000:0000", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "11111111", - Volumes: volumes1, - } - config4 := Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - PortSpecs: []string{"0000:0000", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "22222222", - Volumes: volumes1, + PortSpecs: []string{"0000:0000", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumes1, } volumes2 := make(map[string]struct{}) volumes2["/test2"] = struct{}{} config5 := Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - PortSpecs: []string{"0000:0000", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "11111111", - Volumes: volumes2, - } - if Compare(&config1, &config2) { - t.Fatalf("Compare should return false, Dns are different") + PortSpecs: []string{"0000:0000", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumes2, } if Compare(&config1, &config3) { t.Fatalf("Compare should return false, PortSpecs are different") } - if Compare(&config1, &config4) { - t.Fatalf("Compare should return false, VolumesFrom are different") - } if Compare(&config1, &config5) { t.Fatalf("Compare should return false, Volumes are different") } @@ -67,17 +197,14 @@ volumesImage["/test1"] = struct{}{} volumesImage["/test2"] = struct{}{} configImage := &Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - PortSpecs: []string{"1111:1111", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "1111", - Volumes: volumesImage, + PortSpecs: []string{"1111:1111", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumesImage, } volumesUser := make(map[string]struct{}) volumesUser["/test3"] = 
struct{}{} configUser := &Config{ - Dns: []string{"3.3.3.3"}, PortSpecs: []string{"3333:2222", "3333:3333"}, Env: []string{"VAR2=3", "VAR3=3"}, Volumes: volumesUser, @@ -87,15 +214,6 @@ t.Error(err) } - if len(configUser.Dns) != 3 { - t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns)) - } - for _, dns := range configUser.Dns { - if dns != "1.1.1.1" && dns != "2.2.2.2" && dns != "3.3.3.3" { - t.Fatalf("Expected 1.1.1.1 or 2.2.2.2 or 3.3.3.3, found %s", dns) - } - } - if len(configUser.ExposedPorts) != 3 { t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) } @@ -122,10 +240,6 @@ } } - if configUser.VolumesFrom != "1111" { - t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom) - } - ports, _, err := nat.ParsePortSpecs([]string{"0000"}) if err != nil { t.Error(err) diff -Nru docker.io-0.9.1~dfsg1/runconfig/exec.go docker.io-1.3.2~dfsg1/runconfig/exec.go --- docker.io-0.9.1~dfsg1/runconfig/exec.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/runconfig/exec.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,75 @@ +package runconfig + +import ( + "github.com/docker/docker/engine" + flag "github.com/docker/docker/pkg/mflag" +) + +type ExecConfig struct { + User string + Privileged bool + Tty bool + Container string + AttachStdin bool + AttachStderr bool + AttachStdout bool + Detach bool + Cmd []string +} + +func ExecConfigFromJob(job *engine.Job) *ExecConfig { + execConfig := &ExecConfig{ + User: job.Getenv("User"), + Privileged: job.GetenvBool("Privileged"), + Tty: job.GetenvBool("Tty"), + Container: job.Getenv("Container"), + AttachStdin: job.GetenvBool("AttachStdin"), + AttachStderr: job.GetenvBool("AttachStderr"), + AttachStdout: job.GetenvBool("AttachStdout"), + } + if cmd := job.GetenvList("Cmd"); cmd != nil { + execConfig.Cmd = cmd + } + + return execConfig +} + +func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) { + var ( + flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") + flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") + execCmd []string + container string + ) + if err := cmd.Parse(args); err != nil { + return nil, err + } + parsedArgs := cmd.Args() + if len(parsedArgs) > 1 { + container = cmd.Arg(0) + execCmd = parsedArgs[1:] + } + + execConfig := &ExecConfig{ + // TODO(vishh): Expose '-u' flag once it is supported. + User: "", + // TODO(vishh): Expose '-p' flag once it is supported. 
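// Illustration only (the container and command are hypothetical): parsing
// the arguments "-i -t mycontainer sh" yields Container "mycontainer",
// Cmd ["sh"] and Tty true; because -d is unset, AttachStdout and
// AttachStderr become true below, with AttachStdin following -i.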
+ Privileged: false, + Tty: *flTty, + Cmd: execCmd, + Container: container, + Detach: *flDetach, + } + + // If -d is not set, attach to everything by default + if !*flDetach { + execConfig.AttachStdout = true + execConfig.AttachStderr = true + if *flStdin { + execConfig.AttachStdin = true + } + } + + return execConfig, nil +}
diff -Nru docker.io-0.9.1~dfsg1/runconfig/hostconfig.go docker.io-1.3.2~dfsg1/runconfig/hostconfig.go --- docker.io-0.9.1~dfsg1/runconfig/hostconfig.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/runconfig/hostconfig.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,39 +1,121 @@ package runconfig import ( - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/nat" + "strings" + + "github.com/docker/docker/engine" + "github.com/docker/docker/nat" + "github.com/docker/docker/utils" ) +type NetworkMode string +
+// IsPrivate indicates whether the container uses its private network stack +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer() || n.IsNone()) +} + +func (n NetworkMode) IsHost() bool { + return n == "host" +} + +func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +func (n NetworkMode) IsNone() bool { + return n == "none" +} +
+type DeviceMapping struct { + PathOnHost string + PathInContainer string + CgroupPermissions string +} + +type RestartPolicy struct { + Name string + MaximumRetryCount int +} + type HostConfig struct { Binds []string ContainerIDFile string - LxcConf []KeyValuePair + LxcConf []utils.KeyValuePair Privileged bool PortBindings nat.PortMap Links []string PublishAllPorts bool + Dns []string + DnsSearch []string + ExtraHosts []string + VolumesFrom []string + Devices []DeviceMapping + NetworkMode NetworkMode + CapAdd []string + CapDrop []string + RestartPolicy RestartPolicy + SecurityOpt []string +} +
+// This is used by the create command when you want to set both the +// Config and the HostConfig in the same call +type ConfigAndHostConfig struct { + Config + HostConfig HostConfig } -type KeyValuePair struct { - Key string - Value string +func MergeConfigs(config *Config, hostConfig *HostConfig) *ConfigAndHostConfig { + return &ConfigAndHostConfig{ + *config, + *hostConfig, + } }
func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { + if job.EnvExists("HostConfig") { + hostConfig := HostConfig{} + job.GetenvJson("HostConfig", &hostConfig) + return &hostConfig + } + hostConfig := &HostConfig{ ContainerIDFile: job.Getenv("ContainerIDFile"), Privileged: job.GetenvBool("Privileged"), PublishAllPorts: job.GetenvBool("PublishAllPorts"), + NetworkMode: NetworkMode(job.Getenv("NetworkMode")), } + job.GetenvJson("LxcConf", &hostConfig.LxcConf) job.GetenvJson("PortBindings", &hostConfig.PortBindings) + job.GetenvJson("Devices", &hostConfig.Devices) + job.GetenvJson("RestartPolicy", &hostConfig.RestartPolicy) + hostConfig.SecurityOpt = job.GetenvList("SecurityOpt") if Binds := job.GetenvList("Binds"); Binds != nil { hostConfig.Binds = Binds } if Links := job.GetenvList("Links"); Links != nil { hostConfig.Links = Links } + if Dns := job.GetenvList("Dns"); Dns != nil { + hostConfig.Dns = Dns + } + if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil { + hostConfig.DnsSearch = DnsSearch + } + if ExtraHosts := job.GetenvList("ExtraHosts"); ExtraHosts != nil { + hostConfig.ExtraHosts = ExtraHosts + } + if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil { + hostConfig.VolumesFrom = VolumesFrom + } + if CapAdd := job.GetenvList("CapAdd"); CapAdd != nil { + hostConfig.CapAdd = CapAdd + } + if CapDrop := job.GetenvList("CapDrop"); CapDrop != nil { + hostConfig.CapDrop = CapDrop + } return hostConfig }
diff -Nru docker.io-0.9.1~dfsg1/runconfig/merge.go docker.io-1.3.2~dfsg1/runconfig/merge.go --- docker.io-0.9.1~dfsg1/runconfig/merge.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/runconfig/merge.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,9 +1,10 @@ package runconfig import ( - "github.com/dotcloud/docker/nat" - "github.com/dotcloud/docker/utils" "strings" + + "github.com/docker/docker/nat" + "github.com/docker/docker/pkg/log" ) func Merge(userConf, imageConf *Config) error { @@ -19,7 +20,7 @@ if userConf.CpuShares == 0 { userConf.CpuShares = imageConf.CpuShares } - if userConf.ExposedPorts == nil || len(userConf.ExposedPorts) == 0 { + if len(userConf.ExposedPorts) == 0 { userConf.ExposedPorts = imageConf.ExposedPorts } else if imageConf.ExposedPorts != nil { if userConf.ExposedPorts == nil { @@ -32,7 +33,7 @@ } } - if userConf.PortSpecs != nil && len(userConf.PortSpecs) > 0 { + if len(userConf.PortSpecs) > 0 { if userConf.ExposedPorts == nil { userConf.ExposedPorts = make(nat.PortSet) } @@ -47,9 +48,9 @@ } userConf.PortSpecs = nil } - if imageConf.PortSpecs != nil && len(imageConf.PortSpecs) > 0 { + if len(imageConf.PortSpecs) > 0 { // FIXME: I think we can safely remove this. Leaving it for now for the sake of reverse-compat paranoia. - utils.Debugf("Migrating image port specs to containter: %s", strings.Join(imageConf.PortSpecs, ", ")) + log.Debugf("Migrating image port specs to container: %s", strings.Join(imageConf.PortSpecs, ", ")) if userConf.ExposedPorts == nil { userConf.ExposedPorts = make(nat.PortSet) } @@ -64,16 +65,8 @@ } } } - if !userConf.Tty { - userConf.Tty = imageConf.Tty - } - if !userConf.OpenStdin { - userConf.OpenStdin = imageConf.OpenStdin - } - if !userConf.StdinOnce { - userConf.StdinOnce = imageConf.StdinOnce - } - if userConf.Env == nil || len(userConf.Env) == 0 { + + if len(userConf.Env) == 0 { userConf.Env = imageConf.Env } else { for _, imageEnv := range imageConf.Env { @@ -90,25 +83,20 @@ } } } - if userConf.Cmd == nil || len(userConf.Cmd) == 0 { - userConf.Cmd = imageConf.Cmd - } - if userConf.Dns == nil || len(userConf.Dns) == 0 { - userConf.Dns = imageConf.Dns - } else { - //duplicates aren't an issue here - userConf.Dns = append(userConf.Dns, imageConf.Dns...)
- } - if userConf.Entrypoint == nil || len(userConf.Entrypoint) == 0 { - userConf.Entrypoint = imageConf.Entrypoint + + if len(userConf.Entrypoint) == 0 { + if len(userConf.Cmd) == 0 { + userConf.Cmd = imageConf.Cmd + } + + if userConf.Entrypoint == nil { + userConf.Entrypoint = imageConf.Entrypoint + } } if userConf.WorkingDir == "" { userConf.WorkingDir = imageConf.WorkingDir } - if userConf.VolumesFrom == "" { - userConf.VolumesFrom = imageConf.VolumesFrom - } - if userConf.Volumes == nil || len(userConf.Volumes) == 0 { + if len(userConf.Volumes) == 0 { userConf.Volumes = imageConf.Volumes } else { for k, v := range imageConf.Volumes { diff -Nru docker.io-0.9.1~dfsg1/runconfig/parse.go docker.io-1.3.2~dfsg1/runconfig/parse.go --- docker.io-0.9.1~dfsg1/runconfig/parse.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/runconfig/parse.go 2014-11-24 17:38:01.000000000 +0000 @@ -2,79 +2,85 @@ import ( "fmt" - "github.com/dotcloud/docker/nat" - flag "github.com/dotcloud/docker/pkg/mflag" - "github.com/dotcloud/docker/pkg/opts" - "github.com/dotcloud/docker/pkg/sysinfo" - "github.com/dotcloud/docker/utils" - "io/ioutil" "path" + "strconv" "strings" + + "github.com/docker/docker/nat" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/units" + "github.com/docker/docker/utils" ) var ( - ErrInvalidWorikingDirectory = fmt.Errorf("The working directory is invalid. It needs to be an absolute path.") - ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") - ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: -rm and -d") + ErrInvalidWorkingDirectory = fmt.Errorf("The working directory is invalid. It needs to be an absolute path.") + ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: --net=container can't be used with links. This would result in undefined behavior.") + ErrConflictContainerNetworkAndDns = fmt.Errorf("Conflicting options: --net=container can't be used with --dns. This configuration is invalid.") + ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: -h and the network mode (--net)") + ErrConflictHostNetworkAndDns = fmt.Errorf("Conflicting options: --net=host can't be used with --dns. This configuration is invalid.") + ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: --net=host can't be used with links. This would result in undefined behavior.") ) -//FIXME Only used in tests -func Parse(args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { - cmd := flag.NewFlagSet("run", flag.ContinueOnError) - cmd.SetOutput(ioutil.Discard) - cmd.Usage = nil - return parseRun(cmd, args, sysInfo) -} - -// FIXME: this maps the legacy commands.go code. It should be merged with Parse to only expose a single parse function. -func ParseSubcommand(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { - return parseRun(cmd, args, sysInfo) -} - -func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { +func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { var ( // FIXME: use utils.ListOpts for attach and volumes? 
flAttach = opts.NewListOpts(opts.ValidateAttach) flVolumes = opts.NewListOpts(opts.ValidatePath) flLinks = opts.NewListOpts(opts.ValidateLink) flEnv = opts.NewListOpts(opts.ValidateEnv) + flDevices = opts.NewListOpts(opts.ValidatePath) - flPublish opts.ListOpts - flExpose opts.ListOpts - flDns opts.ListOpts - flVolumesFrom opts.ListOpts - flLxcOpts opts.ListOpts - - flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)") - flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: Run container in the background, print new container id") - flNetwork = cmd.Bool([]string{"n", "-networking"}, true, "Enable networking for this container") + flPublish = opts.NewListOpts(nil) + flExpose = opts.NewListOpts(nil) + flDns = opts.NewListOpts(opts.ValidateIPAddress) + flDnsSearch = opts.NewListOpts(opts.ValidateDnsSearch) + flExtraHosts = opts.NewListOpts(opts.ValidateExtraHost) + flVolumesFrom = opts.NewListOpts(nil) + flLxcOpts = opts.NewListOpts(nil) + flEnvFile = opts.NewListOpts(nil) + flCapAdd = opts.NewListOpts(nil) + flCapDrop = opts.NewListOpts(nil) + flSecurityOpt = opts.NewListOpts(nil) + + flNetwork = cmd.Bool([]string{"#n", "#-networking"}, true, "Enable networking for this container") flPrivileged = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container") flPublishAll = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to the host interfaces") - flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep stdin open even if not attached") - flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-tty") + flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") + flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") flContainerIDFile = cmd.String([]string{"#cidfile", "-cidfile"}, "", "Write the container ID to the file") - flEntrypoint = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default entrypoint of the image") + flEntrypoint = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default ENTRYPOINT of the image") flHostname = cmd.String([]string{"h", "-hostname"}, "", "Container host name") flMemoryString = cmd.String([]string{"m", "-memory"}, "", "Memory limit (format: , where unit = b, k, m or g)") flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID") flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") - - // For documentation purpose - _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") - _ = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container") + flCpuset = cmd.String([]string{"-cpuset"}, "", "CPUs in which to allow execution (0-3, 0,1)") + flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container\n'bridge': creates a new network stack for the container on the docker bridge\n'none': no networking for this container\n'container:': reuses another container network stack\n'host': use the host network stack inside the container. 
Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.") + flRestartPolicy = cmd.String([]string{"-restart"}, "", "Restart policy to apply when a container exits (no, on-failure[:max-retry], always)") ) - cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to stdin, stdout or stderr.") - cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)") - cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container (name:alias)") + cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to STDIN, STDOUT or STDERR.") + cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)") + cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container in the form of name:alias") + cmd.Var(&flDevices, []string{"-device"}, "Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc)") + cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables") + cmd.Var(&flEnvFile, []string{"-env-file"}, "Read in a line delimited file of environment variables") - cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host (format: %s) (use 'docker port' to see the actual mapping)", nat.PortSpecTemplateFormat)) + cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host\nformat: %s\n(use 'docker port' to see the actual mapping)", nat.PortSpecTemplateFormat)) cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host") - cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers") + cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom DNS servers") + cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom DNS search domains") + cmd.Var(&flExtraHosts, []string{"-add-host"}, "Add a custom host-to-IP mapping (host:ip)") cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") - cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") + cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "(lxc exec-driver only) Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") + + cmd.Var(&flCapAdd, []string{"-cap-add"}, "Add Linux capabilities") + cmd.Var(&flCapDrop, []string{"-cap-drop"}, "Drop Linux capabilities") + cmd.Var(&flSecurityOpt, []string{"-security-opt"}, "Security Options") if err := cmd.Parse(args); err != nil { return nil, nil, cmd, err @@ -86,30 +92,48 @@ } // Validate input params - if *flDetach && flAttach.Len() > 0 { - return nil, nil, cmd, ErrConflictAttachDetach - } if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) { - return nil, nil, cmd, ErrInvalidWorikingDirectory + return nil, nil, cmd, ErrInvalidWorkingDirectory + } + + var ( + attachStdin = flAttach.Get("stdin") + attachStdout = flAttach.Get("stdout") + attachStderr = flAttach.Get("stderr") + ) + + if *flNetMode != "bridge" && *flNetMode != "none" && *flHostname != "" { + return nil, nil, cmd, ErrConflictNetworkHostname + } + + if *flNetMode == "host" && flLinks.Len() > 0 { + return nil, nil, cmd, ErrConflictHostNetworkAndLinks } - if *flDetach && *flAutoRemove { - return nil, nil, cmd, ErrConflictDetachAutoRemove + + if *flNetMode == "container" && flLinks.Len() > 0 { + return nil, nil, cmd, 
ErrConflictContainerNetworkAndLinks + } + + if *flNetMode == "host" && flDns.Len() > 0 { + return nil, nil, cmd, ErrConflictHostNetworkAndDns + } + + if *flNetMode == "container" && flDns.Len() > 0 { + return nil, nil, cmd, ErrConflictContainerNetworkAndDns } // If neither -d or -a are set, attach to everything by default - if flAttach.Len() == 0 && !*flDetach { - if !*flDetach { - flAttach.Set("stdout") - flAttach.Set("stderr") - if *flStdin { - flAttach.Set("stdin") - } + if flAttach.Len() == 0 { + attachStdout = true + attachStderr = true + if *flStdin { + attachStdin = true } } var flMemory int64 if *flMemoryString != "" { - parsedMemory, err := utils.RAMInBytes(*flMemoryString) + parsedMemory, err := units.RAMInBytes(*flMemoryString) if err != nil { return nil, nil, cmd, err } @@ -120,11 +144,11 @@ // add any bind targets to the list of container volumes for bind := range flVolumes.GetMap() { if arr := strings.Split(bind, ":"); len(arr) > 1 { - if arr[0] == "/" { - return nil, nil, cmd, fmt.Errorf("Invalid bind mount: source can't be '/'") + if arr[1] == "/" { + return nil, nil, cmd, fmt.Errorf("Invalid bind mount: destination can't be '/'") } - dstDir := arr[1] - flVolumes.Set(dstDir) + // after creating the bind mount we want to delete it from the flVolumes values because + // we do not want bind mounts being committed to image configs binds = append(binds, bind) flVolumes.Delete(bind) } else if bind == "/" { @@ -148,7 +172,7 @@ entrypoint = []string{*flEntrypoint} } - lxcConf, err := parseLxcConfOpts(flLxcOpts) + lxcConf, err := parseKeyValueOpts(flLxcOpts) if err != nil { return nil, nil, cmd, err } @@ -179,6 +203,38 @@ } } + // parse device mappings + deviceMappings := []DeviceMapping{} + for _, device := range flDevices.GetAll() { + deviceMapping, err := ParseDevice(device) + if err != nil { + return nil, nil, cmd, err + } + deviceMappings = append(deviceMappings, deviceMapping) + } + + // collect all the environment variables for the container + envVariables := []string{} + for _, ef := range flEnvFile.GetAll() { + parsedVars, err := opts.ParseEnvFile(ef) + if err != nil { + return nil, nil, cmd, err + } + envVariables = append(envVariables, parsedVars...) + } + // parse the '-e' and '--env' after, to allow override + envVariables = append(envVariables, flEnv.GetAll()...) 
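// Illustration of the resulting order (file contents assumed): an env file
// declaring FOO=from-file combined with a later -e FOO=from-cli leaves
//
//	envVariables == []string{"FOO=from-file", "FOO=from-cli"}
//
// so, per the comment above, the command-line value is the one intended
// to win.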
+ + netMode, err := parseNetMode(*flNetMode) + if err != nil { + return nil, nil, cmd, fmt.Errorf("--net: invalid net mode: %v", err) + } + + restartPolicy, err := parseRestartPolicy(*flRestartPolicy) + if err != nil { + return nil, nil, cmd, err + } + config := &Config{ Hostname: hostname, Domainname: domainname, @@ -190,15 +246,14 @@ OpenStdin: *flStdin, Memory: flMemory, CpuShares: *flCpuShares, - AttachStdin: flAttach.Get("stdin"), - AttachStdout: flAttach.Get("stdout"), - AttachStderr: flAttach.Get("stderr"), - Env: flEnv.GetAll(), + Cpuset: *flCpuset, + AttachStdin: attachStdin, + AttachStdout: attachStdout, + AttachStderr: attachStderr, + Env: envVariables, Cmd: runCmd, - Dns: flDns.GetAll(), Image: image, Volumes: flVolumes.GetMap(), - VolumesFrom: strings.Join(flVolumesFrom.GetAll(), ","), Entrypoint: entrypoint, WorkingDir: *flWorkingDir, } @@ -211,6 +266,16 @@ PortBindings: portBindings, Links: flLinks.GetAll(), PublishAllPorts: *flPublishAll, + Dns: flDns.GetAll(), + DnsSearch: flDnsSearch.GetAll(), + ExtraHosts: flExtraHosts.GetAll(), + VolumesFrom: flVolumesFrom.GetAll(), + NetworkMode: netMode, + Devices: deviceMappings, + CapAdd: flCapAdd.GetAll(), + CapDrop: flCapDrop.GetAll(), + RestartPolicy: restartPolicy, + SecurityOpt: flSecurityOpt.GetAll(), } if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit { @@ -225,22 +290,117 @@ return config, hostConfig, cmd, nil } -func parseLxcConfOpts(opts opts.ListOpts) ([]KeyValuePair, error) { - out := make([]KeyValuePair, opts.Len()) +// parseRestartPolicy returns the parsed policy or an error indicating what is incorrect +func parseRestartPolicy(policy string) (RestartPolicy, error) { + p := RestartPolicy{} + + if policy == "" { + return p, nil + } + + var ( + parts = strings.Split(policy, ":") + name = parts[0] + ) + + switch name { + case "always": + p.Name = name + + if len(parts) == 2 { + return p, fmt.Errorf("maximum restart count not valid with restart policy of \"always\"") + } + case "no": + // do nothing + case "on-failure": + p.Name = name + + if len(parts) == 2 { + count, err := strconv.Atoi(parts[1]) + if err != nil { + return p, err + } + + p.MaximumRetryCount = count + } + default: + return p, fmt.Errorf("invalid restart policy %s", name) + } + + return p, nil +} + +// options will come in the format of name.key=value or name.option +func parseDriverOpts(opts opts.ListOpts) (map[string][]string, error) { + out := make(map[string][]string, len(opts.GetAll())) + for _, o := range opts.GetAll() { + parts := strings.SplitN(o, ".", 2) + if len(parts) < 2 { + return nil, fmt.Errorf("invalid opt format %s", o) + } else if strings.TrimSpace(parts[0]) == "" { + return nil, fmt.Errorf("key cannot be empty %s", o) + } + values, exists := out[parts[0]] + if !exists { + values = []string{} + } + out[parts[0]] = append(values, parts[1]) + } + return out, nil +} + +func parseKeyValueOpts(opts opts.ListOpts) ([]utils.KeyValuePair, error) { + out := make([]utils.KeyValuePair, opts.Len()) for i, o := range opts.GetAll() { - k, v, err := parseLxcOpt(o) + k, v, err := parsers.ParseKeyValueOpt(o) if err != nil { return nil, err } - out[i] = KeyValuePair{Key: k, Value: v} + out[i] = utils.KeyValuePair{Key: k, Value: v} } return out, nil } -func parseLxcOpt(opt string) (string, string, error) { - parts := strings.SplitN(opt, "=", 2) - if len(parts) != 2 { - return "", "", fmt.Errorf("Unable to parse lxc conf option: %s", opt) +func parseNetMode(netMode string) (NetworkMode, error) { + parts := strings.Split(netMode, ":") + switch mode := 
parts[0]; mode { + case "bridge", "none", "host": + case "container": + if len(parts) < 2 || parts[1] == "" { + return "", fmt.Errorf("invalid container format container:") + } + default: + return "", fmt.Errorf("invalid --net: %s", netMode) + } + return NetworkMode(netMode), nil +} + +func ParseDevice(device string) (DeviceMapping, error) { + src := "" + dst := "" + permissions := "rwm" + arr := strings.Split(device, ":") + switch len(arr) { + case 3: + permissions = arr[2] + fallthrough + case 2: + dst = arr[1] + fallthrough + case 1: + src = arr[0] + default: + return DeviceMapping{}, fmt.Errorf("Invalid device specification: %s", device) + } + + if dst == "" { + dst = src + } + + deviceMapping := DeviceMapping{ + PathOnHost: src, + PathInContainer: dst, + CgroupPermissions: permissions, } - return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil + return deviceMapping, nil } diff -Nru docker.io-0.9.1~dfsg1/runconfig/parse_test.go docker.io-1.3.2~dfsg1/runconfig/parse_test.go --- docker.io-0.9.1~dfsg1/runconfig/parse_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/runconfig/parse_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,14 +1,26 @@ package runconfig import ( + "io/ioutil" "testing" + + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/sysinfo" ) +func parseRun(args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { + cmd := flag.NewFlagSet("run", flag.ContinueOnError) + cmd.SetOutput(ioutil.Discard) + cmd.Usage = nil + return Parse(cmd, args, sysInfo) +} + func TestParseLxcConfOpt(t *testing.T) { opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "} for _, o := range opts { - k, v, err := parseLxcOpt(o) + k, v, err := parsers.ParseKeyValueOpt(o) if err != nil { t.FailNow() } @@ -20,3 +32,29 @@ } } } + +func TestNetHostname(t *testing.T) { + if _, _, _, err := parseRun([]string{"-h=name", "img", "cmd"}, nil); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + if _, _, _, err := parseRun([]string{"--net=host", "img", "cmd"}, nil); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + if _, _, _, err := parseRun([]string{"-h=name", "--net=bridge", "img", "cmd"}, nil); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + if _, _, _, err := parseRun([]string{"-h=name", "--net=none", "img", "cmd"}, nil); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + if _, _, _, err := parseRun([]string{"-h=name", "--net=host", "img", "cmd"}, nil); err != ErrConflictNetworkHostname { + t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err) + } + + if _, _, _, err := parseRun([]string{"-h=name", "--net=container:other", "img", "cmd"}, nil); err != ErrConflictNetworkHostname { + t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err) + } +} diff -Nru docker.io-0.9.1~dfsg1/runtime.go docker.io-1.3.2~dfsg1/runtime.go --- docker.io-0.9.1~dfsg1/runtime.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/runtime.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,906 +0,0 @@ -package docker - -import ( - "container/list" - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/execdriver" - "github.com/dotcloud/docker/execdriver/lxc" - "github.com/dotcloud/docker/execdriver/native" - "github.com/dotcloud/docker/graphdriver" - "github.com/dotcloud/docker/graphdriver/aufs" - _ 
"github.com/dotcloud/docker/graphdriver/btrfs" - _ "github.com/dotcloud/docker/graphdriver/devmapper" - _ "github.com/dotcloud/docker/graphdriver/vfs" - _ "github.com/dotcloud/docker/networkdriver/lxc" - "github.com/dotcloud/docker/networkdriver/portallocator" - "github.com/dotcloud/docker/pkg/graphdb" - "github.com/dotcloud/docker/pkg/sysinfo" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "os" - "path" - "regexp" - "sort" - "strings" - "sync" - "time" -) - -// Set the max depth to the aufs default that most -// kernels are compiled with -// For more information see: http://sourceforge.net/p/aufs/aufs3-standalone/ci/aufs3.12/tree/config.mk -const MaxImageDepth = 127 - -var ( - defaultDns = []string{"8.8.8.8", "8.8.4.4"} - validContainerNameChars = `[a-zA-Z0-9_.-]` - validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`) -) - -type Runtime struct { - repository string - sysInitPath string - containers *list.List - graph *Graph - repositories *TagStore - idIndex *utils.TruncIndex - sysInfo *sysinfo.SysInfo - volumes *Graph - srv *Server - eng *engine.Engine - config *DaemonConfig - containerGraph *graphdb.Database - driver graphdriver.Driver - execDriver execdriver.Driver -} - -// List returns an array of all containers registered in the runtime. -func (runtime *Runtime) List() []*Container { - containers := new(History) - for e := runtime.containers.Front(); e != nil; e = e.Next() { - containers.Add(e.Value.(*Container)) - } - return *containers -} - -func (runtime *Runtime) getContainerElement(id string) *list.Element { - for e := runtime.containers.Front(); e != nil; e = e.Next() { - container := e.Value.(*Container) - if container.ID == id { - return e - } - } - return nil -} - -// Get looks for a container by the specified ID or name, and returns it. -// If the container is not found, or if an error occurs, nil is returned. -func (runtime *Runtime) Get(name string) *Container { - if c, _ := runtime.GetByName(name); c != nil { - return c - } - - id, err := runtime.idIndex.Get(name) - if err != nil { - return nil - } - - e := runtime.getContainerElement(id) - if e == nil { - return nil - } - return e.Value.(*Container) -} - -// Exists returns a true if a container of the specified ID or name exists, -// false otherwise. -func (runtime *Runtime) Exists(id string) bool { - return runtime.Get(id) != nil -} - -func (runtime *Runtime) containerRoot(id string) string { - return path.Join(runtime.repository, id) -} - -// Load reads the contents of a container from disk -// This is typically done at startup. 
-func (runtime *Runtime) load(id string) (*Container, error) { - container := &Container{root: runtime.containerRoot(id)} - if err := container.FromDisk(); err != nil { - return nil, err - } - if container.ID != id { - return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) - } - if container.State.IsRunning() { - container.State.SetGhost(true) - } - return container, nil -} - -// Register makes a container object usable by the runtime as -func (runtime *Runtime) Register(container *Container) error { - if container.runtime != nil || runtime.Exists(container.ID) { - return fmt.Errorf("Container is already loaded") - } - if err := validateID(container.ID); err != nil { - return err - } - if err := runtime.ensureName(container); err != nil { - return err - } - - container.runtime = runtime - - // Attach to stdout and stderr - container.stderr = utils.NewWriteBroadcaster() - container.stdout = utils.NewWriteBroadcaster() - // Attach to stdin - if container.Config.OpenStdin { - container.stdin, container.stdinPipe = io.Pipe() - } else { - container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin - } - // done - runtime.containers.PushBack(container) - runtime.idIndex.Add(container.ID) - - // FIXME: if the container is supposed to be running but is not, auto restart it? - // if so, then we need to restart monitor and init a new lock - // If the container is supposed to be running, make sure of it - if container.State.IsRunning() { - if container.State.IsGhost() { - utils.Debugf("killing ghost %s", container.ID) - - existingPid := container.State.Pid - container.State.SetGhost(false) - container.State.SetStopped(0) - - if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") { - lxc.KillLxc(container.ID, 9) - } else { - command := &execdriver.Command{ - ID: container.ID, - } - command.Process = &os.Process{Pid: existingPid} - runtime.execDriver.Kill(command, 9) - } - // ensure that the filesystem is also unmounted - unmountVolumesForContainer(container) - if err := container.Unmount(); err != nil { - utils.Debugf("ghost unmount error %s", err) - } - } - - info := runtime.execDriver.Info(container.ID) - if !info.IsRunning() { - utils.Debugf("Container %s was supposed to be running but is not.", container.ID) - if runtime.config.AutoRestart { - utils.Debugf("Restarting") - unmountVolumesForContainer(container) - if err := container.Unmount(); err != nil { - utils.Debugf("restart unmount error %s", err) - } - - container.State.SetGhost(false) - container.State.SetStopped(0) - if err := container.Start(); err != nil { - return err - } - } else { - utils.Debugf("Marking as stopped") - container.State.SetStopped(-127) - if err := container.ToDisk(); err != nil { - return err - } - } - } - } else { - // When the container is not running, we still initialize the waitLock - // chan and close it. Receiving on nil chan blocks whereas receiving on a - // closed chan does not. In this case we do not want to block. 
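// The Go property relied on here, as a self-contained illustration:
//
//	var nilCh chan struct{}
//	closedCh := make(chan struct{})
//	close(closedCh)
//	<-closedCh // returns immediately: receiving on a closed channel never blocks
//	<-nilCh    // blocks forever: receiving on a nil channel never proceeds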
- container.waitLock = make(chan struct{}) - close(container.waitLock) - } - return nil -} - -func (runtime *Runtime) ensureName(container *Container) error { - if container.Name == "" { - name, err := generateRandomName(runtime) - if err != nil { - name = utils.TruncateID(container.ID) - } - container.Name = name - - if err := container.ToDisk(); err != nil { - utils.Debugf("Error saving container name %s", err) - } - if !runtime.containerGraph.Exists(name) { - if _, err := runtime.containerGraph.Set(name, container.ID); err != nil { - utils.Debugf("Setting default id - %s", err) - } - } - } - return nil -} - -func (runtime *Runtime) LogToDisk(src *utils.WriteBroadcaster, dst, stream string) error { - log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600) - if err != nil { - return err - } - src.AddWriter(log, stream) - return nil -} - -// Destroy unregisters a container from the runtime and cleanly removes its contents from the filesystem. -func (runtime *Runtime) Destroy(container *Container) error { - if container == nil { - return fmt.Errorf("The given container is ") - } - - element := runtime.getContainerElement(container.ID) - if element == nil { - return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID) - } - - if err := container.Stop(3); err != nil { - return err - } - - if err := runtime.driver.Remove(container.ID); err != nil { - return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", runtime.driver, container.ID, err) - } - - initID := fmt.Sprintf("%s-init", container.ID) - if err := runtime.driver.Remove(initID); err != nil { - return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", runtime.driver, initID, err) - } - - if _, err := runtime.containerGraph.Purge(container.ID); err != nil { - utils.Debugf("Unable to remove container from link graph: %s", err) - } - - // Deregister the container before removing its directory, to avoid race conditions - runtime.idIndex.Delete(container.ID) - runtime.containers.Remove(element) - if err := os.RemoveAll(container.root); err != nil { - return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) - } - return nil -} - -func (runtime *Runtime) restore() error { - if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" { - fmt.Printf("Loading containers: ") - } - dir, err := ioutil.ReadDir(runtime.repository) - if err != nil { - return err - } - containers := make(map[string]*Container) - currentDriver := runtime.driver.String() - - for _, v := range dir { - id := v.Name() - container, err := runtime.load(id) - if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" { - fmt.Print(".") - } - if err != nil { - utils.Errorf("Failed to load container %v: %v", id, err) - continue - } - - // Ignore the container if it does not support the current driver being used by the graph - if container.Driver == "" && currentDriver == "aufs" || container.Driver == currentDriver { - utils.Debugf("Loaded container %v", container.ID) - containers[container.ID] = container - } else { - utils.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) - } - } - - register := func(container *Container) { - if err := runtime.Register(container); err != nil { - utils.Debugf("Failed to register container %s: %s", container.ID, err) - } - } - - if entities := runtime.containerGraph.List("/", -1); entities != nil { - for _, p := range entities.Paths() { - if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" { - fmt.Print(".") - } 
- e := entities[p] - if container, ok := containers[e.ID()]; ok { - register(container) - delete(containers, e.ID()) - } - } - } - - // Any containers that are left over do not exist in the graph - for _, container := range containers { - // Try to set the default name for a container if it exists prior to links - container.Name, err = generateRandomName(runtime) - if err != nil { - container.Name = utils.TruncateID(container.ID) - } - - if _, err := runtime.containerGraph.Set(container.Name, container.ID); err != nil { - utils.Debugf("Setting default id - %s", err) - } - register(container) - } - - if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" { - fmt.Printf(": done.\n") - } - - return nil -} - -// Create creates a new container from the given configuration with a given name. -func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Container, []string, error) { - // Lookup image - img, err := runtime.repositories.LookupImage(config.Image) - if err != nil { - return nil, nil, err - } - - // We add 2 layers to the depth because the container's rw and - // init layer add to the restriction - depth, err := img.Depth() - if err != nil { - return nil, nil, err - } - - if depth+2 >= MaxImageDepth { - return nil, nil, fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth) - } - - checkDeprecatedExpose := func(config *runconfig.Config) bool { - if config != nil { - if config.PortSpecs != nil { - for _, p := range config.PortSpecs { - if strings.Contains(p, ":") { - return true - } - } - } - } - return false - } - - warnings := []string{} - if checkDeprecatedExpose(img.Config) || checkDeprecatedExpose(config) { - warnings = append(warnings, "The mapping to public ports on your host via Dockerfile EXPOSE (host:port:port) has been deprecated. Use -p to publish the ports.") - } - - if img.Config != nil { - if err := runconfig.Merge(config, img.Config); err != nil { - return nil, nil, err - } - } - - if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { - return nil, nil, fmt.Errorf("No command specified") - } - - // Generate id - id := GenerateID() - - if name == "" { - name, err = generateRandomName(runtime) - if err != nil { - name = utils.TruncateID(id) - } - } else { - if !validContainerNamePattern.MatchString(name) { - return nil, nil, fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) - } - } - - if name[0] != '/' { - name = "/" + name - } - - // Set the enitity in the graph using the default name specified - if _, err := runtime.containerGraph.Set(name, id); err != nil { - if !graphdb.IsNonUniqueNameError(err) { - return nil, nil, err - } - - conflictingContainer, err := runtime.GetByName(name) - if err != nil { - if strings.Contains(err.Error(), "Could not find entity") { - return nil, nil, err - } - - // Remove name and continue starting the container - if err := runtime.containerGraph.Delete(name); err != nil { - return nil, nil, err - } - } else { - nameAsKnownByUser := strings.TrimPrefix(name, "/") - return nil, nil, fmt.Errorf( - "Conflict, The name %s is already assigned to %s. 
You have to delete (or rename) that container to be able to assign %s to a container again.", nameAsKnownByUser, - utils.TruncateID(conflictingContainer.ID), nameAsKnownByUser) - } - } - - // Generate default hostname - // FIXME: the lxc template no longer needs to set a default hostname - if config.Hostname == "" { - config.Hostname = id[:12] - } - - var args []string - var entrypoint string - - if len(config.Entrypoint) != 0 { - entrypoint = config.Entrypoint[0] - args = append(config.Entrypoint[1:], config.Cmd...) - } else { - entrypoint = config.Cmd[0] - args = config.Cmd[1:] - } - - container := &Container{ - // FIXME: we should generate the ID here instead of receiving it as an argument - ID: id, - Created: time.Now().UTC(), - Path: entrypoint, - Args: args, //FIXME: de-duplicate from config - Config: config, - hostConfig: &runconfig.HostConfig{}, - Image: img.ID, // Always use the resolved image id - NetworkSettings: &NetworkSettings{}, - Name: name, - Driver: runtime.driver.String(), - ExecDriver: runtime.execDriver.Name(), - } - container.root = runtime.containerRoot(container.ID) - // Step 1: create the container directory. - // This doubles as a barrier to avoid race conditions. - if err := os.Mkdir(container.root, 0700); err != nil { - return nil, nil, err - } - - initID := fmt.Sprintf("%s-init", container.ID) - if err := runtime.driver.Create(initID, img.ID); err != nil { - return nil, nil, err - } - initPath, err := runtime.driver.Get(initID) - if err != nil { - return nil, nil, err - } - defer runtime.driver.Put(initID) - - if err := setupInitLayer(initPath); err != nil { - return nil, nil, err - } - - if err := runtime.driver.Create(container.ID, initID); err != nil { - return nil, nil, err - } - resolvConf, err := utils.GetResolvConf() - if err != nil { - return nil, nil, err - } - - if len(config.Dns) == 0 && len(runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) { - //"WARNING: Docker detected local DNS server on resolv.conf. Using default external servers: %v", defaultDns - runtime.config.Dns = defaultDns - } - - // If custom dns exists, then create a resolv.conf for the container - if len(config.Dns) > 0 || len(runtime.config.Dns) > 0 { - var dns []string - if len(config.Dns) > 0 { - dns = config.Dns - } else { - dns = runtime.config.Dns - } - container.ResolvConfPath = path.Join(container.root, "resolv.conf") - f, err := os.Create(container.ResolvConfPath) - if err != nil { - return nil, nil, err - } - defer f.Close() - for _, dns := range dns { - if _, err := f.Write([]byte("nameserver " + dns + "\n")); err != nil { - return nil, nil, err - } - } - } else { - container.ResolvConfPath = "/etc/resolv.conf" - } - - // Step 2: save the container json - if err := container.ToDisk(); err != nil { - return nil, nil, err - } - - // Step 3: register the container - if err := runtime.Register(container); err != nil { - return nil, nil, err - } - return container, warnings, nil -} - -// Commit creates a new filesystem image from the current state of a container. -// The image can optionally be tagged into a repository -func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*Image, error) { - // FIXME: freeze the container before copying it to avoid data corruption? - // FIXME: this shouldn't be in commands. 
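The Entrypoint/Cmd merge in the removed Create above reduces to a small pure function, and the earlier guard that Entrypoint and Cmd are not both empty keeps the indexing safe. A standalone sketch (splitCommand is a hypothetical helper, not in the tree):

package main

import "fmt"

// splitCommand reproduces the branch in Create above: a non-empty entrypoint
// supplies the binary and prefixes its own arguments to Cmd; otherwise Cmd
// provides both the binary and its arguments.
func splitCommand(entrypoint, cmd []string) (path string, args []string) {
	if len(entrypoint) != 0 {
		return entrypoint[0], append(entrypoint[1:], cmd...)
	}
	return cmd[0], cmd[1:]
}

func main() {
	path, args := splitCommand([]string{"/bin/echo", "-n"}, []string{"hello"})
	fmt.Println(path, args) // /bin/echo [-n hello]

	path, args = splitCommand(nil, []string{"ls", "-l"})
	fmt.Println(path, args) // ls [-l]
}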
- if err := container.Mount(); err != nil { - return nil, err - } - defer container.Unmount() - - rwTar, err := container.ExportRw() - if err != nil { - return nil, err - } - defer rwTar.Close() - - // Create a new image from the container's base layers + a new layer from container changes - img, err := runtime.graph.Create(rwTar, container, comment, author, config) - if err != nil { - return nil, err - } - // Register the image if needed - if repository != "" { - if err := runtime.repositories.Set(repository, tag, img.ID, true); err != nil { - return img, err - } - } - return img, nil -} - -func getFullName(name string) (string, error) { - if name == "" { - return "", fmt.Errorf("Container name cannot be empty") - } - if name[0] != '/' { - name = "/" + name - } - return name, nil -} - -func (runtime *Runtime) GetByName(name string) (*Container, error) { - fullName, err := getFullName(name) - if err != nil { - return nil, err - } - entity := runtime.containerGraph.Get(fullName) - if entity == nil { - return nil, fmt.Errorf("Could not find entity for %s", name) - } - e := runtime.getContainerElement(entity.ID()) - if e == nil { - return nil, fmt.Errorf("Could not find container for entity id %s", entity.ID()) - } - return e.Value.(*Container), nil -} - -func (runtime *Runtime) Children(name string) (map[string]*Container, error) { - name, err := getFullName(name) - if err != nil { - return nil, err - } - children := make(map[string]*Container) - - err = runtime.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error { - c := runtime.Get(e.ID()) - if c == nil { - return fmt.Errorf("Could not get container for name %s and id %s", p, e.ID()) - } - children[p] = c - return nil - }, 0) - - if err != nil { - return nil, err - } - return children, nil -} - -func (runtime *Runtime) RegisterLink(parent, child *Container, alias string) error { - fullName := path.Join(parent.Name, alias) - if !runtime.containerGraph.Exists(fullName) { - _, err := runtime.containerGraph.Set(fullName, child.ID) - return err - } - return nil -} - -// FIXME: harmonize with NewGraph() -func NewRuntime(config *DaemonConfig, eng *engine.Engine) (*Runtime, error) { - runtime, err := NewRuntimeFromDirectory(config, eng) - if err != nil { - return nil, err - } - return runtime, nil -} - -func NewRuntimeFromDirectory(config *DaemonConfig, eng *engine.Engine) (*Runtime, error) { - - // Set the default driver - graphdriver.DefaultDriver = config.GraphDriver - - // Load storage driver - driver, err := graphdriver.New(config.Root) - if err != nil { - return nil, err - } - utils.Debugf("Using graph driver %s", driver) - - runtimeRepo := path.Join(config.Root, "containers") - - if err := os.MkdirAll(runtimeRepo, 0700); err != nil && !os.IsExist(err) { - return nil, err - } - - if ad, ok := driver.(*aufs.Driver); ok { - utils.Debugf("Migrating existing containers") - if err := ad.Migrate(config.Root, setupInitLayer); err != nil { - return nil, err - } - } - - utils.Debugf("Creating images graph") - g, err := NewGraph(path.Join(config.Root, "graph"), driver) - if err != nil { - return nil, err - } - - // We don't want to use a complex driver like aufs or devmapper - // for volumes, just a plain filesystem - volumesDriver, err := graphdriver.GetDriver("vfs", config.Root) - if err != nil { - return nil, err - } - utils.Debugf("Creating volumes graph") - volumes, err := NewGraph(path.Join(config.Root, "volumes"), volumesDriver) - if err != nil { - return nil, err - } - utils.Debugf("Creating repository list") - repositories, err 
:= NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g) - if err != nil { - return nil, fmt.Errorf("Couldn't create Tag store: %s", err) - } - - if !config.DisableNetwork { - job := eng.Job("init_networkdriver") - - job.SetenvBool("EnableIptables", config.EnableIptables) - job.SetenvBool("InterContainerCommunication", config.InterContainerCommunication) - job.SetenvBool("EnableIpForward", config.EnableIpForward) - job.Setenv("BridgeIface", config.BridgeIface) - job.Setenv("BridgeIP", config.BridgeIP) - job.Setenv("DefaultBindingIP", config.DefaultIp.String()) - - if err := job.Run(); err != nil { - return nil, err - } - } - - graphdbPath := path.Join(config.Root, "linkgraph.db") - graph, err := graphdb.NewSqliteConn(graphdbPath) - if err != nil { - return nil, err - } - - localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION)) - sysInitPath := utils.DockerInitPath(localCopy) - if sysInitPath == "" { - return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See http://docs.docker.io/en/latest/contributing/devenvironment for official build instructions.") - } - - if sysInitPath != localCopy { - // When we find a suitable dockerinit binary (even if it's our local binary), we copy it into config.Root at localCopy for future use (so that the original can go away without that being a problem, for example during a package upgrade). - if err := os.Mkdir(path.Dir(localCopy), 0700); err != nil && !os.IsExist(err) { - return nil, err - } - if _, err := utils.CopyFile(sysInitPath, localCopy); err != nil { - return nil, err - } - if err := os.Chmod(localCopy, 0700); err != nil { - return nil, err - } - sysInitPath = localCopy - } - - var ( - ed execdriver.Driver - sysInfo = sysinfo.New(false) - ) - - switch config.ExecDriver { - case "lxc": - // we want to give the lxc driver the full docker root because it needs - // to access and write config and template files in /var/lib/docker/containers/* - // to be backwards compatible - ed, err = lxc.NewDriver(config.Root, sysInfo.AppArmor) - case "native": - ed, err = native.NewDriver(path.Join(config.Root, "execdriver", "native")) - default: - return nil, fmt.Errorf("unknown exec driver %s", config.ExecDriver) - } - if err != nil { - return nil, err - } - - runtime := &Runtime{ - repository: runtimeRepo, - containers: list.New(), - graph: g, - repositories: repositories, - idIndex: utils.NewTruncIndex(), - sysInfo: sysInfo, - volumes: volumes, - config: config, - containerGraph: graph, - driver: driver, - sysInitPath: sysInitPath, - execDriver: ed, - eng: eng, - } - - if err := runtime.restore(); err != nil { - return nil, err - } - return runtime, nil -} - -func (runtime *Runtime) Close() error { - errorsStrings := []string{} - if err := portallocator.ReleaseAll(); err != nil { - utils.Errorf("portallocator.ReleaseAll(): %s", err) - errorsStrings = append(errorsStrings, err.Error()) - } - if err := runtime.driver.Cleanup(); err != nil { - utils.Errorf("runtime.driver.Cleanup(): %s", err.Error()) - errorsStrings = append(errorsStrings, err.Error()) - } - if err := runtime.containerGraph.Close(); err != nil { - utils.Errorf("runtime.containerGraph.Close(): %s", err.Error()) - errorsStrings = append(errorsStrings, err.Error()) - } - if len(errorsStrings) > 0 { - return fmt.Errorf("%s", strings.Join(errorsStrings, ", ")) - } - return nil -} - -func (runtime *Runtime) Mount(container *Container) error { - dir, err := runtime.driver.Get(container.ID) - if err 
!= nil { - return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, runtime.driver, err) - } - if container.basefs == "" { - container.basefs = dir - } else if container.basefs != dir { - return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", - runtime.driver, container.ID, container.basefs, dir) - } - return nil -} - -func (runtime *Runtime) Unmount(container *Container) error { - runtime.driver.Put(container.ID) - return nil -} - -func (runtime *Runtime) Changes(container *Container) ([]archive.Change, error) { - if differ, ok := runtime.driver.(graphdriver.Differ); ok { - return differ.Changes(container.ID) - } - cDir, err := runtime.driver.Get(container.ID) - if err != nil { - return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err) - } - defer runtime.driver.Put(container.ID) - initDir, err := runtime.driver.Get(container.ID + "-init") - if err != nil { - return nil, fmt.Errorf("Error getting container init rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err) - } - defer runtime.driver.Put(container.ID + "-init") - return archive.ChangesDirs(cDir, initDir) -} - -func (runtime *Runtime) Diff(container *Container) (archive.Archive, error) { - if differ, ok := runtime.driver.(graphdriver.Differ); ok { - return differ.Diff(container.ID) - } - - changes, err := runtime.Changes(container) - if err != nil { - return nil, err - } - - cDir, err := runtime.driver.Get(container.ID) - if err != nil { - return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err) - } - - archive, err := archive.ExportChanges(cDir, changes) - if err != nil { - return nil, err - } - return utils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - runtime.driver.Put(container.ID) - return err - }), nil -} - -func (runtime *Runtime) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { - return runtime.execDriver.Run(c.command, pipes, startCallback) -} - -func (runtime *Runtime) Kill(c *Container, sig int) error { - return runtime.execDriver.Kill(c.command, sig) -} - -// Nuke kills all containers then removes all content -// from the content root, including images, volumes and -// container filesystems. -// Again: this will remove your entire docker runtime! -func (runtime *Runtime) Nuke() error { - var wg sync.WaitGroup - for _, container := range runtime.List() { - wg.Add(1) - go func(c *Container) { - c.Kill() - wg.Done() - }(container) - } - wg.Wait() - runtime.Close() - - return os.RemoveAll(runtime.config.Root) -} - -// FIXME: this is a convenience function for integration tests -// which need direct access to runtime.graph. -// Once the tests switch to using engine and jobs, this method -// can go away. -func (runtime *Runtime) Graph() *Graph { - return runtime.graph -} - -// History is a convenience type for storing a list of containers, -// ordered by creation date. 
-type History []*Container - -func (history *History) Len() int { - return len(*history) -} - -func (history *History) Less(i, j int) bool { - containers := *history - return containers[j].When().Before(containers[i].When()) -} - -func (history *History) Swap(i, j int) { - containers := *history - tmp := containers[i] - containers[i] = containers[j] - containers[j] = tmp -} - -func (history *History) Add(container *Container) { - *history = append(*history, container) - sort.Sort(history) -} diff -Nru docker.io-0.9.1~dfsg1/server.go docker.io-1.3.2~dfsg1/server.go --- docker.io-0.9.1~dfsg1/server.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/server.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,2408 +0,0 @@ -package docker - -import ( - "encoding/json" - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/auth" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/pkg/graphdb" - "github.com/dotcloud/docker/pkg/signal" - "github.com/dotcloud/docker/registry" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "log" - "net/http" - "net/url" - "os" - "os/exec" - gosignal "os/signal" - "path" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "syscall" - "time" -) - -// InitServer runs the remote api server `srv` as a daemon. -// Only one api server can run at the same time - this is enforced by a pidfile. -// The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup. -func InitServer(job *engine.Job) engine.Status { - job.Logf("Creating server") - srv, err := NewServer(job.Eng, DaemonConfigFromJob(job)) - if err != nil { - return job.Error(err) - } - if srv.runtime.config.Pidfile != "" { - job.Logf("Creating pidfile") - if err := utils.CreatePidFile(srv.runtime.config.Pidfile); err != nil { - // FIXME: do we need fatal here instead of returning a job error? 
- log.Fatal(err) - } - } - job.Logf("Setting up signal traps") - c := make(chan os.Signal, 1) - gosignal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT) - go func() { - sig := <-c - log.Printf("Received signal '%v', exiting\n", sig) - utils.RemovePidFile(srv.runtime.config.Pidfile) - srv.Close() - os.Exit(0) - }() - job.Eng.Hack_SetGlobalVar("httpapi.server", srv) - job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime) - - for name, handler := range map[string]engine.Handler{ - "export": srv.ContainerExport, - "create": srv.ContainerCreate, - "stop": srv.ContainerStop, - "restart": srv.ContainerRestart, - "start": srv.ContainerStart, - "kill": srv.ContainerKill, - "wait": srv.ContainerWait, - "tag": srv.ImageTag, - "resize": srv.ContainerResize, - "commit": srv.ContainerCommit, - "info": srv.DockerInfo, - "container_delete": srv.ContainerDestroy, - "image_export": srv.ImageExport, - "images": srv.Images, - "history": srv.ImageHistory, - "viz": srv.ImagesViz, - "container_copy": srv.ContainerCopy, - "insert": srv.ImageInsert, - "attach": srv.ContainerAttach, - "search": srv.ImagesSearch, - "changes": srv.ContainerChanges, - "top": srv.ContainerTop, - "load": srv.ImageLoad, - "build": srv.Build, - "pull": srv.ImagePull, - "import": srv.ImageImport, - "image_delete": srv.ImageDelete, - "inspect": srv.JobInspect, - "events": srv.Events, - "push": srv.ImagePush, - "containers": srv.Containers, - "auth": srv.Auth, - } { - if err := job.Eng.Register(name, handler); err != nil { - return job.Error(err) - } - } - return engine.StatusOK -} - -// simpleVersionInfo is a simple implementation of -// the interface VersionInfo, which is used -// to provide version information for some product, -// component, etc. It stores the product name and the version -// as strings and returns them on calls to Name() and Version(). -type simpleVersionInfo struct { - name string - version string -} - -func (v *simpleVersionInfo) Name() string { - return v.name -} - -func (v *simpleVersionInfo) Version() string { - return v.version -} - -// ContainerKill sends a signal to the container. -// If no signal is given (sig 0), then Kill with SIGKILL and wait -// for the container to exit. -// If a signal is given, then just send it to the container and return. -func (srv *Server) ContainerKill(job *engine.Job) engine.Status { - if n := len(job.Args); n < 1 || n > 2 { - return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name) - } - var ( - name = job.Args[0] - sig uint64 - err error - ) - - // If we have a signal, look at it. 
Otherwise, do nothing - if len(job.Args) == 2 && job.Args[1] != "" { - // Check if we passed the signal as a number: - // The largest legal signal is 31, so let's parse on 5 bits - sig, err = strconv.ParseUint(job.Args[1], 10, 5) - if err != nil { - // The signal is not a number, treat it as a string - sig = uint64(signal.SignalMap[job.Args[1]]) - if sig == 0 { - return job.Errorf("Invalid signal: %s", job.Args[1]) - } - - } - } - - if container := srv.runtime.Get(name); container != nil { - // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) - if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { - if err := container.Kill(); err != nil { - return job.Errorf("Cannot kill container %s: %s", name, err) - } - srv.LogEvent("kill", container.ID, srv.runtime.repositories.ImageName(container.Image)) - } else { - // Otherwise, just send the requested signal - if err := container.kill(int(sig)); err != nil { - return job.Errorf("Cannot kill container %s: %s", name, err) - } - // FIXME: Add event for signals - } - } else { - return job.Errorf("No such container: %s", name) - } - return engine.StatusOK -} - -func (srv *Server) Auth(job *engine.Job) engine.Status { - var ( - err error - authConfig = &auth.AuthConfig{} - ) - - job.GetenvJson("authConfig", authConfig) - // TODO: this is only done here because auth and registry need to be merged into one pkg - if addr := authConfig.ServerAddress; addr != "" && addr != auth.IndexServerAddress() { - addr, err = registry.ExpandAndVerifyRegistryUrl(addr) - if err != nil { - return job.Error(err) - } - authConfig.ServerAddress = addr - } - status, err := auth.Login(authConfig, srv.HTTPRequestFactory(nil)) - if err != nil { - return job.Error(err) - } - job.Printf("%s\n", status) - return engine.StatusOK -} - -func (srv *Server) Events(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Usage: %s FROM", job.Name) - } - - var ( - from = job.Args[0] - since = job.GetenvInt64("since") - ) - sendEvent := func(event *utils.JSONMessage) error { - b, err := json.Marshal(event) - if err != nil { - return fmt.Errorf("JSON error") - } - _, err = job.Stdout.Write(b) - if err != nil { - // On error, evict the listener - utils.Errorf("%s", err) - srv.Lock() - delete(srv.listeners, from) - srv.Unlock() - return err - } - return nil - } - - listener := make(chan utils.JSONMessage) - srv.Lock() - srv.listeners[from] = listener - srv.Unlock() - job.Stdout.Write(nil) // flush - if since != 0 { - // If since, send previous events that happened after the timestamp - for _, event := range srv.GetEvents() { - if event.Time >= since { - err := sendEvent(&event) - if err != nil && err.Error() == "JSON error" { - continue - } - if err != nil { - job.Error(err) - return engine.StatusErr - } - } - } - } - for event := range listener { - err := sendEvent(&event) - if err != nil && err.Error() == "JSON error" { - continue - } - if err != nil { - return job.Error(err) - } - } - return engine.StatusOK -} - -func (srv *Server) ContainerExport(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Usage: %s container_id", job.Name) - } - name := job.Args[0] - if container := srv.runtime.Get(name); container != nil { - data, err := container.Export() - if err != nil { - return job.Errorf("%s: %s", name, err) - } - defer data.Close() - - // Stream the entire contents of the container (basically a volatile snapshot) - if _, err := io.Copy(job.Stdout, data); err != nil { - return job.Errorf("%s: %s", name, err) - }
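ContainerKill's parsing above accepts a signal either as a number (ParseUint with bitSize 5, i.e. 0 through 31) or as a symbolic name looked up in a table. A runnable sketch of that numeric-or-named lookup; signalMap here is a two-entry stand-in for the real pkg/signal table:

package main

import (
	"fmt"
	"strconv"
	"syscall"
)

// signalMap is a truncated stand-in for signal.SignalMap.
var signalMap = map[string]syscall.Signal{
	"TERM": syscall.SIGTERM,
	"KILL": syscall.SIGKILL,
}

// parseSignal mirrors the logic above: try the 5-bit numeric form first,
// then fall back to a name lookup, rejecting unknown names.
func parseSignal(s string) (uint64, error) {
	if sig, err := strconv.ParseUint(s, 10, 5); err == nil {
		return sig, nil
	}
	if sig, ok := signalMap[s]; ok && sig != 0 {
		return uint64(sig), nil
	}
	return 0, fmt.Errorf("Invalid signal: %s", s)
}

func main() {
	fmt.Println(parseSignal("9"))    // 9 <nil>
	fmt.Println(parseSignal("TERM")) // 15 <nil>
	fmt.Println(parseSignal("HUP"))  // 0 Invalid signal: HUP (not in this stand-in map)
}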
- // FIXME: factor job-specific LogEvent to engine.Job.Run() - srv.LogEvent("export", container.ID, srv.runtime.repositories.ImageName(container.Image)) - return engine.StatusOK - } - return job.Errorf("No such container: %s", name) -} - -// ImageExport exports all images with the given tag. All versions -// containing the same tag are exported. The resulting output is an -// uncompressed tar ball. -// name is the set of tags to export. -// out is the writer where the images are written to. -func (srv *Server) ImageExport(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Usage: %s CONTAINER\n", job.Name) - } - name := job.Args[0] - // get image json - tempdir, err := ioutil.TempDir("", "docker-export-") - if err != nil { - return job.Error(err) - } - defer os.RemoveAll(tempdir) - - utils.Debugf("Serializing %s", name) - - rootRepo, err := srv.runtime.repositories.Get(name) - if err != nil { - return job.Error(err) - } - if rootRepo != nil { - for _, id := range rootRepo { - image, err := srv.ImageInspect(id) - if err != nil { - return job.Error(err) - } - - if err := srv.exportImage(image, tempdir); err != nil { - return job.Error(err) - } - } - - // write repositories - rootRepoMap := map[string]Repository{} - rootRepoMap[name] = rootRepo - rootRepoJson, _ := json.Marshal(rootRepoMap) - - if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.ModeAppend); err != nil { - return job.Error(err) - } - } else { - image, err := srv.ImageInspect(name) - if err != nil { - return job.Error(err) - } - if err := srv.exportImage(image, tempdir); err != nil { - return job.Error(err) - } - } - - fs, err := archive.Tar(tempdir, archive.Uncompressed) - if err != nil { - return job.Error(err) - } - defer fs.Close() - - if _, err := io.Copy(job.Stdout, fs); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) exportImage(image *Image, tempdir string) error { - for i := image; i != nil; { - // temporary directory - tmpImageDir := path.Join(tempdir, i.ID) - if err := os.Mkdir(tmpImageDir, os.ModeDir); err != nil { - if os.IsExist(err) { - return nil - } - return err - } - - var version = "1.0" - var versionBuf = []byte(version) - - if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.ModeAppend); err != nil { - return err - } - - // serialize json - b, err := json.Marshal(i) - if err != nil { - return err - } - if err := ioutil.WriteFile(path.Join(tmpImageDir, "json"), b, os.ModeAppend); err != nil { - return err - } - - // serialize filesystem - fs, err := i.TarLayer() - if err != nil { - return err - } - defer fs.Close() - - fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar")) - if err != nil { - return err - } - if _, err = io.Copy(fsTar, fs); err != nil { - return err - } - fsTar.Close() - - // find parent - if i.Parent != "" { - i, err = srv.ImageInspect(i.Parent) - if err != nil { - return err - } - } else { - i = nil - } - } - return nil -} - -func (srv *Server) Build(job *engine.Job) engine.Status { - if len(job.Args) != 0 { - return job.Errorf("Usage: %s\n", job.Name) - } - var ( - remoteURL = job.Getenv("remote") - repoName = job.Getenv("t") - suppressOutput = job.GetenvBool("q") - noCache = job.GetenvBool("nocache") - rm = job.GetenvBool("rm") - authConfig = &auth.AuthConfig{} - configFile = &auth.ConfigFile{} - tag string - context io.ReadCloser - ) - job.GetenvJson("authConfig", authConfig) - job.GetenvJson("configFile", configFile) - repoName, tag = 
utils.ParseRepositoryTag(repoName) - - if remoteURL == "" { - context = ioutil.NopCloser(job.Stdin) - } else if utils.IsGIT(remoteURL) { - if !strings.HasPrefix(remoteURL, "git://") { - remoteURL = "https://" + remoteURL - } - root, err := ioutil.TempDir("", "docker-build-git") - if err != nil { - return job.Error(err) - } - defer os.RemoveAll(root) - - if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { - return job.Errorf("Error trying to use git: %s (%s)", err, output) - } - - c, err := archive.Tar(root, archive.Uncompressed) - if err != nil { - return job.Error(err) - } - context = c - } else if utils.IsURL(remoteURL) { - f, err := utils.Download(remoteURL) - if err != nil { - return job.Error(err) - } - defer f.Body.Close() - dockerFile, err := ioutil.ReadAll(f.Body) - if err != nil { - return job.Error(err) - } - c, err := archive.Generate("Dockerfile", string(dockerFile)) - if err != nil { - return job.Error(err) - } - context = c - } - defer context.Close() - - sf := utils.NewStreamFormatter(job.GetenvBool("json")) - b := NewBuildFile(srv, - &StdoutFormater{ - Writer: job.Stdout, - StreamFormatter: sf, - }, - &StderrFormater{ - Writer: job.Stdout, - StreamFormatter: sf, - }, - !suppressOutput, !noCache, rm, job.Stdout, sf, authConfig, configFile) - id, err := b.Build(context) - if err != nil { - return job.Error(err) - } - if repoName != "" { - srv.runtime.repositories.Set(repoName, tag, id, false) - } - return engine.StatusOK -} - -// Loads a set of images into the repository. This is the complement of ImageExport. -// The input stream is an uncompressed tar ball containing images and metadata. -func (srv *Server) ImageLoad(job *engine.Job) engine.Status { - tmpImageDir, err := ioutil.TempDir("", "docker-import-") - if err != nil { - return job.Error(err) - } - defer os.RemoveAll(tmpImageDir) - - var ( - repoTarFile = path.Join(tmpImageDir, "repo.tar") - repoDir = path.Join(tmpImageDir, "repo") - ) - - tarFile, err := os.Create(repoTarFile) - if err != nil { - return job.Error(err) - } - if _, err := io.Copy(tarFile, job.Stdin); err != nil { - return job.Error(err) - } - tarFile.Close() - - repoFile, err := os.Open(repoTarFile) - if err != nil { - return job.Error(err) - } - if err := os.Mkdir(repoDir, os.ModeDir); err != nil { - return job.Error(err) - } - if err := archive.Untar(repoFile, repoDir, nil); err != nil { - return job.Error(err) - } - - dirs, err := ioutil.ReadDir(repoDir) - if err != nil { - return job.Error(err) - } - - for _, d := range dirs { - if d.IsDir() { - if err := srv.recursiveLoad(d.Name(), tmpImageDir); err != nil { - return job.Error(err) - } - } - } - - repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories")) - if err == nil { - repositories := map[string]Repository{} - if err := json.Unmarshal(repositoriesJson, &repositories); err != nil { - return job.Error(err) - } - - for imageName, tagMap := range repositories { - for tag, address := range tagMap { - if err := srv.runtime.repositories.Set(imageName, tag, address, true); err != nil { - return job.Error(err) - } - } - } - } else if !os.IsNotExist(err) { - return job.Error(err) - } - - return engine.StatusOK -} - -func (srv *Server) recursiveLoad(address, tmpImageDir string) error { - if _, err := srv.ImageInspect(address); err != nil { - utils.Debugf("Loading %s", address) - - imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json")) - if err != nil { - utils.Debugf("Error reading 
json", err) - return err - } - - layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar")) - if err != nil { - utils.Debugf("Error reading embedded tar", err) - return err - } - img, err := NewImgJSON(imageJson) - if err != nil { - utils.Debugf("Error unmarshalling json", err) - return err - } - if img.Parent != "" { - if !srv.runtime.graph.Exists(img.Parent) { - if err := srv.recursiveLoad(img.Parent, tmpImageDir); err != nil { - return err - } - } - } - if err := srv.runtime.graph.Register(imageJson, layer, img); err != nil { - return err - } - } - utils.Debugf("Completed processing %s", address) - - return nil -} - -func (srv *Server) ImagesSearch(job *engine.Job) engine.Status { - if n := len(job.Args); n != 1 { - return job.Errorf("Usage: %s TERM", job.Name) - } - var ( - term = job.Args[0] - metaHeaders = map[string][]string{} - authConfig = &auth.AuthConfig{} - ) - job.GetenvJson("authConfig", authConfig) - job.GetenvJson("metaHeaders", metaHeaders) - - r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), auth.IndexServerAddress()) - if err != nil { - return job.Error(err) - } - results, err := r.SearchRepositories(term) - if err != nil { - return job.Error(err) - } - outs := engine.NewTable("star_count", 0) - for _, result := range results.Results { - out := &engine.Env{} - out.Import(result) - outs.Add(out) - } - outs.ReverseSort() - if _, err := outs.WriteListTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) ImageInsert(job *engine.Job) engine.Status { - if len(job.Args) != 3 { - return job.Errorf("Usage: %s IMAGE URL PATH\n", job.Name) - } - - var ( - name = job.Args[0] - url = job.Args[1] - path = job.Args[2] - ) - - sf := utils.NewStreamFormatter(job.GetenvBool("json")) - - out := utils.NewWriteFlusher(job.Stdout) - img, err := srv.runtime.repositories.LookupImage(name) - if err != nil { - return job.Error(err) - } - - file, err := utils.Download(url) - if err != nil { - return job.Error(err) - } - defer file.Body.Close() - - config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.runtime.sysInfo) - if err != nil { - return job.Error(err) - } - - c, _, err := srv.runtime.Create(config, "") - if err != nil { - return job.Error(err) - } - - if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf, false, utils.TruncateID(img.ID), "Downloading"), path); err != nil { - return job.Error(err) - } - // FIXME: Handle custom repo, tag comment, author - img, err = srv.runtime.Commit(c, "", "", img.Comment, img.Author, nil) - if err != nil { - out.Write(sf.FormatError(err)) - return engine.StatusErr - } - out.Write(sf.FormatStatus("", img.ID)) - return engine.StatusOK -} - -func (srv *Server) ImagesViz(job *engine.Job) engine.Status { - images, _ := srv.runtime.graph.Map() - if images == nil { - return engine.StatusOK - } - job.Stdout.Write([]byte("digraph docker {\n")) - - var ( - parentImage *Image - err error - ) - for _, image := range images { - parentImage, err = image.GetParent() - if err != nil { - return job.Errorf("Error while getting parent image: %v", err) - } - if parentImage != nil { - job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n")) - } else { - job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n")) - } - } - - reporefs := make(map[string][]string) - - for name, repository := range srv.runtime.repositories.Repositories { - for tag, id := range repository { - 
reporefs[utils.TruncateID(id)] = append(reporefs[utils.TruncateID(id)], fmt.Sprintf("%s:%s", name, tag)) - } - } - - for id, repos := range reporefs { - job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n")) - } - job.Stdout.Write([]byte(" base [style=invisible]\n}\n")) - return engine.StatusOK -} - -func (srv *Server) Images(job *engine.Job) engine.Status { - var ( - allImages map[string]*Image - err error - ) - if job.GetenvBool("all") { - allImages, err = srv.runtime.graph.Map() - } else { - allImages, err = srv.runtime.graph.Heads() - } - if err != nil { - return job.Error(err) - } - lookup := make(map[string]*engine.Env) - for name, repository := range srv.runtime.repositories.Repositories { - if job.Getenv("filter") != "" { - if match, _ := path.Match(job.Getenv("filter"), name); !match { - continue - } - } - for tag, id := range repository { - image, err := srv.runtime.graph.Get(id) - if err != nil { - log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err) - continue - } - - if out, exists := lookup[id]; exists { - out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag))) - } else { - out := &engine.Env{} - delete(allImages, id) - out.Set("ParentId", image.Parent) - out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)}) - out.Set("Id", image.ID) - out.SetInt64("Created", image.Created.Unix()) - out.SetInt64("Size", image.Size) - out.SetInt64("VirtualSize", image.getParentsSize(0)+image.Size) - lookup[id] = out - } - - } - } - - outs := engine.NewTable("Created", len(lookup)) - for _, value := range lookup { - outs.Add(value) - } - - // Display images which aren't part of a repository/tag - if job.Getenv("filter") == "" { - for _, image := range allImages { - out := &engine.Env{} - out.Set("ParentId", image.Parent) - out.SetList("RepoTags", []string{":"}) - out.Set("Id", image.ID) - out.SetInt64("Created", image.Created.Unix()) - out.SetInt64("Size", image.Size) - out.SetInt64("VirtualSize", image.getParentsSize(0)+image.Size) - outs.Add(out) - } - } - - outs.ReverseSort() - if _, err := outs.WriteListTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) DockerInfo(job *engine.Job) engine.Status { - images, _ := srv.runtime.graph.Map() - var imgcount int - if images == nil { - imgcount = 0 - } else { - imgcount = len(images) - } - kernelVersion := "" - if kv, err := utils.GetKernelVersion(); err == nil { - kernelVersion = kv.String() - } - - // if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION) - initPath := utils.DockerInitPath("") - if initPath == "" { - // if that fails, we'll just return the path from the runtime - initPath = srv.runtime.sysInitPath - } - - v := &engine.Env{} - v.SetInt("Containers", len(srv.runtime.List())) - v.SetInt("Images", imgcount) - v.Set("Driver", srv.runtime.driver.String()) - v.SetJson("DriverStatus", srv.runtime.driver.Status()) - v.SetBool("MemoryLimit", srv.runtime.sysInfo.MemoryLimit) - v.SetBool("SwapLimit", srv.runtime.sysInfo.SwapLimit) - v.SetBool("IPv4Forwarding", !srv.runtime.sysInfo.IPv4ForwardingDisabled) - v.SetBool("Debug", os.Getenv("DEBUG") != "") - v.SetInt("NFd", utils.GetTotalUsedFds()) - v.SetInt("NGoroutines", runtime.NumGoroutine()) - 
v.Set("ExecutionDriver", srv.runtime.execDriver.Name()) - v.SetInt("NEventsListener", len(srv.listeners)) - v.Set("KernelVersion", kernelVersion) - v.Set("IndexServerAddress", auth.IndexServerAddress()) - v.Set("InitSha1", dockerversion.INITSHA1) - v.Set("InitPath", initPath) - if _, err := v.WriteTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) ImageHistory(job *engine.Job) engine.Status { - if n := len(job.Args); n != 1 { - return job.Errorf("Usage: %s IMAGE", job.Name) - } - name := job.Args[0] - image, err := srv.runtime.repositories.LookupImage(name) - if err != nil { - return job.Error(err) - } - - lookupMap := make(map[string][]string) - for name, repository := range srv.runtime.repositories.Repositories { - for tag, id := range repository { - // If the ID already has a reverse lookup, do not update it unless for "latest" - if _, exists := lookupMap[id]; !exists { - lookupMap[id] = []string{} - } - lookupMap[id] = append(lookupMap[id], name+":"+tag) - } - } - - outs := engine.NewTable("Created", 0) - err = image.WalkHistory(func(img *Image) error { - out := &engine.Env{} - out.Set("Id", img.ID) - out.SetInt64("Created", img.Created.Unix()) - out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " ")) - out.SetList("Tags", lookupMap[img.ID]) - out.SetInt64("Size", img.Size) - outs.Add(out) - return nil - }) - outs.ReverseSort() - if _, err := outs.WriteListTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) ContainerTop(job *engine.Job) engine.Status { - if len(job.Args) != 1 && len(job.Args) != 2 { - return job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name) - } - var ( - name = job.Args[0] - psArgs = "-ef" - ) - - if len(job.Args) == 2 && job.Args[1] != "" { - psArgs = job.Args[1] - } - - if container := srv.runtime.Get(name); container != nil { - if !container.State.IsRunning() { - return job.Errorf("Container %s is not running", name) - } - pids, err := srv.runtime.execDriver.GetPidsForContainer(container.ID) - if err != nil { - return job.Error(err) - } - output, err := exec.Command("ps", psArgs).Output() - if err != nil { - return job.Errorf("Error running ps: %s", err) - } - - lines := strings.Split(string(output), "\n") - header := strings.Fields(lines[0]) - out := &engine.Env{} - out.SetList("Titles", header) - - pidIndex := -1 - for i, name := range header { - if name == "PID" { - pidIndex = i - } - } - if pidIndex == -1 { - return job.Errorf("Couldn't find PID field in ps output") - } - - processes := [][]string{} - for _, line := range lines[1:] { - if len(line) == 0 { - continue - } - fields := strings.Fields(line) - p, err := strconv.Atoi(fields[pidIndex]) - if err != nil { - return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) - } - - for _, pid := range pids { - if pid == p { - // Make sure number of fields equals number of header titles - // merging "overhanging" fields - process := fields[:len(header)-1] - process = append(process, strings.Join(fields[len(header)-1:], " ")) - processes = append(processes, process) - } - } - } - out.SetJson("Processes", processes) - out.WriteTo(job.Stdout) - return engine.StatusOK - - } - return job.Errorf("No such container: %s", name) -} - -func (srv *Server) ContainerChanges(job *engine.Job) engine.Status { - if n := len(job.Args); n != 1 { - return job.Errorf("Usage: %s CONTAINER", job.Name) - } - name := job.Args[0] - if container := srv.runtime.Get(name); container != nil 
{ - outs := engine.NewTable("", 0) - changes, err := container.Changes() - if err != nil { - return job.Error(err) - } - for _, change := range changes { - out := &engine.Env{} - if err := out.Import(change); err != nil { - return job.Error(err) - } - outs.Add(out) - } - if _, err := outs.WriteListTo(job.Stdout); err != nil { - return job.Error(err) - } - } else { - return job.Errorf("No such container: %s", name) - } - return engine.StatusOK -} - -func (srv *Server) Containers(job *engine.Job) engine.Status { - var ( - foundBefore bool - displayed int - all = job.GetenvBool("all") - since = job.Getenv("since") - before = job.Getenv("before") - n = job.GetenvInt("limit") - size = job.GetenvBool("size") - ) - outs := engine.NewTable("Created", 0) - - names := map[string][]string{} - srv.runtime.containerGraph.Walk("/", func(p string, e *graphdb.Entity) error { - names[e.ID()] = append(names[e.ID()], p) - return nil - }, -1) - - for _, container := range srv.runtime.List() { - if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" { - continue - } - if before != "" && !foundBefore { - if container.ID == before || utils.TruncateID(container.ID) == before { - foundBefore = true - } - continue - } - if n > 0 && displayed == n { - break - } - if container.ID == since || utils.TruncateID(container.ID) == since { - break - } - displayed++ - out := &engine.Env{} - out.Set("Id", container.ID) - out.SetList("Names", names[container.ID]) - out.Set("Image", srv.runtime.repositories.ImageName(container.Image)) - out.Set("Command", fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " "))) - out.SetInt64("Created", container.Created.Unix()) - out.Set("Status", container.State.String()) - str, err := container.NetworkSettings.PortMappingAPI().ToListString() - if err != nil { - return job.Error(err) - } - out.Set("Ports", str) - if size { - sizeRw, sizeRootFs := container.GetSize() - out.SetInt64("SizeRw", sizeRw) - out.SetInt64("SizeRootFs", sizeRootFs) - } - outs.Add(out) - } - outs.ReverseSort() - if _, err := outs.WriteListTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) ContainerCommit(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER\n", job.Name) - } - name := job.Args[0] - - container := srv.runtime.Get(name) - if container == nil { - return job.Errorf("No such container: %s", name) - } - var config = container.Config - var newConfig runconfig.Config - if err := job.GetenvJson("config", &newConfig); err != nil { - return job.Error(err) - } - - if err := runconfig.Merge(&newConfig, config); err != nil { - return job.Error(err) - } - - img, err := srv.runtime.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &newConfig) - if err != nil { - return job.Error(err) - } - job.Printf("%s\n", img.ID) - return engine.StatusOK -} - -func (srv *Server) ImageTag(job *engine.Job) engine.Status { - if len(job.Args) != 2 && len(job.Args) != 3 { - return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name) - } - var tag string - if len(job.Args) == 3 { - tag = job.Args[2] - } - if err := srv.runtime.repositories.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) error { - history, err := r.GetRemoteHistory(imgID, endpoint, token) - if err != nil { - return err - } - out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil)) - // FIXME: Try to stream the images? - // FIXME: Launch the getRemoteImage() in goroutines - - for i := len(history) - 1; i >= 0; i-- { - id := history[i] - - // ensure no two downloads of the same layer happen at the same time - if c, err := srv.poolAdd("pull", "layer:"+id); err != nil { - utils.Errorf("Image (id: %s) pull is already running, skipping: %v", id, err) - <-c - } - defer srv.poolRemove("pull", "layer:"+id) - - if !srv.runtime.graph.Exists(id) { - out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil)) - var ( - imgJSON []byte - imgSize int - err error - img *Image - ) - retries := 5 - for j := 1; j <= retries; j++ { - imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token) - if err != nil && j == retries { - out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) - return err - } else if err != nil { - time.Sleep(time.Duration(j) * 500 * time.Millisecond) - continue - } - img, err = NewImgJSON(imgJSON) - if err != nil && j == retries { - out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) - return fmt.Errorf("Failed to parse json: %s", err) - } else if err != nil { - time.Sleep(time.Duration(j) * 500 * time.Millisecond) - continue - } else { - break - } - } - - // Get the layer - out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling fs layer", nil)) - layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token) - if err != nil { - out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) - return err - } - defer layer.Close() - if err := srv.runtime.graph.Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil { - out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil)) - return err - } - } - out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil)) - - } - return nil -} - -func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName, remoteName, askedTag string, sf *utils.StreamFormatter, parallel bool) 
error { - out.Write(sf.FormatStatus("", "Pulling repository %s", localName)) - - repoData, err := r.GetRepositoryData(remoteName) - if err != nil { - return err - } - - utils.Debugf("Retrieving the tag list") - tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens) - if err != nil { - utils.Errorf("%v", err) - return err - } - - for tag, id := range tagsList { - repoData.ImgList[id] = &registry.ImgData{ - ID: id, - Tag: tag, - Checksum: "", - } - } - - utils.Debugf("Registering tags") - // If no tag has been specified, pull them all - if askedTag == "" { - for tag, id := range tagsList { - repoData.ImgList[id].Tag = tag - } - } else { - // Otherwise, check that the tag exists and use only that one - id, exists := tagsList[askedTag] - if !exists { - return fmt.Errorf("Tag %s not found in repository %s", askedTag, localName) - } - repoData.ImgList[id].Tag = askedTag - } - - errors := make(chan error) - for _, image := range repoData.ImgList { - downloadImage := func(img *registry.ImgData) { - if askedTag != "" && img.Tag != askedTag { - utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID) - if parallel { - errors <- nil - } - return - } - - if img.Tag == "" { - utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) - if parallel { - errors <- nil - } - return - } - - // ensure no two downloads of the same image happen at the same time - if c, err := srv.poolAdd("pull", "img:"+img.ID); err != nil { - if c != nil { - out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil)) - <-c - out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) - } else { - utils.Errorf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) - } - if parallel { - errors <- nil - } - return - } - defer srv.poolRemove("pull", "img:"+img.ID) - - out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, localName), nil)) - success := false - var lastErr error - for _, ep := range repoData.Endpoints { - out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, localName, ep), nil)) - if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil { - // It's not ideal that only the last error is returned; it would be better to concatenate the errors. - // As the error is also given to the output stream, the user will see the error. 
- lastErr = err - out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err), nil)) - continue - } - success = true - break - } - if !success { - out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, %s", img.Tag, localName, lastErr), nil)) - if parallel { - errors <- fmt.Errorf("Could not find repository on any of the indexed registries.") - return - } - } - out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) - - if parallel { - errors <- nil - } - } - - if parallel { - go downloadImage(image) - } else { - downloadImage(image) - } - } - if parallel { - var lastError error - for i := 0; i < len(repoData.ImgList); i++ { - if err := <-errors; err != nil { - lastError = err - } - } - if lastError != nil { - return lastError - } - - } - for tag, id := range tagsList { - if askedTag != "" && tag != askedTag { - continue - } - if err := srv.runtime.repositories.Set(localName, tag, id, true); err != nil { - return err - } - } - if err := srv.runtime.repositories.Save(); err != nil { - return err - } - - return nil -} - -func (srv *Server) poolAdd(kind, key string) (chan struct{}, error) { - srv.Lock() - defer srv.Unlock() - - if c, exists := srv.pullingPool[key]; exists { - return c, fmt.Errorf("pull %s is already in progress", key) - } - if c, exists := srv.pushingPool[key]; exists { - return c, fmt.Errorf("push %s is already in progress", key) - } - - c := make(chan struct{}) - switch kind { - case "pull": - srv.pullingPool[key] = c - case "push": - srv.pushingPool[key] = c - default: - return nil, fmt.Errorf("Unknown pool type") - } - return c, nil -} - -func (srv *Server) poolRemove(kind, key string) error { - srv.Lock() - defer srv.Unlock() - switch kind { - case "pull": - if c, exists := srv.pullingPool[key]; exists { - close(c) - delete(srv.pullingPool, key) - } - case "push": - if c, exists := srv.pushingPool[key]; exists { - close(c) - delete(srv.pushingPool, key) - } - default: - return fmt.Errorf("Unknown pool type") - } - return nil -} - -func (srv *Server) ImagePull(job *engine.Job) engine.Status { - if n := len(job.Args); n != 1 && n != 2 { - return job.Errorf("Usage: %s IMAGE [TAG]", job.Name) - } - var ( - localName = job.Args[0] - tag string - sf = utils.NewStreamFormatter(job.GetenvBool("json")) - authConfig = &auth.AuthConfig{} - metaHeaders map[string][]string - ) - if len(job.Args) > 1 { - tag = job.Args[1] - } - - job.GetenvJson("authConfig", authConfig) - job.GetenvJson("metaHeaders", metaHeaders) - - c, err := srv.poolAdd("pull", localName+":"+tag) - if err != nil { - if c != nil { - // Another pull of the same repository is already taking place; just wait for it to finish - job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. 
Waiting.", localName)) - <-c - return engine.StatusOK - } - return job.Error(err) - } - defer srv.poolRemove("pull", localName+":"+tag) - - // Resolve the Repository name from fqn to endpoint + name - hostname, remoteName, err := registry.ResolveRepositoryName(localName) - if err != nil { - return job.Error(err) - } - - endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname) - if err != nil { - return job.Error(err) - } - - r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint) - if err != nil { - return job.Error(err) - } - - if endpoint == auth.IndexServerAddress() { - // If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar" - localName = remoteName - } - - if err = srv.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil { - return job.Error(err) - } - - return engine.StatusOK -} - -// Retrieve the all the images to be uploaded in the correct order -func (srv *Server) getImageList(localRepo map[string]string) ([]string, map[string][]string, error) { - var ( - imageList []string - imagesSeen map[string]bool = make(map[string]bool) - tagsByImage map[string][]string = make(map[string][]string) - ) - - for tag, id := range localRepo { - var imageListForThisTag []string - - tagsByImage[id] = append(tagsByImage[id], tag) - - for img, err := srv.runtime.graph.Get(id); img != nil; img, err = img.GetParent() { - if err != nil { - return nil, nil, err - } - - if imagesSeen[img.ID] { - // This image is already on the list, we can ignore it and all its parents - break - } - - imagesSeen[img.ID] = true - imageListForThisTag = append(imageListForThisTag, img.ID) - } - - // reverse the image list for this tag (so the "most"-parent image is first) - for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 { - imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i] - } - - // append to main image list - imageList = append(imageList, imageListForThisTag...) 
- } - - utils.Debugf("Image list: %v", imageList) - utils.Debugf("Tags by image: %v", tagsByImage) - - return imageList, tagsByImage, nil -} - -func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, sf *utils.StreamFormatter) error { - out = utils.NewWriteFlusher(out) - utils.Debugf("Local repo: %s", localRepo) - imgList, tagsByImage, err := srv.getImageList(localRepo) - if err != nil { - return err - } - - out.Write(sf.FormatStatus("", "Sending image list")) - - var repoData *registry.RepositoryData - var imageIndex []*registry.ImgData - - for _, imgId := range imgList { - if tags, exists := tagsByImage[imgId]; exists { - // If an image has tags you must add an entry in the image index - // for each tag - for _, tag := range tags { - imageIndex = append(imageIndex, &registry.ImgData{ - ID: imgId, - Tag: tag, - }) - } - } else { - // If the image does not have a tag it still needs to be sent to the - // registry with an empty tag so that it is associated with the repository - imageIndex = append(imageIndex, &registry.ImgData{ - ID: imgId, - Tag: "", - }) - - } - } - - utils.Debugf("Preparing to push %s with the following images and tags\n", localRepo) - for _, data := range imageIndex { - utils.Debugf("Pushing ID: %s with Tag: %s\n", data.ID, data.Tag) - } - - // Register all the images in a repository with the registry - // If an image is not in this list it will not be associated with the repository - repoData, err = r.PushImageJSONIndex(remoteName, imageIndex, false, nil) - if err != nil { - return err - } - - for _, ep := range repoData.Endpoints { - out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, len(localRepo))) - - for _, imgId := range imgList { - if r.LookupRemoteImage(imgId, ep, repoData.Tokens) { - out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(imgId))) - } else { - if _, err := srv.pushImage(r, out, remoteName, imgId, ep, repoData.Tokens, sf); err != nil { - // FIXME: Continue on error? 
- return err - } - } - - for _, tag := range tagsByImage[imgId] { - out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(imgId), ep+"repositories/"+remoteName+"/tags/"+tag)) - - if err := r.PushRegistryTag(remoteName, imgId, tag, ep, repoData.Tokens); err != nil { - return err - } - } - } - } - - if _, err := r.PushImageJSONIndex(remoteName, imageIndex, true, repoData.Endpoints); err != nil { - return err - } - - return nil -} - -func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) { - out = utils.NewWriteFlusher(out) - jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.graph.Root, imgID, "json")) - if err != nil { - return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err) - } - out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil)) - - imgData := &registry.ImgData{ - ID: imgID, - } - - // Send the json - if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil { - if err == registry.ErrAlreadyExists { - out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil)) - return "", nil - } - return "", err - } - - layerData, err := srv.runtime.graph.TempLayerArchive(imgID, archive.Uncompressed, sf, out) - if err != nil { - return "", fmt.Errorf("Failed to generate layer archive: %s", err) - } - defer os.RemoveAll(layerData.Name()) - - // Send the layer - checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw) - if err != nil { - return "", err - } - imgData.Checksum = checksum - imgData.ChecksumPayload = checksumPayload - // Send the checksum - if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil { - return "", err - } - - out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image successfully pushed", nil)) - return imgData.Checksum, nil -} - -// FIXME: Allow interrupting the current push when a new push of the same image is started. 
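The poolAdd/poolRemove pair used throughout the pull and push paths de-duplicates concurrent transfers: the first caller for a key installs a channel, a duplicate caller gets that same channel back along with an error and can block on it until the owner closes it. A compact runnable sketch of the pattern; the pool type and key below are hypothetical:

package main

import (
	"fmt"
	"sync"
	"time"
)

// pool approximates the pullingPool/pushingPool maps guarded by the server mutex.
type pool struct {
	sync.Mutex
	inflight map[string]chan struct{}
}

// add registers key and returns a fresh channel, or the existing channel plus
// an error when a transfer for key is already running.
func (p *pool) add(key string) (chan struct{}, error) {
	p.Lock()
	defer p.Unlock()
	if c, exists := p.inflight[key]; exists {
		return c, fmt.Errorf("%s is already in progress", key)
	}
	c := make(chan struct{})
	p.inflight[key] = c
	return c, nil
}

// remove closes the channel, waking every waiter, and forgets the key.
func (p *pool) remove(key string) {
	p.Lock()
	defer p.Unlock()
	if c, exists := p.inflight[key]; exists {
		close(c)
		delete(p.inflight, key)
	}
}

func main() {
	p := &pool{inflight: make(map[string]chan struct{})}
	if _, err := p.add("push:busybox"); err != nil {
		panic(err) // first registration always succeeds
	}
	go func() {
		time.Sleep(10 * time.Millisecond)
		p.remove("push:busybox") // finish the "push"
	}()
	if c, err := p.add("push:busybox"); err != nil {
		<-c // duplicate caller waits for the in-flight push instead of starting another
		fmt.Println("waited out:", err)
	}
}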
-func (srv *Server) ImagePush(job *engine.Job) engine.Status { - if n := len(job.Args); n != 1 { - return job.Errorf("Usage: %s IMAGE", job.Name) - } - var ( - localName = job.Args[0] - sf = utils.NewStreamFormatter(job.GetenvBool("json")) - authConfig = &auth.AuthConfig{} - metaHeaders map[string][]string - ) - - job.GetenvJson("authConfig", authConfig) - job.GetenvJson("metaHeaders", metaHeaders) - if _, err := srv.poolAdd("push", localName); err != nil { - return job.Error(err) - } - defer srv.poolRemove("push", localName) - - // Resolve the Repository name from fqn to endpoint + name - hostname, remoteName, err := registry.ResolveRepositoryName(localName) - if err != nil { - return job.Error(err) - } - - endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname) - if err != nil { - return job.Error(err) - } - - img, err := srv.runtime.graph.Get(localName) - r, err2 := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint) - if err2 != nil { - return job.Error(err2) - } - - if err != nil { - reposLen := len(srv.runtime.repositories.Repositories[localName]) - job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen)) - // If it fails, try to get the repository - if localRepo, exists := srv.runtime.repositories.Repositories[localName]; exists { - if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, sf); err != nil { - return job.Error(err) - } - return engine.StatusOK - } - return job.Error(err) - } - - var token []string - job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName)) - if _, err := srv.pushImage(r, job.Stdout, remoteName, img.ID, endpoint, token, sf); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) ImageImport(job *engine.Job) engine.Status { - if n := len(job.Args); n != 2 && n != 3 { - return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name) - } - var ( - src = job.Args[0] - repo = job.Args[1] - tag string - sf = utils.NewStreamFormatter(job.GetenvBool("json")) - archive archive.ArchiveReader - resp *http.Response - ) - if len(job.Args) > 2 { - tag = job.Args[2] - } - - if src == "-" { - archive = job.Stdin - } else { - u, err := url.Parse(src) - if err != nil { - return job.Error(err) - } - if u.Scheme == "" { - u.Scheme = "http" - u.Host = src - u.Path = "" - } - job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u)) - // Download with curl (pretty progress bar) - // If curl is not available, fallback to http.Get() - resp, err = utils.Download(u.String()) - if err != nil { - return job.Error(err) - } - progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing") - defer progressReader.Close() - archive = progressReader - } - img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil) - if err != nil { - return job.Error(err) - } - // Optionally register the image at REPO/TAG - if repo != "" { - if err := srv.runtime.repositories.Set(repo, tag, img.ID, true); err != nil { - return job.Error(err) - } - } - job.Stdout.Write(sf.FormatStatus("", img.ID)) - return engine.StatusOK -} - -func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { - var name string - if len(job.Args) == 1 { - name = job.Args[0] - } else if len(job.Args) > 1 { - return job.Errorf("Usage: %s", job.Name) - } - config := runconfig.ContainerConfigFromJob(job) - if config.Memory != 0 && config.Memory < 524288 { - return job.Errorf("Minimum 
memory limit allowed is 512k") - } - if config.Memory > 0 && !srv.runtime.sysInfo.MemoryLimit { - job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n") - config.Memory = 0 - } - if config.Memory > 0 && !srv.runtime.sysInfo.SwapLimit { - job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n") - config.MemorySwap = -1 - } - resolvConf, err := utils.GetResolvConf() - if err != nil { - return job.Error(err) - } - if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) { - job.Errorf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", defaultDns) - config.Dns = defaultDns - } - - container, buildWarnings, err := srv.runtime.Create(config, name) - if err != nil { - if srv.runtime.graph.IsNotExist(err) { - _, tag := utils.ParseRepositoryTag(config.Image) - if tag == "" { - tag = DEFAULTTAG - } - return job.Errorf("No such image: %s (tag: %s)", config.Image, tag) - } - return job.Error(err) - } - if !container.Config.NetworkDisabled && srv.runtime.sysInfo.IPv4ForwardingDisabled { - job.Errorf("IPv4 forwarding is disabled.\n") - } - srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image)) - // FIXME: this is necessary because runtime.Create might return a nil container - // with a non-nil error. This should not happen! Once it's fixed we - // can remove this workaround. - if container != nil { - job.Printf("%s\n", container.ID) - } - for _, warning := range buildWarnings { - job.Errorf("%s\n", warning) - } - return engine.StatusOK -} - -func (srv *Server) ContainerRestart(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Usage: %s CONTAINER\n", job.Name) - } - var ( - name = job.Args[0] - t = 10 - ) - if job.EnvExists("t") { - t = job.GetenvInt("t") - } - if container := srv.runtime.Get(name); container != nil { - if err := container.Restart(int(t)); err != nil { - return job.Errorf("Cannot restart container %s: %s\n", name, err) - } - srv.LogEvent("restart", container.ID, srv.runtime.repositories.ImageName(container.Image)) - } else { - return job.Errorf("No such container: %s\n", name) - } - return engine.StatusOK -} - -func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER\n", job.Name) - } - name := job.Args[0] - removeVolume := job.GetenvBool("removeVolume") - removeLink := job.GetenvBool("removeLink") - forceRemove := job.GetenvBool("forceRemove") - - container := srv.runtime.Get(name) - - if removeLink { - if container == nil { - return job.Errorf("No such link: %s", name) - } - name, err := getFullName(name) - if err != nil { - job.Error(err) - } - parent, n := path.Split(name) - if parent == "/" { - return job.Errorf("Conflict, cannot remove the default name of the container") - } - pe := srv.runtime.containerGraph.Get(parent) - if pe == nil { - return job.Errorf("Cannot get parent %s for name %s", parent, name) - } - parentContainer := srv.runtime.Get(pe.ID()) - - if parentContainer != nil && parentContainer.activeLinks != nil { - if link, exists := parentContainer.activeLinks[n]; exists { - link.Disable() - } else { - utils.Debugf("Could not find active link for %s", name) - } - } - - if err := srv.runtime.containerGraph.Delete(name); err != nil { - return job.Error(err) - } - return engine.StatusOK - } - - if container != nil { - if container.State.IsRunning() { - if forceRemove { - if err := container.Stop(5); err != nil { - return job.Errorf("Could not stop running container, cannot remove - %v", err) - } - } else { - return job.Errorf("Impossible to remove a running container, please stop it first or use -f") - } - } - if err := srv.runtime.Destroy(container); err != nil { - return job.Errorf("Cannot destroy container %s: %s", name, err) - } - srv.LogEvent("destroy", container.ID, srv.runtime.repositories.ImageName(container.Image)) - - if removeVolume { - var ( - volumes = make(map[string]struct{}) - binds = make(map[string]struct{}) - usedVolumes = make(map[string]*Container) - ) - - // the volume id is always the base of the path - getVolumeId := func(p string) string { - return filepath.Base(strings.TrimSuffix(p, "/layer")) - } - - // populate bind map so that they can be skipped and not removed - for _, bind := range container.hostConfig.Binds { - source := strings.Split(bind, ":")[0] - // TODO: refactor all volume stuff, all of it - // this is very important that we eval the link - // or comparing the keys to container.Volumes will not work - p, err := filepath.EvalSymlinks(source) - if err != nil { - return job.Error(err) - } - source = p - binds[source] = struct{}{} - } - - // Store all the deleted containers volumes - for _, volumeId := range container.Volumes { - // Skip the volumes mounted from external - // bind mounts here will will be evaluated for a symlink - if _, exists := binds[volumeId]; exists { - continue - } - - volumeId = getVolumeId(volumeId) - volumes[volumeId] = struct{}{} - } - - // Retrieve all volumes from all remaining containers - for _, container := range srv.runtime.List() { - for _, containerVolumeId := range container.Volumes { - containerVolumeId = getVolumeId(containerVolumeId) - usedVolumes[containerVolumeId] = container - } - } - - for volumeId := range volumes { - // If the requested volu - if c, exists := usedVolumes[volumeId]; exists { - log.Printf("The volume %s is used by the container %s. Impossible to remove it. 
Skipping.\n", volumeId, c.ID) - continue - } - if err := srv.runtime.volumes.Delete(volumeId); err != nil { - return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err) - } - } - } - } else { - return job.Errorf("No such container: %s", name) - } - return engine.StatusOK -} - -func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force bool) error { - var ( - repoName, tag string - tags = []string{} - ) - - repoName, tag = utils.ParseRepositoryTag(name) - if tag == "" { - tag = DEFAULTTAG - } - - img, err := srv.runtime.repositories.LookupImage(name) - if err != nil { - if r, _ := srv.runtime.repositories.Get(repoName); r != nil { - return fmt.Errorf("No such image: %s:%s", repoName, tag) - } - return fmt.Errorf("No such image: %s", name) - } - - if strings.Contains(img.ID, name) { - repoName = "" - tag = "" - } - - byParents, err := srv.runtime.graph.ByParent() - if err != nil { - return err - } - - //If delete by id, see if the id belong only to one repository - if repoName == "" { - for _, repoAndTag := range srv.runtime.repositories.ByID()[img.ID] { - parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag) - if repoName == "" || repoName == parsedRepo { - repoName = parsedRepo - if parsedTag != "" { - tags = append(tags, parsedTag) - } - } else if repoName != parsedRepo && !force { - // the id belongs to multiple repos, like base:latest and user:test, - // in that case return conflict - return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name) - } - } - } else { - tags = append(tags, tag) - } - - if !first && len(tags) > 0 { - return nil - } - - //Untag the current image - for _, tag := range tags { - tagDeleted, err := srv.runtime.repositories.Delete(repoName, tag) - if err != nil { - return err - } - if tagDeleted { - out := &engine.Env{} - out.Set("Untagged", repoName+":"+tag) - imgs.Add(out) - srv.LogEvent("untag", img.ID, "") - } - } - tags = srv.runtime.repositories.ByID()[img.ID] - if (len(tags) <= 1 && repoName == "") || len(tags) == 0 { - if len(byParents[img.ID]) == 0 { - if err := srv.canDeleteImage(img.ID); err != nil { - return err - } - if err := srv.runtime.repositories.DeleteAll(img.ID); err != nil { - return err - } - if err := srv.runtime.graph.Delete(img.ID); err != nil { - return err - } - out := &engine.Env{} - out.Set("Deleted", img.ID) - imgs.Add(out) - srv.LogEvent("delete", img.ID, "") - if img.Parent != "" { - err := srv.DeleteImage(img.Parent, imgs, false, force) - if first { - return err - } - - } - - } - } - return nil -} - -func (srv *Server) ImageDelete(job *engine.Job) engine.Status { - if n := len(job.Args); n != 1 { - return job.Errorf("Usage: %s IMAGE", job.Name) - } - imgs := engine.NewTable("", 0) - if err := srv.DeleteImage(job.Args[0], imgs, true, job.GetenvBool("force")); err != nil { - return job.Error(err) - } - if len(imgs.Data) == 0 { - return job.Errorf("Conflict, %s wasn't deleted", job.Args[0]) - } - if _, err := imgs.WriteListTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) canDeleteImage(imgID string) error { - for _, container := range srv.runtime.List() { - parent, err := srv.runtime.repositories.LookupImage(container.Image) - if err != nil { - return err - } - - if err := parent.WalkHistory(func(p *Image) error { - if imgID == p.ID { - return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it", utils.TruncateID(imgID), utils.TruncateID(container.ID)) - 
} - return nil - }); err != nil { - return err - } - } - return nil -} - -func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*Image, error) { - - // Retrieve all images - images, err := srv.runtime.graph.Map() - if err != nil { - return nil, err - } - - // Store the tree in a map of map (map[parentId][childId]) - imageMap := make(map[string]map[string]struct{}) - for _, img := range images { - if _, exists := imageMap[img.Parent]; !exists { - imageMap[img.Parent] = make(map[string]struct{}) - } - imageMap[img.Parent][img.ID] = struct{}{} - } - - // Loop on the children of the given image and check the config - var match *Image - for elem := range imageMap[imgID] { - img, err := srv.runtime.graph.Get(elem) - if err != nil { - return nil, err - } - if runconfig.Compare(&img.ContainerConfig, config) { - if match == nil || match.Created.Before(img.Created) { - match = img - } - } - } - return match, nil -} - -func (srv *Server) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error { - runtime := srv.runtime - - if hostConfig != nil && hostConfig.Links != nil { - for _, l := range hostConfig.Links { - parts, err := parseLink(l) - if err != nil { - return err - } - child, err := srv.runtime.GetByName(parts["name"]) - if err != nil { - return err - } - if child == nil { - return fmt.Errorf("Could not get container for %s", parts["name"]) - } - if err := runtime.RegisterLink(container, child, parts["alias"]); err != nil { - return err - } - } - - // After we load all the links into the runtime - // set them to nil on the hostconfig - hostConfig.Links = nil - if err := container.writeHostConfig(); err != nil { - return err - } - } - return nil -} - -func (srv *Server) ContainerStart(job *engine.Job) engine.Status { - if len(job.Args) < 1 { - return job.Errorf("Usage: %s container_id", job.Name) - } - name := job.Args[0] - runtime := srv.runtime - container := runtime.Get(name) - - if container == nil { - return job.Errorf("No such container: %s", name) - } - // If no environment was set, then no hostconfig was passed. - if len(job.Environ()) > 0 { - hostConfig := runconfig.ContainerHostConfigFromJob(job) - // Validate the HostConfig binds. Make sure that: - // 1) the source of a bind mount isn't / - // The bind mount "/:/foo" isn't allowed. - // 2) Check that the source exists - // The source to be bind mounted must exist. 
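Concretely, the two checks described in the comment above come down to splitting each "src:dst[:mode]" spec on ":" and inspecting the source; the loop that follows applies exactly this. A standalone sketch of the same logic (validateBind is a hypothetical helper, not the daemon's actual code):

package main

import (
	"fmt"
	"os"
	"strings"
)

// validateBind applies the two rules listed above to one -v bind spec:
// the source may not be "/", and it must exist on the host (the removed
// code creates the missing directory rather than failing).
func validateBind(bind string) error {
	source := strings.Split(bind, ":")[0]
	if source == "/" {
		return fmt.Errorf("invalid bind mount %q: source can't be '/'", bind)
	}
	if _, err := os.Stat(source); os.IsNotExist(err) {
		if err := os.MkdirAll(source, 0755); err != nil {
			return fmt.Errorf("could not create %q for bind mount: %v", source, err)
		}
	}
	return nil
}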
- for _, bind := range hostConfig.Binds { - splitBind := strings.Split(bind, ":") - source := splitBind[0] - - // refuse to bind mount "/" to the container - if source == "/" { - return job.Errorf("Invalid bind mount '%s' : source can't be '/'", bind) - } - - // ensure the source exists on the host - _, err := os.Stat(source) - if err != nil && os.IsNotExist(err) { - err = os.MkdirAll(source, 0755) - if err != nil { - return job.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error()) - } - } - } - // Register any links from the host config before starting the container - if err := srv.RegisterLinks(container, hostConfig); err != nil { - return job.Error(err) - } - container.hostConfig = hostConfig - container.ToDisk() - } - if err := container.Start(); err != nil { - return job.Errorf("Cannot start container %s: %s", name, err) - } - srv.LogEvent("start", container.ID, runtime.repositories.ImageName(container.Image)) - - return engine.StatusOK -} - -func (srv *Server) ContainerStop(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Usage: %s CONTAINER\n", job.Name) - } - var ( - name = job.Args[0] - t = 10 - ) - if job.EnvExists("t") { - t = job.GetenvInt("t") - } - if container := srv.runtime.Get(name); container != nil { - if err := container.Stop(int(t)); err != nil { - return job.Errorf("Cannot stop container %s: %s\n", name, err) - } - srv.LogEvent("stop", container.ID, srv.runtime.repositories.ImageName(container.Image)) - } else { - return job.Errorf("No such container: %s\n", name) - } - return engine.StatusOK -} - -func (srv *Server) ContainerWait(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Usage: %s", job.Name) - } - name := job.Args[0] - if container := srv.runtime.Get(name); container != nil { - status := container.Wait() - job.Printf("%d\n", status) - return engine.StatusOK - } - return job.Errorf("%s: no such container: %s", job.Name, name) -} - -func (srv *Server) ContainerResize(job *engine.Job) engine.Status { - if len(job.Args) != 3 { - return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name) - } - name := job.Args[0] - height, err := strconv.Atoi(job.Args[1]) - if err != nil { - return job.Error(err) - } - width, err := strconv.Atoi(job.Args[2]) - if err != nil { - return job.Error(err) - } - if container := srv.runtime.Get(name); container != nil { - if err := container.Resize(height, width); err != nil { - return job.Error(err) - } - return engine.StatusOK - } - return job.Errorf("No such container: %s", name) -} - -func (srv *Server) ContainerAttach(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Usage: %s CONTAINER\n", job.Name) - } - - var ( - name = job.Args[0] - logs = job.GetenvBool("logs") - stream = job.GetenvBool("stream") - stdin = job.GetenvBool("stdin") - stdout = job.GetenvBool("stdout") - stderr = job.GetenvBool("stderr") - ) - - container := srv.runtime.Get(name) - if container == nil { - return job.Errorf("No such container: %s", name) - } - - //logs - if logs { - cLog, err := container.ReadLog("json") - if err != nil && os.IsNotExist(err) { - // Legacy logs - utils.Debugf("Old logs format") - if stdout { - cLog, err := container.ReadLog("stdout") - if err != nil { - utils.Errorf("Error reading logs (stdout): %s", err) - } else if _, err := io.Copy(job.Stdout, cLog); err != nil { - utils.Errorf("Error streaming logs (stdout): %s", err) - } - } - if stderr { - cLog, err := container.ReadLog("stderr") - if err != nil { - utils.Errorf("Error reading logs (stderr): %s", err) - } else if _, err := io.Copy(job.Stderr, cLog); err != nil { - utils.Errorf("Error streaming logs (stderr): %s", err) - } - } - } else if err != nil { - utils.Errorf("Error reading logs (json): %s", err) - } else { - dec := json.NewDecoder(cLog) - for { - l := &utils.JSONLog{} - - if err := dec.Decode(l); err == io.EOF { - break - } else if err != nil { - utils.Errorf("Error streaming logs: %s", err) - break - } - if l.Stream == "stdout" && stdout { - fmt.Fprintf(job.Stdout, "%s", l.Log) - } - if l.Stream == "stderr" && stderr { - fmt.Fprintf(job.Stderr, "%s", l.Log) - } - } - } - } - - //stream - if stream { - if container.State.IsGhost() { - return job.Errorf("Impossible to attach to a ghost container") - } - - var ( - cStdin io.ReadCloser - cStdout, cStderr io.Writer - cStdinCloser io.Closer - ) - - if stdin { - r, w := io.Pipe() - go func() { - defer w.Close() - defer utils.Debugf("Closing buffered stdin pipe") - io.Copy(w, job.Stdin) - }() - cStdin = r - cStdinCloser = job.Stdin - } - if stdout { - cStdout = job.Stdout - } - if stderr { - cStderr = job.Stderr - } - - <-container.Attach(cStdin, cStdinCloser, cStdout, cStderr) - - // If we are in stdinonce mode, wait for the process to end - // otherwise, simply return - if container.Config.StdinOnce && !container.Config.Tty { - container.Wait() - } - } - return engine.StatusOK -} - -func (srv *Server) ContainerInspect(name string) (*Container, error) { - if container := srv.runtime.Get(name); container != nil { - return container, nil - } - return nil, fmt.Errorf("No such container: %s", name) -} - -func (srv *Server) ImageInspect(name string) (*Image, error) { - if image, err := srv.runtime.repositories.LookupImage(name); err == nil && image != nil { - return image, nil - } - return nil, fmt.Errorf("No such image: %s", name) -} - -func (srv *Server) JobInspect(job *engine.Job) engine.Status { - // TODO: deprecate KIND/conflict - if n := len(job.Args); n != 2 { - return job.Errorf("Usage: %s CONTAINER|IMAGE KIND", job.Name) - } - var ( - name = job.Args[0] - 
kind = job.Args[1] - object interface{} - conflict = job.GetenvBool("conflict") //should the job detect conflict between containers and images - image, errImage = srv.ImageInspect(name) - container, errContainer = srv.ContainerInspect(name) - ) - - if conflict && image != nil && container != nil { - return job.Errorf("Conflict between containers and images") - } - - switch kind { - case "image": - if errImage != nil { - return job.Error(errImage) - } - object = image - case "container": - if errContainer != nil { - return job.Error(errContainer) - } - object = &struct { - *Container - HostConfig *runconfig.HostConfig - }{container, container.hostConfig} - default: - return job.Errorf("Unknown kind: %s", kind) - } - - b, err := json.Marshal(object) - if err != nil { - return job.Error(err) - } - job.Stdout.Write(b) - return engine.StatusOK -} - -func (srv *Server) ContainerCopy(job *engine.Job) engine.Status { - if len(job.Args) != 2 { - return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name) - } - - var ( - name = job.Args[0] - resource = job.Args[1] - ) - - if container := srv.runtime.Get(name); container != nil { - - data, err := container.Copy(resource) - if err != nil { - return job.Error(err) - } - defer data.Close() - - if _, err := io.Copy(job.Stdout, data); err != nil { - return job.Error(err) - } - return engine.StatusOK - } - return job.Errorf("No such container: %s", name) -} - -func NewServer(eng *engine.Engine, config *DaemonConfig) (*Server, error) { - runtime, err := NewRuntime(config, eng) - if err != nil { - return nil, err - } - srv := &Server{ - Eng: eng, - runtime: runtime, - pullingPool: make(map[string]chan struct{}), - pushingPool: make(map[string]chan struct{}), - events: make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events - listeners: make(map[string]chan utils.JSONMessage), - running: true, - } - runtime.srv = srv - return srv, nil -} - -func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory { - srv.Lock() - defer srv.Unlock() - v := dockerVersion() - httpVersion := make([]utils.VersionInfo, 0, 4) - httpVersion = append(httpVersion, &simpleVersionInfo{"docker", v.Get("Version")}) - httpVersion = append(httpVersion, &simpleVersionInfo{"go", v.Get("GoVersion")}) - httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", v.Get("GitCommit")}) - httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", v.Get("KernelVersion")}) - httpVersion = append(httpVersion, &simpleVersionInfo{"os", v.Get("Os")}) - httpVersion = append(httpVersion, &simpleVersionInfo{"arch", v.Get("Arch")}) - ud := utils.NewHTTPUserAgentDecorator(httpVersion...) 
- md := &utils.HTTPMetaHeadersDecorator{ - Headers: metaHeaders, - } - factory := utils.NewHTTPRequestFactory(ud, md) - return factory -} - -func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage { - now := time.Now().UTC().Unix() - jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now} - srv.AddEvent(jm) - for _, c := range srv.listeners { - select { // non blocking channel - case c <- jm: - default: - } - } - return &jm -} - -func (srv *Server) AddEvent(jm utils.JSONMessage) { - srv.Lock() - defer srv.Unlock() - srv.events = append(srv.events, jm) -} - -func (srv *Server) GetEvents() []utils.JSONMessage { - srv.RLock() - defer srv.RUnlock() - return srv.events -} - -func (srv *Server) SetRunning(status bool) { - srv.Lock() - defer srv.Unlock() - - srv.running = status -} - -func (srv *Server) IsRunning() bool { - srv.RLock() - defer srv.RUnlock() - return srv.running -} - -func (srv *Server) Close() error { - if srv == nil { - return nil - } - srv.SetRunning(false) - if srv.runtime == nil { - return nil - } - return srv.runtime.Close() -} - -type Server struct { - sync.RWMutex - runtime *Runtime - pullingPool map[string]chan struct{} - pushingPool map[string]chan struct{} - events []utils.JSONMessage - listeners map[string]chan utils.JSONMessage - Eng *engine.Engine - running bool -} diff -Nru docker.io-0.9.1~dfsg1/server_unit_test.go docker.io-1.3.2~dfsg1/server_unit_test.go --- docker.io-0.9.1~dfsg1/server_unit_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/server_unit_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,99 +0,0 @@ -package docker - -import ( - "github.com/dotcloud/docker/utils" - "testing" - "time" -) - -func TestPools(t *testing.T) { - srv := &Server{ - pullingPool: make(map[string]chan struct{}), - pushingPool: make(map[string]chan struct{}), - } - - if _, err := srv.poolAdd("pull", "test1"); err != nil { - t.Fatal(err) - } - if _, err := srv.poolAdd("pull", "test2"); err != nil { - t.Fatal(err) - } - if _, err := srv.poolAdd("push", "test1"); err == nil || err.Error() != "pull test1 is already in progress" { - t.Fatalf("Expected `pull test1 is already in progress`") - } - if _, err := srv.poolAdd("pull", "test1"); err == nil || err.Error() != "pull test1 is already in progress" { - t.Fatalf("Expected `pull test1 is already in progress`") - } - if _, err := srv.poolAdd("wait", "test3"); err == nil || err.Error() != "Unknown pool type" { - t.Fatalf("Expected `Unknown pool type`") - } - if err := srv.poolRemove("pull", "test2"); err != nil { - t.Fatal(err) - } - if err := srv.poolRemove("pull", "test2"); err != nil { - t.Fatal(err) - } - if err := srv.poolRemove("pull", "test1"); err != nil { - t.Fatal(err) - } - if err := srv.poolRemove("push", "test1"); err != nil { - t.Fatal(err) - } - if err := srv.poolRemove("wait", "test3"); err == nil || err.Error() != "Unknown pool type" { - t.Fatalf("Expected `Unknown pool type`") - } -} - -func TestLogEvent(t *testing.T) { - srv := &Server{ - events: make([]utils.JSONMessage, 0, 64), - listeners: make(map[string]chan utils.JSONMessage), - } - - srv.LogEvent("fakeaction", "fakeid", "fakeimage") - - listener := make(chan utils.JSONMessage) - srv.Lock() - srv.listeners["test"] = listener - srv.Unlock() - - srv.LogEvent("fakeaction2", "fakeid", "fakeimage") - - numEvents := len(srv.GetEvents()) - if numEvents != 2 { - t.Fatalf("Expected 2 events, found %d", numEvents) - } - go func() { - time.Sleep(200 * time.Millisecond) - srv.LogEvent("fakeaction3", "fakeid", "fakeimage") - 
time.Sleep(200 * time.Millisecond) - srv.LogEvent("fakeaction4", "fakeid", "fakeimage") - }() - - setTimeout(t, "Listening for events timed out", 2*time.Second, func() { - for i := 2; i < 4; i++ { - event := <-listener - if event != srv.GetEvents()[i] { - t.Fatalf("Event received is different from expected") - } - } - }) -} - -// FIXME: this is duplicated from integration/commands_test.go -func setTimeout(t *testing.T, msg string, d time.Duration, f func()) { - c := make(chan bool) - - // Make sure we don't run too long - go func() { - time.Sleep(d) - c <- true - }() - go func() { - f() - c <- false - }() - if <-c && msg != "" { - t.Fatal(msg) - } -} diff -Nru docker.io-0.9.1~dfsg1/sorter.go docker.io-1.3.2~dfsg1/sorter.go --- docker.io-0.9.1~dfsg1/sorter.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/sorter.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -package docker - -import "sort" - -type containerSorter struct { - containers []*Container - by func(i, j *Container) bool -} - -func (s *containerSorter) Len() int { - return len(s.containers) -} - -func (s *containerSorter) Swap(i, j int) { - s.containers[i], s.containers[j] = s.containers[j], s.containers[i] -} - -func (s *containerSorter) Less(i, j int) bool { - return s.by(s.containers[i], s.containers[j]) -} - -func sortContainers(containers []*Container, predicate func(i, j *Container) bool) { - s := &containerSorter{containers, predicate} - sort.Sort(s) -} diff -Nru docker.io-0.9.1~dfsg1/state.go docker.io-1.3.2~dfsg1/state.go --- docker.io-0.9.1~dfsg1/state.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/state.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,81 +0,0 @@ -package docker - -import ( - "fmt" - "github.com/dotcloud/docker/utils" - "sync" - "time" -) - -type State struct { - sync.RWMutex - Running bool - Pid int - ExitCode int - StartedAt time.Time - FinishedAt time.Time - Ghost bool -} - -// String returns a human-readable description of the state -func (s *State) String() string { - s.RLock() - defer s.RUnlock() - - if s.Running { - if s.Ghost { - return fmt.Sprintf("Ghost") - } - return fmt.Sprintf("Up %s", utils.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) - } - return fmt.Sprintf("Exit %d", s.ExitCode) -} - -func (s *State) IsRunning() bool { - s.RLock() - defer s.RUnlock() - - return s.Running -} - -func (s *State) IsGhost() bool { - s.RLock() - defer s.RUnlock() - - return s.Ghost -} - -func (s *State) GetExitCode() int { - s.RLock() - defer s.RUnlock() - - return s.ExitCode -} - -func (s *State) SetGhost(val bool) { - s.Lock() - defer s.Unlock() - - s.Ghost = val -} - -func (s *State) SetRunning(pid int) { - s.Lock() - defer s.Unlock() - - s.Running = true - s.Ghost = false - s.ExitCode = 0 - s.Pid = pid - s.StartedAt = time.Now().UTC() -} - -func (s *State) SetStopped(exitCode int) { - s.Lock() - defer s.Unlock() - - s.Running = false - s.Pid = 0 - s.FinishedAt = time.Now().UTC() - s.ExitCode = exitCode -} diff -Nru docker.io-0.9.1~dfsg1/sysinit/sysinit.go docker.io-1.3.2~dfsg1/sysinit/sysinit.go --- docker.io-0.9.1~dfsg1/sysinit/sysinit.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/sysinit/sysinit.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,92 +0,0 @@ -package sysinit - -import ( - "encoding/json" - "flag" - "fmt" - "github.com/dotcloud/docker/execdriver" - _ "github.com/dotcloud/docker/execdriver/lxc" - _ "github.com/dotcloud/docker/execdriver/native" - "io/ioutil" - "log" - "os" - "strings" -) - -// Clear environment pollution introduced by 
lxc-start -func setupEnv(args *execdriver.InitArgs) { - os.Clearenv() - for _, kv := range args.Env { - parts := strings.SplitN(kv, "=", 2) - if len(parts) == 1 { - parts = append(parts, "") - } - os.Setenv(parts[0], parts[1]) - } -} - -func executeProgram(args *execdriver.InitArgs) error { - setupEnv(args) - - dockerInitFct, err := execdriver.GetInitFunc(args.Driver) - if err != nil { - panic(err) - } - return dockerInitFct(args) -} - -// Sys Init code -// This code is run INSIDE the container and is responsible for setting -// up the environment before running the actual process -func SysInit() { - if len(os.Args) <= 1 { - fmt.Println("You should not invoke dockerinit manually") - os.Exit(1) - } - - var ( - // Get cmdline arguments - user = flag.String("u", "", "username or uid") - gateway = flag.String("g", "", "gateway address") - ip = flag.String("i", "", "ip address") - workDir = flag.String("w", "", "workdir") - privileged = flag.Bool("privileged", false, "privileged mode") - mtu = flag.Int("mtu", 1500, "interface mtu") - driver = flag.String("driver", "", "exec driver") - pipe = flag.Int("pipe", 0, "sync pipe fd") - console = flag.String("console", "", "console (pty slave) path") - root = flag.String("root", ".", "root path for configuration files") - ) - flag.Parse() - - // Get env - var env []string - content, err := ioutil.ReadFile(".dockerenv") - if err != nil { - log.Fatalf("Unable to load environment variables: %v", err) - } - if err := json.Unmarshal(content, &env); err != nil { - log.Fatalf("Unable to unmarshal environment variables: %v", err) - } - // Propagate the plugin-specific container env variable - env = append(env, "container="+os.Getenv("container")) - - args := &execdriver.InitArgs{ - User: *user, - Gateway: *gateway, - Ip: *ip, - WorkDir: *workDir, - Privileged: *privileged, - Env: env, - Args: flag.Args(), - Mtu: *mtu, - Driver: *driver, - Console: *console, - Pipe: *pipe, - Root: *root, - } - - if err := executeProgram(args); err != nil { - log.Fatal(err) - } -} diff -Nru docker.io-0.9.1~dfsg1/tags.go docker.io-1.3.2~dfsg1/tags.go --- docker.io-0.9.1~dfsg1/tags.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/tags.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,234 +0,0 @@ -package docker - -import ( - "encoding/json" - "fmt" - "github.com/dotcloud/docker/utils" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" -) - -const DEFAULTTAG = "latest" - -type TagStore struct { - path string - graph *Graph - Repositories map[string]Repository -} - -type Repository map[string]string - -func NewTagStore(path string, graph *Graph) (*TagStore, error) { - abspath, err := filepath.Abs(path) - if err != nil { - return nil, err - } - store := &TagStore{ - path: abspath, - graph: graph, - Repositories: make(map[string]Repository), - } - // Load the json file if it exists, otherwise create it. 
- if err := store.Reload(); os.IsNotExist(err) { - if err := store.Save(); err != nil { - return nil, err - } - } else if err != nil { - return nil, err - } - return store, nil -} - -func (store *TagStore) Save() error { - // Store the json ball - jsonData, err := json.Marshal(store) - if err != nil { - return err - } - if err := ioutil.WriteFile(store.path, jsonData, 0600); err != nil { - return err - } - return nil -} - -func (store *TagStore) Reload() error { - jsonData, err := ioutil.ReadFile(store.path) - if err != nil { - return err - } - if err := json.Unmarshal(jsonData, store); err != nil { - return err - } - return nil -} - -func (store *TagStore) LookupImage(name string) (*Image, error) { - // FIXME: standardize on returning nil when the image doesn't exist, and err for everything else - // (so we can pass all errors here) - repos, tag := utils.ParseRepositoryTag(name) - if tag == "" { - tag = DEFAULTTAG - } - img, err := store.GetImage(repos, tag) - if err != nil { - return nil, err - } else if img == nil { - if img, err = store.graph.Get(name); err != nil { - return nil, err - } - } - return img, nil -} - -// Return a reverse-lookup table of all the names which refer to each image -// Eg. {"43b5f19b10584": {"base:latest", "base:v1"}} -func (store *TagStore) ByID() map[string][]string { - byID := make(map[string][]string) - for repoName, repository := range store.Repositories { - for tag, id := range repository { - name := repoName + ":" + tag - if _, exists := byID[id]; !exists { - byID[id] = []string{name} - } else { - byID[id] = append(byID[id], name) - sort.Strings(byID[id]) - } - } - } - return byID -} - -func (store *TagStore) ImageName(id string) string { - if names, exists := store.ByID()[id]; exists && len(names) > 0 { - return names[0] - } - return utils.TruncateID(id) -} - -func (store *TagStore) DeleteAll(id string) error { - names, exists := store.ByID()[id] - if !exists || len(names) == 0 { - return nil - } - for _, name := range names { - if strings.Contains(name, ":") { - nameParts := strings.Split(name, ":") - if _, err := store.Delete(nameParts[0], nameParts[1]); err != nil { - return err - } - } else { - if _, err := store.Delete(name, ""); err != nil { - return err - } - } - } - return nil -} - -func (store *TagStore) Delete(repoName, tag string) (bool, error) { - deleted := false - if err := store.Reload(); err != nil { - return false, err - } - if r, exists := store.Repositories[repoName]; exists { - if tag != "" { - if _, exists2 := r[tag]; exists2 { - delete(r, tag) - if len(r) == 0 { - delete(store.Repositories, repoName) - } - deleted = true - } else { - return false, fmt.Errorf("No such tag: %s:%s", repoName, tag) - } - } else { - delete(store.Repositories, repoName) - deleted = true - } - } else { - fmt.Errorf("No such repository: %s", repoName) - } - return deleted, store.Save() -} - -func (store *TagStore) Set(repoName, tag, imageName string, force bool) error { - img, err := store.LookupImage(imageName) - if err != nil { - return err - } - if tag == "" { - tag = DEFAULTTAG - } - if err := validateRepoName(repoName); err != nil { - return err - } - if err := validateTagName(tag); err != nil { - return err - } - if err := store.Reload(); err != nil { - return err - } - var repo Repository - if r, exists := store.Repositories[repoName]; exists { - repo = r - } else { - repo = make(map[string]string) - if old, exists := store.Repositories[repoName]; exists && !force { - return fmt.Errorf("Conflict: Tag %s:%s is already set to %s", repoName, tag, old) 
- } - store.Repositories[repoName] = repo - } - repo[tag] = img.ID - return store.Save() -} - -func (store *TagStore) Get(repoName string) (Repository, error) { - if err := store.Reload(); err != nil { - return nil, err - } - if r, exists := store.Repositories[repoName]; exists { - return r, nil - } - return nil, nil -} - -func (store *TagStore) GetImage(repoName, tagOrID string) (*Image, error) { - repo, err := store.Get(repoName) - if err != nil { - return nil, err - } else if repo == nil { - return nil, nil - } - if revision, exists := repo[tagOrID]; exists { - return store.graph.Get(revision) - } - // If no matching tag is found, search through images for a matching image id - for _, revision := range repo { - if strings.HasPrefix(revision, tagOrID) { - return store.graph.Get(revision) - } - } - return nil, nil -} - -// Validate the name of a repository -func validateRepoName(name string) error { - if name == "" { - return fmt.Errorf("Repository name can't be empty") - } - return nil -} - -// Validate the name of a tag -func validateTagName(name string) error { - if name == "" { - return fmt.Errorf("Tag name can't be empty") - } - if strings.Contains(name, "/") || strings.Contains(name, ":") { - return fmt.Errorf("Illegal tag name: %s", name) - } - return nil -} diff -Nru docker.io-0.9.1~dfsg1/tags_unit_test.go docker.io-1.3.2~dfsg1/tags_unit_test.go --- docker.io-0.9.1~dfsg1/tags_unit_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/tags_unit_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,88 +0,0 @@ -package docker - -import ( - "github.com/dotcloud/docker/graphdriver" - "github.com/dotcloud/docker/utils" - "os" - "path" - "testing" -) - -const ( - testImageName = "myapp" - testImageID = "foo" -) - -func mkTestTagStore(root string, t *testing.T) *TagStore { - driver, err := graphdriver.New(root) - if err != nil { - t.Fatal(err) - } - graph, err := NewGraph(root, driver) - if err != nil { - t.Fatal(err) - } - store, err := NewTagStore(path.Join(root, "tags"), graph) - if err != nil { - t.Fatal(err) - } - archive, err := fakeTar() - if err != nil { - t.Fatal(err) - } - img := &Image{ID: testImageID} - // FIXME: this fails on Darwin with: - // tags_unit_test.go:36: mkdir /var/folders/7g/b3ydb5gx4t94ndr_cljffbt80000gq/T/docker-test569b-tRunner-075013689/vfs/dir/foo/etc/postgres: permission denied - if err := graph.Register(nil, archive, img); err != nil { - t.Fatal(err) - } - if err := store.Set(testImageName, "", testImageID, false); err != nil { - t.Fatal(err) - } - return store -} - -func TestLookupImage(t *testing.T) { - tmp, err := utils.TestDirectory("") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - store := mkTestTagStore(tmp, t) - defer store.graph.driver.Cleanup() - - if img, err := store.LookupImage(testImageName); err != nil { - t.Fatal(err) - } else if img == nil { - t.Errorf("Expected 1 image, none found") - } - if img, err := store.LookupImage(testImageName + ":" + DEFAULTTAG); err != nil { - t.Fatal(err) - } else if img == nil { - t.Errorf("Expected 1 image, none found") - } - - if img, err := store.LookupImage(testImageName + ":" + "fail"); err == nil { - t.Errorf("Expected error, none found") - } else if img != nil { - t.Errorf("Expected 0 image, 1 found") - } - - if img, err := store.LookupImage("fail:fail"); err == nil { - t.Errorf("Expected error, none found") - } else if img != nil { - t.Errorf("Expected 0 image, 1 found") - } - - if img, err := store.LookupImage(testImageID); err != nil { - t.Fatal(err) - } else if img == 
nil { - t.Errorf("Expected 1 image, none found") - } - - if img, err := store.LookupImage(testImageName + ":" + testImageID); err != nil { - t.Fatal(err) - } else if img == nil { - t.Errorf("Expected 1 image, none found") - } -} diff -Nru docker.io-0.9.1~dfsg1/.travis.yml docker.io-1.3.2~dfsg1/.travis.yml --- docker.io-0.9.1~dfsg1/.travis.yml 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/.travis.yml 2014-11-24 17:38:01.000000000 +0000 @@ -3,28 +3,37 @@ language: go -go: 1.2 +go: +# This should match the version in the Dockerfile. + - 1.3.1 +# Test against older versions too, just for a little extra retrocompat. + - 1.2 + +# Let us have pretty experimental Docker-based Travis workers. +# (These spin up much faster than the VM-based ones.) +sudo: false # Disable the normal go build. -install: true +install: + - export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs exclude_graphdriver_devicemapper' # btrfs and devicemapper fail to compile thanks to a couple missing headers (which we can't install thanks to "sudo: false") + - export AUTO_GOPATH=1 +# some of Docker's unit tests don't work inside Travis (yet!), so we purge those test files for now + - rm -f daemon/graphdriver/btrfs/*_test.go # fails to compile (missing header) + - rm -f daemon/graphdriver/devmapper/*_test.go # fails to compile (missing header) + - rm -f daemon/execdriver/lxc/*_test.go # fails to run (missing "lxc-start") + - rm -f daemon/graphdriver/aufs/*_test.go # fails to run ("backing file system is unsupported for this graph driver") + - rm -f daemon/graphdriver/vfs/*_test.go # fails to run (not root, which these tests assume "/var/tmp/... no owned by uid 0") + - rm -f daemon/networkdriver/bridge/*_test.go # fails to run ("Failed to initialize network driver") + - rm -f graph/*_test.go # fails to run ("mkdir /tmp/docker-test.../vfs/dir/foo/etc/postgres: permission denied") + - rm -f pkg/mount/*_test.go # fails to run ("permission denied") before_script: - env | sort - - sudo apt-get update -qq - - sudo apt-get install -qq python-yaml - - git remote add upstream git://github.com/dotcloud/docker.git - - upstream=master; - if [ "$TRAVIS_PULL_REQUEST" != false ]; then - upstream=$TRAVIS_BRANCH; - fi; - git fetch --append --no-tags upstream refs/heads/$upstream:refs/remotes/upstream/$upstream -# sometimes we have upstream master already as origin/master (PRs), but other times we don't, so let's just make sure we have a completely unambiguous way to specify "upstream master" from here out -# but if it's a PR against non-master, we need that upstream branch instead :) - - sudo pip install -r docs/requirements.txt script: - - hack/travis/dco.py - - hack/travis/gofmt.py - - make -sC docs SPHINXOPTS=-q docs man + - hack/make.sh validate-dco + - hack/make.sh validate-gofmt + - DOCKER_CLIENTONLY=1 ./hack/make.sh dynbinary + - ./hack/make.sh dynbinary dyntest-unit # vim:set sw=2 ts=2: diff -Nru docker.io-0.9.1~dfsg1/trust/service.go docker.io-1.3.2~dfsg1/trust/service.go --- docker.io-0.9.1~dfsg1/trust/service.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/trust/service.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,74 @@ +package trust + +import ( + "fmt" + "time" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/log" + "github.com/docker/libtrust" +) + +func (t *TrustStore) Install(eng *engine.Engine) error { + for name, handler := range map[string]engine.Handler{ + "trust_key_check": t.CmdCheckKey, + "trust_update_base": t.CmdUpdateBase, + } { + if err := eng.Register(name, 
handler); err != nil { + return fmt.Errorf("Could not register %q: %v", name, err) + } + } + return nil +} + +func (t *TrustStore) CmdCheckKey(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s NAMESPACE", job.Name) + } + var ( + namespace = job.Args[0] + keyBytes = job.Getenv("PublicKey") + ) + + if keyBytes == "" { + return job.Errorf("Missing PublicKey") + } + pk, err := libtrust.UnmarshalPublicKeyJWK([]byte(keyBytes)) + if err != nil { + return job.Errorf("Error unmarshalling public key: %s", err) + } + + permission := uint16(job.GetenvInt("Permission")) + if permission == 0 { + permission = 0x03 + } + + t.RLock() + defer t.RUnlock() + if t.graph == nil { + job.Stdout.Write([]byte("no graph")) + return engine.StatusOK + } + + // Check if any expired grants + verified, err := t.graph.Verify(pk, namespace, permission) + if err != nil { + return job.Errorf("Error verifying key to namespace: %s", namespace) + } + if !verified { + log.Debugf("Verification failed for %s using key %s", namespace, pk.KeyID()) + job.Stdout.Write([]byte("not verified")) + } else if t.expiration.Before(time.Now()) { + job.Stdout.Write([]byte("expired")) + } else { + job.Stdout.Write([]byte("verified")) + } + + return engine.StatusOK +} + +func (t *TrustStore) CmdUpdateBase(job *engine.Job) engine.Status { + t.fetch() + + return engine.StatusOK +} diff -Nru docker.io-0.9.1~dfsg1/trust/trusts.go docker.io-1.3.2~dfsg1/trust/trusts.go --- docker.io-0.9.1~dfsg1/trust/trusts.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/trust/trusts.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,199 @@ +package trust + +import ( + "crypto/x509" + "errors" + "io/ioutil" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "sync" + "time" + + "github.com/docker/docker/pkg/log" + "github.com/docker/libtrust/trustgraph" +) + +type TrustStore struct { + path string + caPool *x509.CertPool + graph trustgraph.TrustGraph + expiration time.Time + fetcher *time.Timer + fetchTime time.Duration + autofetch bool + httpClient *http.Client + baseEndpoints map[string]*url.URL + + sync.RWMutex +} + +// defaultFetchtime represents the starting duration to wait between +// fetching sections of the graph. Unsuccessful fetches should +// increase time between fetching. 
+const defaultFetchtime = 45 * time.Second + +var baseEndpoints = map[string]string{"official": "https://dvjy3tqbc323p.cloudfront.net/trust/official.json"} + +func NewTrustStore(path string) (*TrustStore, error) { + abspath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + + // Create base graph url map + endpoints := map[string]*url.URL{} + for name, endpoint := range baseEndpoints { + u, err := url.Parse(endpoint) + if err != nil { + return nil, err + } + endpoints[name] = u + } + + // Load grant files + t := &TrustStore{ + path: abspath, + caPool: nil, + httpClient: &http.Client{}, + fetchTime: time.Millisecond, + baseEndpoints: endpoints, + } + + err = t.reload() + if err != nil { + return nil, err + } + + return t, nil +} + +func (t *TrustStore) reload() error { + t.Lock() + defer t.Unlock() + + matches, err := filepath.Glob(filepath.Join(t.path, "*.json")) + if err != nil { + return err + } + statements := make([]*trustgraph.Statement, len(matches)) + for i, match := range matches { + f, err := os.Open(match) + if err != nil { + return err + } + statements[i], err = trustgraph.LoadStatement(f, nil) + if err != nil { + f.Close() + return err + } + f.Close() + } + if len(statements) == 0 { + if t.autofetch { + log.Debugf("No grants, fetching") + t.fetcher = time.AfterFunc(t.fetchTime, t.fetch) + } + return nil + } + + grants, expiration, err := trustgraph.CollapseStatements(statements, true) + if err != nil { + return err + } + + t.expiration = expiration + t.graph = trustgraph.NewMemoryGraph(grants) + log.Debugf("Reloaded graph with %d grants expiring at %s", len(grants), expiration) + + if t.autofetch { + nextFetch := expiration.Sub(time.Now()) + if nextFetch < 0 { + nextFetch = defaultFetchtime + } else { + nextFetch = time.Duration(0.8 * (float64)(nextFetch)) + } + t.fetcher = time.AfterFunc(nextFetch, t.fetch) + } + + return nil +} + +func (t *TrustStore) fetchBaseGraph(u *url.URL) (*trustgraph.Statement, error) { + req := &http.Request{ + Method: "GET", + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Body: nil, + Host: u.Host, + } + + resp, err := t.httpClient.Do(req) + if err != nil { + return nil, err + } + if resp.StatusCode == 404 { + return nil, errors.New("base graph does not exist") + } + + defer resp.Body.Close() + + return trustgraph.LoadStatement(resp.Body, t.caPool) +} + +// fetch retrieves updated base graphs. This function cannot error, it +// should only log errors +func (t *TrustStore) fetch() { + t.Lock() + defer t.Unlock() + + if t.autofetch && t.fetcher == nil { + // Do nothing ?? 
+ return + } + + fetchCount := 0 + for bg, ep := range t.baseEndpoints { + statement, err := t.fetchBaseGraph(ep) + if err != nil { + log.Infof("Trust graph fetch failed: %s", err) + continue + } + b, err := statement.Bytes() + if err != nil { + log.Infof("Bad trust graph statement: %s", err) + continue + } + // TODO check if value differs + err = ioutil.WriteFile(path.Join(t.path, bg+".json"), b, 0600) + if err != nil { + log.Infof("Error writing trust graph statement: %s", err) + } + fetchCount++ + } + log.Debugf("Fetched %d base graphs at %s", fetchCount, time.Now()) + + if fetchCount > 0 { + go func() { + err := t.reload() + if err != nil { + // TODO log + log.Infof("Reload of trust graph failed: %s", err) + } + }() + t.fetchTime = defaultFetchtime + t.fetcher = nil + } else if t.autofetch { + maxTime := 10 * defaultFetchtime + t.fetchTime = time.Duration(1.5 * (float64)(t.fetchTime+time.Second)) + if t.fetchTime > maxTime { + t.fetchTime = maxTime + } + t.fetcher = time.AfterFunc(t.fetchTime, t.fetch) + } +} diff -Nru docker.io-0.9.1~dfsg1/utils/checksum.go docker.io-1.3.2~dfsg1/utils/checksum.go --- docker.io-0.9.1~dfsg1/utils/checksum.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/utils/checksum.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -package utils - -import ( - "encoding/hex" - "hash" - "io" -) - -type CheckSum struct { - io.Reader - Hash hash.Hash -} - -func (cs *CheckSum) Read(buf []byte) (int, error) { - n, err := cs.Reader.Read(buf) - if err == nil { - cs.Hash.Write(buf[:n]) - } - return n, err -} - -func (cs *CheckSum) Sum() string { - return hex.EncodeToString(cs.Hash.Sum(nil)) -} diff -Nru docker.io-0.9.1~dfsg1/utils/fs.go docker.io-1.3.2~dfsg1/utils/fs.go --- docker.io-0.9.1~dfsg1/utils/fs.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/utils/fs.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,94 +0,0 @@ -package utils - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "syscall" -) - -// TreeSize walks a directory tree and returns its total size in bytes. -func TreeSize(dir string) (size int64, err error) { - data := make(map[uint64]bool) - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error { - // Ignore directory sizes - if fileInfo == nil { - return nil - } - - s := fileInfo.Size() - if fileInfo.IsDir() || s == 0 { - return nil - } - - // Check inode to handle hard links correctly - inode := fileInfo.Sys().(*syscall.Stat_t).Ino - // inode is not a uint64 on all platforms. Cast it to avoid issues. - if _, exists := data[uint64(inode)]; exists { - return nil - } - // inode is not a uint64 on all platforms. Cast it to avoid issues. - data[uint64(inode)] = false - - size += s - - return nil - }) - return -} - -// FollowSymlink will follow an existing link and scope it to the root -// path provided. 
-func FollowSymlinkInScope(link, root string) (string, error) { - prev := "/" - - root, err := filepath.Abs(root) - if err != nil { - return "", err - } - - link, err = filepath.Abs(link) - if err != nil { - return "", err - } - - if !strings.HasPrefix(filepath.Dir(link), root) { - return "", fmt.Errorf("%s is not within %s", link, root) - } - - for _, p := range strings.Split(link, "/") { - prev = filepath.Join(prev, p) - prev = filepath.Clean(prev) - - for { - stat, err := os.Lstat(prev) - if err != nil { - if os.IsNotExist(err) { - break - } - return "", err - } - if stat.Mode()&os.ModeSymlink == os.ModeSymlink { - dest, err := os.Readlink(prev) - if err != nil { - return "", err - } - - switch dest[0] { - case '/': - prev = filepath.Join(root, dest) - case '.': - prev, _ = filepath.Abs(prev) - - if prev = filepath.Clean(filepath.Join(filepath.Dir(prev), dest)); len(prev) < len(root) { - prev = filepath.Join(root, filepath.Base(dest)) - } - } - } else { - break - } - } - } - return prev, nil -} diff -Nru docker.io-0.9.1~dfsg1/utils/fs_test.go docker.io-1.3.2~dfsg1/utils/fs_test.go --- docker.io-0.9.1~dfsg1/utils/fs_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/utils/fs_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,83 +0,0 @@ -package utils - -import ( - "path/filepath" - "testing" -) - -func abs(t *testing.T, p string) string { - o, err := filepath.Abs(p) - if err != nil { - t.Fatal(err) - } - return o -} - -func TestFollowSymLinkNormal(t *testing.T) { - link := "testdata/fs/a/d/c/data" - - rewrite, err := FollowSymlinkInScope(link, "testdata") - if err != nil { - t.Fatal(err) - } - - if expected := abs(t, "testdata/b/c/data"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) - } -} - -func TestFollowSymLinkRandomString(t *testing.T) { - if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { - t.Fatal("Random string should fail but didn't") - } -} - -func TestFollowSymLinkLastLink(t *testing.T) { - link := "testdata/fs/a/d" - - rewrite, err := FollowSymlinkInScope(link, "testdata") - if err != nil { - t.Fatal(err) - } - - if expected := abs(t, "testdata/b"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) - } -} - -func TestFollowSymLinkRelativeLink(t *testing.T) { - link := "testdata/fs/a/e/c/data" - - rewrite, err := FollowSymlinkInScope(link, "testdata") - if err != nil { - t.Fatal(err) - } - - if expected := abs(t, "testdata/fs/b/c/data"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) - } -} - -func TestFollowSymLinkRelativeLinkScope(t *testing.T) { - link := "testdata/fs/a/f" - - rewrite, err := FollowSymlinkInScope(link, "testdata") - if err != nil { - t.Fatal(err) - } - - if expected := abs(t, "testdata/test"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) - } - - link = "testdata/fs/b/h" - - rewrite, err = FollowSymlinkInScope(link, "testdata") - if err != nil { - t.Fatal(err) - } - - if expected := abs(t, "testdata/root"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) - } -} diff -Nru docker.io-0.9.1~dfsg1/utils/http.go docker.io-1.3.2~dfsg1/utils/http.go --- docker.io-0.9.1~dfsg1/utils/http.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/utils/http.go 2014-11-24 17:38:01.000000000 +0000 @@ -1,10 +1,11 @@ package utils import ( - "bytes" "io" "net/http" "strings" + + "github.com/docker/docker/pkg/log" ) // VersionInfo is used to model entities which has a version. 
@@ -15,11 +16,13 @@ } func validVersion(version VersionInfo) bool { - stopChars := " \t\r\n/" - if strings.ContainsAny(version.Name(), stopChars) { + const stopChars = " \t\r\n/" + name := version.Name() + vers := version.Version() + if len(name) == 0 || strings.ContainsAny(name, stopChars) { return false } - if strings.ContainsAny(version.Version(), stopChars) { + if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { return false } return true @@ -36,27 +39,18 @@ return base } - var buf bytes.Buffer + verstrs := make([]string, 0, 1+len(versions)) if len(base) > 0 { - buf.Write([]byte(base)) + verstrs = append(verstrs, base) } for _, v := range versions { - name := []byte(v.Name()) - version := []byte(v.Version()) - - if len(name) == 0 || len(version) == 0 { - continue - } if !validVersion(v) { continue } - buf.Write([]byte(v.Name())) - buf.Write([]byte("/")) - buf.Write([]byte(v.Version())) - buf.Write([]byte(" ")) + verstrs = append(verstrs, v.Name()+"/"+v.Version()) } - return buf.String() + return strings.Join(verstrs, " ") } // HTTPRequestDecorator is used to change an instance of @@ -165,6 +159,6 @@ return nil, err } } - Debugf("%v -- HEADERS: %v", req.URL, req.Header) + log.Debugf("%v -- HEADERS: %v", req.URL, req.Header) return req, err } diff -Nru docker.io-0.9.1~dfsg1/utils/jsonmessage.go docker.io-1.3.2~dfsg1/utils/jsonmessage.go --- docker.io-0.9.1~dfsg1/utils/jsonmessage.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/utils/jsonmessage.go 2014-11-24 17:38:01.000000000 +0000 @@ -3,10 +3,13 @@ import ( "encoding/json" "fmt" - "github.com/dotcloud/docker/pkg/term" "io" "strings" "time" + + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/pkg/timeutils" + "github.com/docker/docker/pkg/units" ) type JSONError struct { @@ -41,14 +44,19 @@ if p.Current <= 0 && p.Total <= 0 { return "" } - current := HumanSize(int64(p.Current)) + current := units.HumanSize(int64(p.Current)) if p.Total <= 0 { return fmt.Sprintf("%8v", current) } - total := HumanSize(int64(p.Total)) + total := units.HumanSize(int64(p.Total)) percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 if width > 110 { - pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", 50-percentage)) + // this number can't be negetive gh#7136 + numSpaces := 0 + if 50-percentage > 0 { + numSpaces = 50 - percentage + } + pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) } numbersBox = fmt.Sprintf("%8v/%v", current, total) @@ -85,7 +93,7 @@ return jm.Error } var endl string - if isTerminal && jm.Stream == "" { + if isTerminal && jm.Stream == "" && jm.Progress != nil { // [2K = erase entire current line fmt.Fprintf(out, "%c[2K\r", 27) endl = "\r" @@ -93,7 +101,7 @@ return nil } if jm.Time != 0 { - fmt.Fprintf(out, "[%s] ", time.Unix(jm.Time, 0)) + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(timeutils.RFC3339NanoFixed)) } if jm.ID != "" { fmt.Fprintf(out, "%s: ", jm.ID) @@ -131,27 +139,27 @@ if jm.Progress != nil { jm.Progress.terminalFd = terminalFd } - if jm.Progress != nil || jm.ProgressMessage != "" { + if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { line, ok := ids[jm.ID] if !ok { line = len(ids) ids[jm.ID] = line - fmt.Fprintf(out, "\n") + if isTerminal { + fmt.Fprintf(out, "\n") + } diff = 0 } else { diff = len(ids) - line } - if isTerminal { + if jm.ID != "" && isTerminal { // [{diff}A = move cursor up diff rows fmt.Fprintf(out, "%c[%dA", 27, diff) } } err := jm.Display(out, 
diff -Nru docker.io-0.9.1~dfsg1/utils/jsonmessage_test.go docker.io-1.3.2~dfsg1/utils/jsonmessage_test.go
--- docker.io-0.9.1~dfsg1/utils/jsonmessage_test.go 2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/utils/jsonmessage_test.go 2014-11-24 17:38:01.000000000 +0000
@@ -17,13 +17,22 @@
        t.Fatalf("Expected empty string, got '%s'", jp.String())
    }
 
+    expected := " 1 B"
    jp2 := JSONProgress{Current: 1}
-    if jp2.String() != " 1 B" {
-        t.Fatalf("Expected ' 1 B', got '%s'", jp2.String())
+    if jp2.String() != expected {
+        t.Fatalf("Expected %q, got %q", expected, jp2.String())
    }
 
+    expected = "[=========================>                         ]     50 B/100 B"
    jp3 := JSONProgress{Current: 50, Total: 100}
-    if jp3.String() != "[=========================>                         ]     50 B/100 B" {
-        t.Fatalf("Expected '[=========================>                         ]     50 B/100 B', got '%s'", jp3.String())
+    if jp3.String() != expected {
+        t.Fatalf("Expected %q, got %q", expected, jp3.String())
+    }
+
+    // this number can't be negative gh#7136
+    expected = "[==============================================================>]     50 B/40 B"
+    jp4 := JSONProgress{Current: 50, Total: 40}
+    if jp4.String() != expected {
+        t.Fatalf("Expected %q, got %q", expected, jp4.String())
    }
 }
diff -Nru docker.io-0.9.1~dfsg1/utils/progressreader.go docker.io-1.3.2~dfsg1/utils/progressreader.go
--- docker.io-0.9.1~dfsg1/utils/progressreader.go 2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/utils/progressreader.go 2014-11-24 17:38:01.000000000 +0000
@@ -32,7 +32,7 @@
        r.lastUpdate = r.progress.Current
    }
    // Send newline when complete
-    if r.newLine && err != nil {
+    if r.newLine && err != nil && read == 0 {
        r.output.Write(r.sf.FormatStatus("", ""))
    }
    return read, err
diff -Nru docker.io-0.9.1~dfsg1/utils/random.go docker.io-1.3.2~dfsg1/utils/random.go
--- docker.io-0.9.1~dfsg1/utils/random.go 2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/utils/random.go 2014-11-24 17:38:01.000000000 +0000
@@ -8,8 +8,8 @@
 func RandomString() string {
    id := make([]byte, 32)
-    _, err := io.ReadFull(rand.Reader, id)
-    if err != nil {
+
+    if _, err := io.ReadFull(rand.Reader, id); err != nil {
        panic(err) // This shouldn't happen
    }
    return hex.EncodeToString(id)
diff -Nru docker.io-0.9.1~dfsg1/utils/stdcopy.go docker.io-1.3.2~dfsg1/utils/stdcopy.go
--- docker.io-0.9.1~dfsg1/utils/stdcopy.go 2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/utils/stdcopy.go 1970-01-01 00:00:00.000000000 +0000
@@ -1,154 +0,0 @@
-package utils
-
-import (
-    "encoding/binary"
-    "errors"
-    "io"
-)
-
-const (
-    StdWriterPrefixLen = 8
-    StdWriterFdIndex   = 0
-    StdWriterSizeIndex = 4
-)
-
-type StdType [StdWriterPrefixLen]byte
-
-var (
-    Stdin  StdType = StdType{0: 0}
-    Stdout StdType = StdType{0: 1}
-    Stderr StdType = StdType{0: 2}
-)
-
-type StdWriter struct {
-    io.Writer
-    prefix  StdType
-    sizeBuf []byte
-}
-
-func (w *StdWriter) Write(buf []byte) (n int, err error) {
-    if w == nil || w.Writer == nil {
-        return 0, errors.New("Writer not instantiated")
-    }
-    binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf)))
-    buf = append(w.prefix[:], buf...)
-
-    n, err = w.Writer.Write(buf)
-    return n - StdWriterPrefixLen, err
-}
-
-// NewStdWriter instantiates a new Writer.
-// Everything written to it will be encapsulated using a custom format,
-// and written to the underlying `w` stream.
-// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
-// `t` indicates the id of the stream to encapsulate.
-// It can be utils.Stdin, utils.Stdout, utils.Stderr.
-func NewStdWriter(w io.Writer, t StdType) *StdWriter {
-    if len(t) != StdWriterPrefixLen {
-        return nil
-    }
-
-    return &StdWriter{
-        Writer:  w,
-        prefix:  t,
-        sizeBuf: make([]byte, 4),
-    }
-}
-
-var ErrInvalidStdHeader = errors.New("Unrecognized input header")
-
-// StdCopy is a modified version of io.Copy.
-//
-// StdCopy will demultiplex `src`, assuming that it contains two streams,
-// previously multiplexed together using a StdWriter instance.
-// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
-//
-// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
-// In other words: if `err` is non nil, it indicates a real underlying error.
-//
-// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
-func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
-    var (
-        buf       = make([]byte, 32*1024+StdWriterPrefixLen+1)
-        bufLen    = len(buf)
-        nr, nw    int
-        er, ew    error
-        out       io.Writer
-        frameSize int
-    )
-
-    for {
-        // Make sure we have at least a full header
-        for nr < StdWriterPrefixLen {
-            var nr2 int
-            nr2, er = src.Read(buf[nr:])
-            if er == io.EOF {
-                return written, nil
-            }
-            if er != nil {
-                return 0, er
-            }
-            nr += nr2
-        }
-
-        // Check the first byte to know where to write
-        switch buf[StdWriterFdIndex] {
-        case 0:
-            fallthrough
-        case 1:
-            // Write on stdout
-            out = dstout
-        case 2:
-            // Write on stderr
-            out = dsterr
-        default:
-            Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex])
-            return 0, ErrInvalidStdHeader
-        }
-
-        // Retrieve the size of the frame
-        frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4]))
-
-        // Check if the buffer is big enough to read the frame.
-        // Extend it if necessary.
-        if frameSize+StdWriterPrefixLen > bufLen {
-            Debugf("Extending buffer cap.")
-            buf = append(buf, make([]byte, frameSize-len(buf)+1)...)
-            bufLen = len(buf)
-        }
-
-        // While the amount of bytes read is less than the size of the frame + header, we keep reading
-        for nr < frameSize+StdWriterPrefixLen {
-            var nr2 int
-            nr2, er = src.Read(buf[nr:])
-            if er == io.EOF {
-                return written, nil
-            }
-            if er != nil {
-                Debugf("Error reading frame: %s", er)
-                return 0, er
-            }
-            nr += nr2
-        }
-
-        // Write the retrieved frame (without header)
-        nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen])
-        if nw > 0 {
-            written += int64(nw)
-        }
-        if ew != nil {
-            Debugf("Error writing frame: %s", ew)
-            return 0, ew
-        }
-        // If the frame has not been fully written: error
-        if nw != frameSize {
-            Debugf("Error Short Write: (%d on %d)", nw, frameSize)
-            return 0, io.ErrShortWrite
-        }
-
-        // Move the rest of the buffer to the beginning
-        copy(buf, buf[frameSize+StdWriterPrefixLen:])
-        // Move the index
-        nr -= frameSize + StdWriterPrefixLen
-    }
-}
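The framing the removed StdCopy/StdWriter pair implements is small enough to restate: each frame is an 8-byte header whose byte 0 selects the stream (0 stdin, 1 stdout, 2 stderr) and whose bytes 4-7 hold the payload length as a big-endian uint32, followed by the payload itself. A standard-library sketch of one frame's round trip:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "io"
)

func main() {
    var mux bytes.Buffer

    // Encode one stdout frame by hand, the way StdWriter.Write does:
    // header[0] = stream id, header[4:8] = big-endian payload length.
    payload := []byte("hello\n")
    header := make([]byte, 8)
    header[0] = 1 // stdout
    binary.BigEndian.PutUint32(header[4:], uint32(len(payload)))
    mux.Write(header)
    mux.Write(payload)

    // Demultiplex it again the way StdCopy does: read a full header,
    // then read exactly frameSize payload bytes.
    hdr := make([]byte, 8)
    if _, err := io.ReadFull(&mux, hdr); err != nil {
        panic(err)
    }
    frameSize := binary.BigEndian.Uint32(hdr[4:8])
    frame := make([]byte, frameSize)
    if _, err := io.ReadFull(&mux, frame); err != nil {
        panic(err)
    }
    fmt.Printf("stream %d: %q\n", hdr[0], frame)
}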
diff -Nru docker.io-0.9.1~dfsg1/utils/streamformatter.go docker.io-1.3.2~dfsg1/utils/streamformatter.go
--- docker.io-0.9.1~dfsg1/utils/streamformatter.go 2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/utils/streamformatter.go 2014-11-24 17:38:01.000000000 +0000
@@ -3,15 +3,15 @@
 import (
    "encoding/json"
    "fmt"
+    "io"
 )
 
 type StreamFormatter struct {
    json bool
-    used bool
 }
 
 func NewStreamFormatter(json bool) *StreamFormatter {
-    return &StreamFormatter{json, false}
+    return &StreamFormatter{json}
 }
 
 const streamNewline = "\r\n"
@@ -19,7 +19,6 @@
 var streamNewlineBytes = []byte(streamNewline)
 
 func (sf *StreamFormatter) FormatStream(str string) []byte {
-    sf.used = true
    if sf.json {
        b, err := json.Marshal(&JSONMessage{Stream: str})
        if err != nil {
@@ -31,7 +30,6 @@
 }
 
 func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte {
-    sf.used = true
    str := fmt.Sprintf(format, a...)
if sf.json { b, err := json.Marshal(&JSONMessage{ID: id, Status: str}) @@ -44,7 +42,6 @@ } func (sf *StreamFormatter) FormatError(err error) []byte { - sf.used = true if sf.json { jsonError, ok := err.(*JSONError) if !ok { @@ -62,7 +59,6 @@ if progress == nil { progress = &JSONProgress{} } - sf.used = true if sf.json { b, err := json.Marshal(&JSONMessage{ @@ -83,10 +79,34 @@ return []byte(action + " " + progress.String() + endl) } -func (sf *StreamFormatter) Used() bool { - return sf.used -} - func (sf *StreamFormatter) Json() bool { return sf.json } + +type StdoutFormater struct { + io.Writer + *StreamFormatter +} + +func (sf *StdoutFormater) Write(buf []byte) (int, error) { + formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) + n, err := sf.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} + +type StderrFormater struct { + io.Writer + *StreamFormatter +} + +func (sf *StderrFormater) Write(buf []byte) (int, error) { + formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") + n, err := sf.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} diff -Nru docker.io-0.9.1~dfsg1/utils/streamformatter_test.go docker.io-1.3.2~dfsg1/utils/streamformatter_test.go --- docker.io-0.9.1~dfsg1/utils/streamformatter_test.go 1970-01-01 00:00:00.000000000 +0000 +++ docker.io-1.3.2~dfsg1/utils/streamformatter_test.go 2014-11-24 17:38:01.000000000 +0000 @@ -0,0 +1,67 @@ +package utils + +import ( + "encoding/json" + "errors" + "reflect" + "testing" +) + +func TestFormatStream(t *testing.T) { + sf := NewStreamFormatter(true) + res := sf.FormatStream("stream") + if string(res) != `{"stream":"stream"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatStatus(t *testing.T) { + sf := NewStreamFormatter(true) + res := sf.FormatStatus("ID", "%s%d", "a", 1) + if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatSimpleError(t *testing.T) { + sf := NewStreamFormatter(true) + res := sf.FormatError(errors.New("Error for formatter")) + if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatJSONError(t *testing.T) { + sf := NewStreamFormatter(true) + err := &JSONError{Code: 50, Message: "Json error"} + res := sf.FormatError(err) + if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatProgress(t *testing.T) { + sf := NewStreamFormatter(true) + progress := &JSONProgress{ + Current: 15, + Total: 30, + Start: 1, + } + res := sf.FormatProgress("id", "action", progress) + msg := &JSONMessage{} + if err := json.Unmarshal(res, msg); err != nil { + t.Fatal(err) + } + if msg.ID != "id" { + t.Fatalf("ID must be 'id', got: %s", msg.ID) + } + if msg.Status != "action" { + t.Fatalf("Status must be 'action', got: %s", msg.Status) + } + if msg.ProgressMessage != progress.String() { + t.Fatalf("ProgressMessage must be %s, got: %s", progress.String(), msg.ProgressMessage) + } + if !reflect.DeepEqual(msg.Progress, progress) { + t.Fatal("Original progress not equals progress from FormatProgress") + } +} diff -Nru docker.io-0.9.1~dfsg1/utils/tarsum.go docker.io-1.3.2~dfsg1/utils/tarsum.go --- docker.io-0.9.1~dfsg1/utils/tarsum.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/utils/tarsum.go 1970-01-01 
00:00:00.000000000 +0000 @@ -1,181 +0,0 @@ -package utils - -import ( - "bytes" - "compress/gzip" - "crypto/sha256" - "encoding/hex" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "hash" - "io" - "sort" - "strconv" - "strings" -) - -type TarSum struct { - io.Reader - tarR *tar.Reader - tarW *tar.Writer - gz writeCloseFlusher - bufTar *bytes.Buffer - bufGz *bytes.Buffer - h hash.Hash - sums map[string]string - currentFile string - finished bool - first bool - DisableCompression bool -} - -type writeCloseFlusher interface { - io.WriteCloser - Flush() error -} - -type nopCloseFlusher struct { - io.Writer -} - -func (n *nopCloseFlusher) Close() error { - return nil -} - -func (n *nopCloseFlusher) Flush() error { - return nil -} - -func (ts *TarSum) encodeHeader(h *tar.Header) error { - for _, elem := range [][2]string{ - {"name", h.Name}, - {"mode", strconv.Itoa(int(h.Mode))}, - {"uid", strconv.Itoa(h.Uid)}, - {"gid", strconv.Itoa(h.Gid)}, - {"size", strconv.Itoa(int(h.Size))}, - {"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))}, - {"typeflag", string([]byte{h.Typeflag})}, - {"linkname", h.Linkname}, - {"uname", h.Uname}, - {"gname", h.Gname}, - {"devmajor", strconv.Itoa(int(h.Devmajor))}, - {"devminor", strconv.Itoa(int(h.Devminor))}, - // {"atime", strconv.Itoa(int(h.AccessTime.UTC().Unix()))}, - // {"ctime", strconv.Itoa(int(h.ChangeTime.UTC().Unix()))}, - } { - if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { - return err - } - } - return nil -} - -func (ts *TarSum) Read(buf []byte) (int, error) { - if ts.gz == nil { - ts.bufTar = bytes.NewBuffer([]byte{}) - ts.bufGz = bytes.NewBuffer([]byte{}) - ts.tarR = tar.NewReader(ts.Reader) - ts.tarW = tar.NewWriter(ts.bufTar) - if !ts.DisableCompression { - ts.gz = gzip.NewWriter(ts.bufGz) - } else { - ts.gz = &nopCloseFlusher{Writer: ts.bufGz} - } - ts.h = sha256.New() - ts.h.Reset() - ts.first = true - ts.sums = make(map[string]string) - } - - if ts.finished { - return ts.bufGz.Read(buf) - } - buf2 := make([]byte, len(buf), cap(buf)) - - n, err := ts.tarR.Read(buf2) - if err != nil { - if err == io.EOF { - if _, err := ts.h.Write(buf2[:n]); err != nil { - return 0, err - } - if !ts.first { - ts.sums[ts.currentFile] = hex.EncodeToString(ts.h.Sum(nil)) - ts.h.Reset() - } else { - ts.first = false - } - - currentHeader, err := ts.tarR.Next() - if err != nil { - if err == io.EOF { - if err := ts.gz.Close(); err != nil { - return 0, err - } - ts.finished = true - return n, nil - } - return n, err - } - ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/") - if err := ts.encodeHeader(currentHeader); err != nil { - return 0, err - } - if err := ts.tarW.WriteHeader(currentHeader); err != nil { - return 0, err - } - if _, err := ts.tarW.Write(buf2[:n]); err != nil { - return 0, err - } - ts.tarW.Flush() - if _, err := io.Copy(ts.gz, ts.bufTar); err != nil { - return 0, err - } - ts.gz.Flush() - - return ts.bufGz.Read(buf) - } - return n, err - } - - // Filling the hash buffer - if _, err = ts.h.Write(buf2[:n]); err != nil { - return 0, err - } - - // Filling the tar writter - if _, err = ts.tarW.Write(buf2[:n]); err != nil { - return 0, err - } - ts.tarW.Flush() - - // Filling the gz writter - if _, err = io.Copy(ts.gz, ts.bufTar); err != nil { - return 0, err - } - ts.gz.Flush() - - return ts.bufGz.Read(buf) -} - -func (ts *TarSum) Sum(extra []byte) string { - var sums []string - - for _, sum := range ts.sums { - sums = append(sums, sum) - } - sort.Strings(sums) - h 
:= sha256.New()
-    if extra != nil {
-        h.Write(extra)
-    }
-    for _, sum := range sums {
-        Debugf("-->%s<--", sum)
-        h.Write([]byte(sum))
-    }
-    checksum := "tarsum+sha256:" + hex.EncodeToString(h.Sum(nil))
-    Debugf("checksum processed: %s", checksum)
-    return checksum
-}
-
-func (ts *TarSum) GetSums() map[string]string {
-    return ts.sums
-}
diff -Nru docker.io-0.9.1~dfsg1/utils/timeoutconn.go docker.io-1.3.2~dfsg1/utils/timeoutconn.go
--- docker.io-0.9.1~dfsg1/utils/timeoutconn.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/utils/timeoutconn.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,26 @@
+package utils
+
+import (
+    "net"
+    "time"
+)
+
+func NewTimeoutConn(conn net.Conn, timeout time.Duration) net.Conn {
+    return &TimeoutConn{conn, timeout}
+}
+
+// A net.Conn that sets a deadline for every Read or Write operation
+type TimeoutConn struct {
+    net.Conn
+    timeout time.Duration
+}
+
+func (c *TimeoutConn) Read(b []byte) (int, error) {
+    if c.timeout > 0 {
+        err := c.Conn.SetReadDeadline(time.Now().Add(c.timeout))
+        if err != nil {
+            return 0, err
+        }
+    }
+    return c.Conn.Read(b)
+}
diff -Nru docker.io-0.9.1~dfsg1/utils/timeoutconn_test.go docker.io-1.3.2~dfsg1/utils/timeoutconn_test.go
--- docker.io-0.9.1~dfsg1/utils/timeoutconn_test.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/utils/timeoutconn_test.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,33 @@
+package utils
+
+import (
+    "bufio"
+    "fmt"
+    "net"
+    "net/http"
+    "net/http/httptest"
+    "testing"
+    "time"
+)
+
+func TestTimeoutConnRead(t *testing.T) {
+    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+        fmt.Fprintln(w, "hello")
+    }))
+    defer ts.Close()
+    conn, err := net.Dial("tcp", ts.URL[7:])
+    if err != nil {
+        t.Fatalf("failed to create connection to %q: %v", ts.URL, err)
+    }
+    tconn := NewTimeoutConn(conn, 1*time.Second)
+
+    if _, err = bufio.NewReader(tconn).ReadString('\n'); err == nil {
+        t.Fatalf("expected timeout error, got none")
+    }
+    if _, err := fmt.Fprintf(tconn, "GET / HTTP/1.0\r\n\r\n"); err != nil {
+        t.Errorf("unexpected error: %v", err)
+    }
+    if _, err = bufio.NewReader(tconn).ReadString('\n'); err != nil {
+        t.Errorf("unexpected error: %v", err)
+    }
+}
diff -Nru docker.io-0.9.1~dfsg1/utils/tmpdir.go docker.io-1.3.2~dfsg1/utils/tmpdir.go
--- docker.io-0.9.1~dfsg1/utils/tmpdir.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/utils/tmpdir.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,12 @@
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
+
+package utils
+
+import (
+    "os"
+)
+
+// TempDir returns the default directory to use for temporary files.
+func TempDir(rootdir string) (string, error) {
+    return os.TempDir(), nil
+}
diff -Nru docker.io-0.9.1~dfsg1/utils/tmpdir_unix.go docker.io-1.3.2~dfsg1/utils/tmpdir_unix.go
--- docker.io-0.9.1~dfsg1/utils/tmpdir_unix.go 1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/utils/tmpdir_unix.go 2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,18 @@
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package utils
+
+import (
+    "os"
+    "path/filepath"
+)
+
+// TempDir returns the default directory to use for temporary files.
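A usage note on the TimeoutConn added above: wrapping a dialed connection re-arms the read deadline before every Read, so a peer that stops sending fails the read with a timeout instead of blocking forever. A sketch (the endpoint is illustrative):

package main

import (
    "bufio"
    "fmt"
    "net"
    "time"

    "github.com/docker/docker/utils"
)

func main() {
    conn, err := net.Dial("tcp", "registry.example.com:80") // illustrative endpoint
    if err != nil {
        panic(err)
    }
    defer conn.Close()

    // Each Read on tconn now has at most 30 seconds to make progress;
    // writes pass through unchanged.
    tconn := utils.NewTimeoutConn(conn, 30*time.Second)
    fmt.Fprintf(tconn, "GET / HTTP/1.0\r\n\r\n")
    line, err := bufio.NewReader(tconn).ReadString('\n')
    fmt.Println(line, err)
}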
+func TempDir(rootDir string) (string, error) { + var tmpDir string + if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { + tmpDir = filepath.Join(rootDir, "tmp") + } + err := os.MkdirAll(tmpDir, 0700) + return tmpDir, err +} diff -Nru docker.io-0.9.1~dfsg1/utils/uname_linux.go docker.io-1.3.2~dfsg1/utils/uname_linux.go --- docker.io-0.9.1~dfsg1/utils/uname_linux.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/utils/uname_linux.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -// +build amd64 - -package utils - -import ( - "syscall" -) - -type Utsname syscall.Utsname - -func uname() (*syscall.Utsname, error) { - uts := &syscall.Utsname{} - - if err := syscall.Uname(uts); err != nil { - return nil, err - } - return uts, nil -} diff -Nru docker.io-0.9.1~dfsg1/utils/uname_unsupported.go docker.io-1.3.2~dfsg1/utils/uname_unsupported.go --- docker.io-0.9.1~dfsg1/utils/uname_unsupported.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/utils/uname_unsupported.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ -// +build !linux !amd64 - -package utils - -import ( - "errors" -) - -type Utsname struct { - Release [65]byte -} - -func uname() (*Utsname, error) { - return nil, errors.New("Kernel version detection is available only on linux") -} diff -Nru docker.io-0.9.1~dfsg1/utils/utils.go docker.io-1.3.2~dfsg1/utils/utils.go --- docker.io-0.9.1~dfsg1/utils/utils.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/utils/utils.go 2014-11-24 17:38:01.000000000 +0000 @@ -2,14 +2,11 @@ import ( "bytes" + "crypto/rand" "crypto/sha1" "crypto/sha256" "encoding/hex" - "encoding/json" - "errors" "fmt" - "github.com/dotcloud/docker/dockerversion" - "index/suffixarray" "io" "io/ioutil" "net/http" @@ -21,23 +18,17 @@ "strconv" "strings" "sync" - "time" -) + "syscall" -// A common interface to access the Fatal method of -// both testing.B and testing.T. -type Fataler interface { - Fatal(args ...interface{}) -} + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/log" +) -// Go is a basic promise implementation: it wraps calls a function in a goroutine, -// and returns a channel which will later return the function's return value. -func Go(f func() error) chan error { - ch := make(chan error, 1) - go func() { - ch <- f() - }() - return ch +type KeyValuePair struct { + Key string + Value string } // Request a given URL and return an io.Reader @@ -51,104 +42,6 @@ return resp, nil } -func logf(level string, format string, a ...interface{}) { - // Retrieve the stack infos - _, file, line, ok := runtime.Caller(2) - if !ok { - file = "" - line = -1 - } else { - file = file[strings.LastIndex(file, "/")+1:] - } - - fmt.Fprintf(os.Stderr, fmt.Sprintf("[%s] %s:%d %s\n", level, file, line, format), a...) -} - -// Debug function, if the debug flag is set, then display. Do nothing otherwise -// If Docker is in damon mode, also send the debug info on the socket -func Debugf(format string, a ...interface{}) { - if os.Getenv("DEBUG") != "" { - logf("debug", format, a...) - } -} - -func Errorf(format string, a ...interface{}) { - logf("error", format, a...) -} - -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.) 
-func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours()); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*3 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%f years", d.Hours()/24/365) -} - -// HumanSize returns a human-readable approximation of a size -// using SI standard (eg. "44kB", "17MB") -func HumanSize(size int64) string { - i := 0 - var sizef float64 - sizef = float64(size) - units := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} - for sizef >= 1000.0 { - sizef = sizef / 1000.0 - i++ - } - return fmt.Sprintf("%.4g %s", sizef, units[i]) -} - -// Parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes or gibibytes, and returns the -// number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. -func RAMInBytes(size string) (bytes int64, err error) { - re, error := regexp.Compile("^(\\d+)([kKmMgG])?[bB]?$") - if error != nil { - return -1, error - } - - matches := re.FindStringSubmatch(size) - - if len(matches) != 3 { - return -1, fmt.Errorf("Invalid size: '%s'", size) - } - - memLimit, error := strconv.ParseInt(matches[1], 10, 0) - if error != nil { - return -1, error - } - - unit := strings.ToLower(matches[2]) - - if unit == "k" { - memLimit *= 1024 - } else if unit == "m" { - memLimit *= 1024 * 1024 - } else if unit == "g" { - memLimit *= 1024 * 1024 * 1024 - } - - return memLimit, nil -} - func Trunc(s string, maxlen int) string { if len(s) <= maxlen { return s @@ -257,230 +150,15 @@ return "" } -type NopWriter struct{} - -func (*NopWriter) Write(buf []byte) (int, error) { - return len(buf), nil -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { return nil } - -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &nopWriteCloser{w} -} - -type bufReader struct { - sync.Mutex - buf *bytes.Buffer - reader io.Reader - err error - wait sync.Cond -} - -func NewBufReader(r io.Reader) *bufReader { - reader := &bufReader{ - buf: &bytes.Buffer{}, - reader: r, - } - reader.wait.L = &reader.Mutex - go reader.drain() - return reader -} - -func (r *bufReader) drain() { - buf := make([]byte, 1024) - for { - n, err := r.reader.Read(buf) - r.Lock() - if err != nil { - r.err = err - } else { - r.buf.Write(buf[0:n]) - } - r.wait.Signal() - r.Unlock() - if err != nil { - break - } - } -} - -func (r *bufReader) Read(p []byte) (n int, err error) { - r.Lock() - defer r.Unlock() - for { - n, err = r.buf.Read(p) - if n > 0 { - return n, err - } - if r.err != nil { - return 0, r.err - } - r.wait.Wait() - } -} - -func (r *bufReader) Close() error { - closer, ok := r.reader.(io.ReadCloser) - if !ok { - return nil - } - return closer.Close() -} - -type WriteBroadcaster struct { - sync.Mutex - buf *bytes.Buffer - writers map[StreamWriter]bool -} - -type StreamWriter struct { - wc io.WriteCloser - stream string -} - -func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser, 
stream string) { - w.Lock() - sw := StreamWriter{wc: writer, stream: stream} - w.writers[sw] = true - w.Unlock() -} - -type JSONLog struct { - Log string `json:"log,omitempty"` - Stream string `json:"stream,omitempty"` - Created time.Time `json:"time"` -} - -func (w *WriteBroadcaster) Write(p []byte) (n int, err error) { - w.Lock() - defer w.Unlock() - w.buf.Write(p) - for sw := range w.writers { - lp := p - if sw.stream != "" { - lp = nil - for { - line, err := w.buf.ReadString('\n') - if err != nil { - w.buf.Write([]byte(line)) - break - } - b, err := json.Marshal(&JSONLog{Log: line, Stream: sw.stream, Created: time.Now().UTC()}) - if err != nil { - // On error, evict the writer - delete(w.writers, sw) - continue - } - lp = append(lp, b...) - lp = append(lp, '\n') - } - } - if n, err := sw.wc.Write(lp); err != nil || n != len(lp) { - // On error, evict the writer - delete(w.writers, sw) - } - } - return len(p), nil -} - -func (w *WriteBroadcaster) CloseWriters() error { - w.Lock() - defer w.Unlock() - for sw := range w.writers { - sw.wc.Close() - } - w.writers = make(map[StreamWriter]bool) - return nil -} - -func NewWriteBroadcaster() *WriteBroadcaster { - return &WriteBroadcaster{writers: make(map[StreamWriter]bool), buf: bytes.NewBuffer(nil)} -} - func GetTotalUsedFds() int { if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { - Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + log.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) } else { return len(fds) } return -1 } -// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. -// This is used to retrieve image and container IDs by more convenient shorthand prefixes. -type TruncIndex struct { - sync.RWMutex - index *suffixarray.Index - ids map[string]bool - bytes []byte -} - -func NewTruncIndex() *TruncIndex { - return &TruncIndex{ - index: suffixarray.New([]byte{' '}), - ids: make(map[string]bool), - bytes: []byte{' '}, - } -} - -func (idx *TruncIndex) Add(id string) error { - idx.Lock() - defer idx.Unlock() - if strings.Contains(id, " ") { - return fmt.Errorf("Illegal character: ' '") - } - if _, exists := idx.ids[id]; exists { - return fmt.Errorf("Id already exists: %s", id) - } - idx.ids[id] = true - idx.bytes = append(idx.bytes, []byte(id+" ")...) - idx.index = suffixarray.New(idx.bytes) - return nil -} - -func (idx *TruncIndex) Delete(id string) error { - idx.Lock() - defer idx.Unlock() - if _, exists := idx.ids[id]; !exists { - return fmt.Errorf("No such id: %s", id) - } - before, after, err := idx.lookup(id) - if err != nil { - return err - } - delete(idx.ids, id) - idx.bytes = append(idx.bytes[:before], idx.bytes[after:]...) 
-    idx.index = suffixarray.New(idx.bytes)
-    return nil
-}
-
-func (idx *TruncIndex) lookup(s string) (int, int, error) {
-    offsets := idx.index.Lookup([]byte(" "+s), -1)
-    //log.Printf("lookup(%s): %v (index bytes: '%s')\n", s, offsets, idx.index.Bytes())
-    if offsets == nil || len(offsets) == 0 || len(offsets) > 1 {
-        return -1, -1, fmt.Errorf("No such id: %s", s)
-    }
-    offsetBefore := offsets[0] + 1
-    offsetAfter := offsetBefore + strings.Index(string(idx.bytes[offsetBefore:]), " ")
-    return offsetBefore, offsetAfter, nil
-}
-
-func (idx *TruncIndex) Get(s string) (string, error) {
-    idx.RLock()
-    defer idx.RUnlock()
-    before, after, err := idx.lookup(s)
-    //log.Printf("Get(%s) bytes=|%s| before=|%d| after=|%d|\n", s, idx.bytes, before, after)
-    if err != nil {
-        return "", err
-    }
-    return string(idx.bytes[before:after]), err
-}
-
 // TruncateID returns a shorthand version of a string identifier for convenience.
 // A collision with other shorthands is very unlikely, but possible.
 // In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
@@ -493,6 +171,34 @@
    return id[:shortLen]
 }
 
+// GenerateRandomID returns a unique id
+func GenerateRandomID() string {
+    for {
+        id := make([]byte, 32)
+        if _, err := io.ReadFull(rand.Reader, id); err != nil {
+            panic(err) // This shouldn't happen
+        }
+        value := hex.EncodeToString(id)
+        // if we try to parse the truncated form as an int and we don't have
+        // an error then the value is all numeric and causes issues when
+        // used as a hostname. ref #3869
+        if _, err := strconv.ParseInt(TruncateID(value), 10, 64); err == nil {
+            continue
+        }
+        return value
+    }
+}
+
+func ValidateID(id string) error {
+    if id == "" {
+        return fmt.Errorf("Id can't be empty")
+    }
+    if strings.Contains(id, ":") {
+        return fmt.Errorf("Invalid character in id: ':'")
+    }
+    return nil
+}
+
 // Code c/c from io.Copy() modified to handle escape sequence
 func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
    buf := make([]byte, 32*1024)
@@ -544,92 +250,6 @@
    return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
 }
 
-type KernelVersionInfo struct {
-    Kernel int
-    Major  int
-    Minor  int
-    Flavor string
-}
-
-func (k *KernelVersionInfo) String() string {
-    return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor)
-}
-
-// Compare two KernelVersionInfo struct.
-// Returns -1 if a < b, 0 if a == b, 1 if a > b
-func CompareKernelVersion(a, b *KernelVersionInfo) int {
-    if a.Kernel < b.Kernel {
-        return -1
-    } else if a.Kernel > b.Kernel {
-        return 1
-    }
-
-    if a.Major < b.Major {
-        return -1
-    } else if a.Major > b.Major {
-        return 1
-    }
-
-    if a.Minor < b.Minor {
-        return -1
-    } else if a.Minor > b.Minor {
-        return 1
-    }
-
-    return 0
-}
-
-func GetKernelVersion() (*KernelVersionInfo, error) {
-    var (
-        err error
-    )
-
-    uts, err := uname()
-    if err != nil {
-        return nil, err
-    }
-
-    release := make([]byte, len(uts.Release))
-
-    i := 0
-    for _, c := range uts.Release {
-        release[i] = byte(c)
-        i++
-    }
-
-    // Remove the \x00 from the release for Atoi to parse correctly
-    release = release[:bytes.IndexByte(release, 0)]
-
-    return ParseRelease(string(release))
-}
-
-func ParseRelease(release string) (*KernelVersionInfo, error) {
-    var (
-        kernel, major, minor, parsed int
-        flavor, partial              string
-    )
-
-    // Ignore error from Sscanf to allow an empty flavor. Instead, just
-    // make sure we got all the version numbers.
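The regeneration loop in GenerateRandomID above guards against IDs whose truncated form is all digits, since such a value later fails hostname validation (ref #3869). A standalone restatement of that check, assuming TruncateID's 12-character prefix:

package main

import (
    "fmt"
    "strconv"
)

// truncatedFormIsNumeric reports whether the shorthand prefix of an ID parses
// as an integer; GenerateRandomID discards such IDs and rolls a new one.
func truncatedFormIsNumeric(id string) bool {
    _, err := strconv.ParseInt(id[:12], 10, 64)
    return err == nil
}

func main() {
    fmt.Println(truncatedFormIsNumeric("123456789012deadbeef")) // true: would be re-rolled
    fmt.Println(truncatedFormIsNumeric("d34db33f0000cafe1234")) // false: acceptable
}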
- parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) - if parsed < 2 { - return nil, errors.New("Can't parse kernel version " + release) - } - - // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 - parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) - if parsed < 1 { - flavor = partial - } - - return &KernelVersionInfo{ - Kernel: kernel, - Major: major, - Minor: minor, - Flavor: flavor, - }, nil -} - // FIXME: this is deprecated by CopyWithTar in archive.go func CopyDirectory(source, dest string) error { if output, err := exec.Command("cp", "-ra", source, dest).CombinedOutput(); err != nil { @@ -638,10 +258,6 @@ return nil } -type NopFlusher struct{} - -func (f *NopFlusher) Flush() {} - type WriteFlusher struct { sync.Mutex w io.Writer @@ -668,7 +284,7 @@ if f, ok := w.(http.Flusher); ok { flusher = f } else { - flusher = &NopFlusher{} + flusher = &ioutils.NopFlusher{} } return &WriteFlusher{w: w, flusher: flusher} } @@ -685,159 +301,17 @@ } func IsGIT(str string) bool { - return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/") -} - -// GetResolvConf opens and read the content of /etc/resolv.conf. -// It returns it as byte slice. -func GetResolvConf() ([]byte, error) { - resolv, err := ioutil.ReadFile("/etc/resolv.conf") - if err != nil { - Errorf("Error openning resolv.conf: %s", err) - return nil, err - } - return resolv, nil -} - -// CheckLocalDns looks into the /etc/resolv.conf, -// it returns true if there is a local nameserver or if there is no nameserver. -func CheckLocalDns(resolvConf []byte) bool { - var parsedResolvConf = StripComments(resolvConf, []byte("#")) - if !bytes.Contains(parsedResolvConf, []byte("nameserver")) { - return true - } - for _, ip := range [][]byte{ - []byte("127.0.0.1"), - []byte("127.0.1.1"), - } { - if bytes.Contains(parsedResolvConf, ip) { - return true - } - } - return false -} - -// StripComments parses input into lines and strips away comments. -func StripComments(input []byte, commentMarker []byte) []byte { - lines := bytes.Split(input, []byte("\n")) - var output []byte - for _, currentLine := range lines { - var commentIndex = bytes.Index(currentLine, commentMarker) - if commentIndex == -1 { - output = append(output, currentLine...) - } else { - output = append(output, currentLine[:commentIndex]...) - } - output = append(output, []byte("\n")...) 
- } - return output -} - -// GetNameserversAsCIDR returns nameservers (if any) listed in -// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32") -// This function's output is intended for net.ParseCIDR -func GetNameserversAsCIDR(resolvConf []byte) []string { - var parsedResolvConf = StripComments(resolvConf, []byte("#")) - nameservers := []string{} - re := regexp.MustCompile(`^\s*nameserver\s*(([0-9]+\.){3}([0-9]+))\s*$`) - for _, line := range bytes.Split(parsedResolvConf, []byte("\n")) { - var ns = re.FindSubmatch(line) - if len(ns) > 0 { - nameservers = append(nameservers, string(ns[1])+"/32") - } - } - - return nameservers -} - -// FIXME: Change this not to receive default value as parameter -func ParseHost(defaultHost string, defaultUnix, addr string) (string, error) { - var ( - proto string - host string - port int - ) - addr = strings.TrimSpace(addr) - switch { - case addr == "tcp://": - return "", fmt.Errorf("Invalid bind address format: %s", addr) - case strings.HasPrefix(addr, "unix://"): - proto = "unix" - addr = strings.TrimPrefix(addr, "unix://") - if addr == "" { - addr = defaultUnix - } - case strings.HasPrefix(addr, "tcp://"): - proto = "tcp" - addr = strings.TrimPrefix(addr, "tcp://") - case strings.HasPrefix(addr, "fd://"): - return addr, nil - case addr == "": - proto = "unix" - addr = defaultUnix - default: - if strings.Contains(addr, "://") { - return "", fmt.Errorf("Invalid bind address protocol: %s", addr) - } - proto = "tcp" - } - - if proto != "unix" && strings.Contains(addr, ":") { - hostParts := strings.Split(addr, ":") - if len(hostParts) != 2 { - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } - if hostParts[0] != "" { - host = hostParts[0] - } else { - host = defaultHost - } - - if p, err := strconv.Atoi(hostParts[1]); err == nil && p != 0 { - port = p - } else { - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } - - } else if proto == "tcp" && !strings.Contains(addr, ":") { - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } else { - host = addr - } - if proto == "unix" { - return fmt.Sprintf("%s://%s", proto, host), nil - } - return fmt.Sprintf("%s://%s:%d", proto, host, port), nil + return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/") || strings.HasPrefix(str, "git@github.com:") || (strings.HasSuffix(str, ".git") && IsURL(str)) } -func GetReleaseVersion() string { - resp, err := http.Get("https://get.docker.io/latest") - if err != nil { - return "" - } - defer resp.Body.Close() - if resp.ContentLength > 24 || resp.StatusCode != 200 { - return "" - } - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "" - } - return strings.TrimSpace(string(body)) -} +var ( + localHostRx = regexp.MustCompile(`(?m)^nameserver 127[^\n]+\n*`) +) -// Get a repos name and returns the right reposName + tag -// The tag can be confusing because of a port in a repository name. -// Ex: localhost.localdomain:5000/samalba/hipache:latest -func ParseRepositoryTag(repos string) (string, string) { - n := strings.LastIndex(repos, ":") - if n < 0 { - return repos, "" - } - if tag := repos[n+1:]; !strings.Contains(tag, "/") { - return repos[:n], tag - } - return repos, "" +// RemoveLocalDns looks into the /etc/resolv.conf, +// and removes any local nameserver entries. +func RemoveLocalDns(resolvConf []byte) []byte { + return localHostRx.ReplaceAll(resolvConf, []byte{}) } // An StatusError reports an unsuccessful exit by a command. 
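RemoveLocalDns above replaces the old CheckLocalDns heuristic: instead of detecting a loopback resolver, the (?m)-anchored pattern simply strips such nameserver lines from resolv.conf before a container inherits it. The same regex, exercised standalone:

package main

import (
    "fmt"
    "regexp"
)

// The pattern is copied verbatim from localHostRx above: it deletes every
// "nameserver 127.*" line, trailing newlines included.
var localHostRx = regexp.MustCompile(`(?m)^nameserver 127[^\n]+\n*`)

func main() {
    resolvConf := []byte("nameserver 127.0.1.1\nnameserver 8.8.8.8\nsearch example.com\n")
    fmt.Printf("%s", localHostRx.ReplaceAll(resolvConf, []byte{}))
    // Output:
    // nameserver 8.8.8.8
    // search example.com
}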
@@ -885,27 +359,6 @@ return buf.String() } -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - var globalTestID string // TestDirectory creates a new temporary directory and returns its path. @@ -963,22 +416,6 @@ return io.Copy(df, sf) } -type readCloserWrapper struct { - io.Reader - closer func() error -} - -func (r *readCloserWrapper) Close() error { - return r.closer() -} - -func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - return &readCloserWrapper{ - Reader: r, - closer: closer, - } -} - // ReplaceOrAppendValues returns the defaults with the overrides either // replaced by env key or appended to the list func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { @@ -1018,3 +455,86 @@ } return realPath, nil } + +// TreeSize walks a directory tree and returns its total size in bytes. +func TreeSize(dir string) (size int64, err error) { + data := make(map[uint64]struct{}) + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error { + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + // Check inode to handle hard links correctly + inode := fileInfo.Sys().(*syscall.Stat_t).Ino + // inode is not a uint64 on all platforms. Cast it to avoid issues. + if _, exists := data[uint64(inode)]; exists { + return nil + } + // inode is not a uint64 on all platforms. Cast it to avoid issues. 
+        data[uint64(inode)] = struct{}{}
+
+        size += s
+
+        return nil
+    })
+    return
+}
+
+// ValidateContextDirectory checks if all the contents of the directory
+// can be read and returns an error if some files can't be read
+// symlinks which point to non-existing files don't trigger an error
+func ValidateContextDirectory(srcPath string, excludes []string) error {
+    return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error {
+        // skip this directory/file if it's not in the path, it won't get added to the context
+        if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil {
+            return err
+        } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil {
+            return err
+        } else if skip {
+            if f.IsDir() {
+                return filepath.SkipDir
+            }
+            return nil
+        }
+
+        if err != nil {
+            if os.IsPermission(err) {
+                return fmt.Errorf("can't stat '%s'", filePath)
+            }
+            if os.IsNotExist(err) {
+                return nil
+            }
+            return err
+        }
+
+        // skip checking if symlinks point to non-existing files, such symlinks can be useful
+        // also skip named pipes, because they hang on open
+        if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
+            return nil
+        }
+
+        if !f.IsDir() {
+            currentFile, err := os.Open(filePath)
+            if err != nil && os.IsPermission(err) {
+                return fmt.Errorf("no permission to read from '%s'", filePath)
+            }
+            currentFile.Close()
+        }
+        return nil
+    })
+}
+
+func StringsContainsNoCase(slice []string, s string) bool {
+    for _, ss := range slice {
+        if strings.ToLower(s) == strings.ToLower(ss) {
+            return true
+        }
+    }
+    return false
+}
diff -Nru docker.io-0.9.1~dfsg1/utils/utils_test.go docker.io-1.3.2~dfsg1/utils/utils_test.go
--- docker.io-0.9.1~dfsg1/utils/utils_test.go 2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/utils/utils_test.go 2014-11-24 17:38:01.000000000 +0000
@@ -1,487 +1,10 @@
 package utils
 
 import (
-    "bytes"
-    "errors"
-    "io"
-    "io/ioutil"
    "os"
-    "strings"
    "testing"
 )
 
-func TestBufReader(t *testing.T) {
-    reader, writer := io.Pipe()
-    bufreader := NewBufReader(reader)
-
-    // Write everything down to a Pipe
-    // Usually, a pipe should block but because of the buffered reader,
-    // the writes will go through
-    done := make(chan bool)
-    go func() {
-        writer.Write([]byte("hello world"))
-        writer.Close()
-        done <- true
-    }()
-
-    // Drain the reader *after* everything has been written, just to verify
-    // it is indeed buffering
-    <-done
-    output, err := ioutil.ReadAll(bufreader)
-    if err != nil {
-        t.Fatal(err)
-    }
-    if !bytes.Equal(output, []byte("hello world")) {
-        t.Error(string(output))
-    }
-}
-
-type dummyWriter struct {
-    buffer      bytes.Buffer
-    failOnWrite bool
-}
-
-func (dw *dummyWriter) Write(p []byte) (n int, err error) {
-    if dw.failOnWrite {
-        return 0, errors.New("Fake fail")
-    }
-    return dw.buffer.Write(p)
-}
-
-func (dw *dummyWriter) String() string {
-    return dw.buffer.String()
-}
-
-func (dw *dummyWriter) Close() error {
-    return nil
-}
-
-func TestWriteBroadcaster(t *testing.T) {
-    writer := NewWriteBroadcaster()
-
-    // Test 1: Both bufferA and bufferB should contain "foo"
-    bufferA := &dummyWriter{}
-    writer.AddWriter(bufferA, "")
-    bufferB := &dummyWriter{}
-    writer.AddWriter(bufferB, "")
-    writer.Write([]byte("foo"))
-
-    if bufferA.String() != "foo" {
-        t.Errorf("Buffer contains %v", bufferA.String())
-    }
-
-    if bufferB.String() != "foo" {
-        t.Errorf("Buffer contains %v", bufferB.String())
-    }
-
-    // Test2: bufferA and bufferB should contain "foobar",
-    // while bufferC should only contain "bar"
-    bufferC
:= &dummyWriter{} - writer.AddWriter(bufferC, "") - writer.Write([]byte("bar")) - - if bufferA.String() != "foobar" { - t.Errorf("Buffer contains %v", bufferA.String()) - } - - if bufferB.String() != "foobar" { - t.Errorf("Buffer contains %v", bufferB.String()) - } - - if bufferC.String() != "bar" { - t.Errorf("Buffer contains %v", bufferC.String()) - } - - // Test3: Test eviction on failure - bufferA.failOnWrite = true - writer.Write([]byte("fail")) - if bufferA.String() != "foobar" { - t.Errorf("Buffer contains %v", bufferA.String()) - } - if bufferC.String() != "barfail" { - t.Errorf("Buffer contains %v", bufferC.String()) - } - // Even though we reset the flag, no more writes should go in there - bufferA.failOnWrite = false - writer.Write([]byte("test")) - if bufferA.String() != "foobar" { - t.Errorf("Buffer contains %v", bufferA.String()) - } - if bufferC.String() != "barfailtest" { - t.Errorf("Buffer contains %v", bufferC.String()) - } - - writer.CloseWriters() -} - -type devNullCloser int - -func (d devNullCloser) Close() error { - return nil -} - -func (d devNullCloser) Write(buf []byte) (int, error) { - return len(buf), nil -} - -// This test checks for races. It is only useful when run with the race detector. -func TestRaceWriteBroadcaster(t *testing.T) { - writer := NewWriteBroadcaster() - c := make(chan bool) - go func() { - writer.AddWriter(devNullCloser(0), "") - c <- true - }() - writer.Write([]byte("hello")) - <-c -} - -// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix. -func TestTruncIndex(t *testing.T) { - index := NewTruncIndex() - // Get on an empty index - if _, err := index.Get("foobar"); err == nil { - t.Fatal("Get on an empty index should return an error") - } - - // Spaces should be illegal in an id - if err := index.Add("I have a space"); err == nil { - t.Fatalf("Adding an id with ' ' should return an error") - } - - id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96" - // Add an id - if err := index.Add(id); err != nil { - t.Fatal(err) - } - // Get a non-existing id - assertIndexGet(t, index, "abracadabra", "", true) - // Get the exact id - assertIndexGet(t, index, id, id, false) - // The first letter should match - assertIndexGet(t, index, id[:1], id, false) - // The first half should match - assertIndexGet(t, index, id[:len(id)/2], id, false) - // The second half should NOT match - assertIndexGet(t, index, id[len(id)/2:], "", true) - - id2 := id[:6] + "blabla" - // Add an id - if err := index.Add(id2); err != nil { - t.Fatal(err) - } - // Both exact IDs should work - assertIndexGet(t, index, id, id, false) - assertIndexGet(t, index, id2, id2, false) - - // 6 characters or less should conflict - assertIndexGet(t, index, id[:6], "", true) - assertIndexGet(t, index, id[:4], "", true) - assertIndexGet(t, index, id[:1], "", true) - - // 7 characters should NOT conflict - assertIndexGet(t, index, id[:7], id, false) - assertIndexGet(t, index, id2[:7], id2, false) - - // Deleting a non-existing id should return an error - if err := index.Delete("non-existing"); err == nil { - t.Fatalf("Deleting a non-existing id should return an error") - } - - // Deleting id2 should remove conflicts - if err := index.Delete(id2); err != nil { - t.Fatal(err) - } - // id2 should no longer work - assertIndexGet(t, index, id2, "", true) - assertIndexGet(t, index, id2[:7], "", true) - assertIndexGet(t, index, id2[:11], "", true) - - // conflicts between id and id2 should be gone - assertIndexGet(t, index, id[:6], id, false) - assertIndexGet(t, index, 
id[:4], id, false) - assertIndexGet(t, index, id[:1], id, false) - - // non-conflicting substrings should still not conflict - assertIndexGet(t, index, id[:7], id, false) - assertIndexGet(t, index, id[:15], id, false) - assertIndexGet(t, index, id, id, false) -} - -func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) { - if result, err := index.Get(input); err != nil && !expectError { - t.Fatalf("Unexpected error getting '%s': %s", input, err) - } else if err == nil && expectError { - t.Fatalf("Getting '%s' should return an error", input) - } else if result != expectedResult { - t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult) - } -} - -func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) { - if r := CompareKernelVersion(a, b); r != result { - t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result) - } -} - -func TestCompareKernelVersion(t *testing.T) { - assertKernelVersion(t, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - 0) - assertKernelVersion(t, - &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - -1) - assertKernelVersion(t, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, - 1) - assertKernelVersion(t, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - 0) - assertKernelVersion(t, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5}, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - 1) - assertKernelVersion(t, - &KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20}, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - -1) -} - -func TestHumanSize(t *testing.T) { - - size := strings.Trim(HumanSize(1000), " \t") - expect := "1 kB" - if size != expect { - t.Errorf("1000 -> expected '%s', got '%s'", expect, size) - } - - size = strings.Trim(HumanSize(1024), " \t") - expect = "1.024 kB" - if size != expect { - t.Errorf("1024 -> expected '%s', got '%s'", expect, size) - } -} - -func TestRAMInBytes(t *testing.T) { - assertRAMInBytes(t, "32", false, 32) - assertRAMInBytes(t, "32b", false, 32) - assertRAMInBytes(t, "32B", false, 32) - assertRAMInBytes(t, "32k", false, 32*1024) - assertRAMInBytes(t, "32K", false, 32*1024) - assertRAMInBytes(t, "32kb", false, 32*1024) - assertRAMInBytes(t, "32Kb", false, 32*1024) - assertRAMInBytes(t, "32Mb", false, 32*1024*1024) - assertRAMInBytes(t, "32Gb", false, 32*1024*1024*1024) - - assertRAMInBytes(t, "", true, -1) - assertRAMInBytes(t, "hello", true, -1) - assertRAMInBytes(t, "-32", true, -1) - assertRAMInBytes(t, " 32 ", true, -1) - assertRAMInBytes(t, "32 mb", true, -1) - assertRAMInBytes(t, "32m b", true, -1) - assertRAMInBytes(t, "32bm", true, -1) -} - -func assertRAMInBytes(t *testing.T, size string, expectError bool, expectedBytes int64) { - actualBytes, err := RAMInBytes(size) - if (err != nil) && !expectError { - t.Errorf("Unexpected error parsing '%s': %s", size, err) - } - if (err == nil) && expectError { - t.Errorf("Expected to get an error parsing '%s', but got none (bytes=%d)", size, actualBytes) - } - if actualBytes != expectedBytes { - t.Errorf("Expected '%s' to parse as %d bytes, got %d", size, expectedBytes, actualBytes) - } -} - -func TestParseHost(t *testing.T) { - var ( - defaultHttpHost = "127.0.0.1" - defaultUnix = "/var/run/docker.sock" - ) - if addr, err := 
ParseHost(defaultHttpHost, defaultUnix, "0.0.0.0"); err == nil { - t.Errorf("tcp 0.0.0.0 address expected error return, but err == nil, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://"); err == nil { - t.Errorf("default tcp:// address expected error return, but err == nil, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.1:5555"); err != nil || addr != "tcp://0.0.0.1:5555" { - t.Errorf("0.0.0.1:5555 -> expected tcp://0.0.0.1:5555, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, ":6666"); err != nil || addr != "tcp://127.0.0.1:6666" { - t.Errorf(":6666 -> expected tcp://127.0.0.1:6666, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://:7777"); err != nil || addr != "tcp://127.0.0.1:7777" { - t.Errorf("tcp://:7777 -> expected tcp://127.0.0.1:7777, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, ""); err != nil || addr != "unix:///var/run/docker.sock" { - t.Errorf("empty argument -> expected unix:///var/run/docker.sock, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix:///var/run/docker.sock"); err != nil || addr != "unix:///var/run/docker.sock" { - t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix://"); err != nil || addr != "unix:///var/run/docker.sock" { - t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1"); err == nil { - t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1:4243"); err == nil { - t.Errorf("udp protocol address expected error return, but err == nil. 
Got %s", addr) - } -} - -func TestParseRepositoryTag(t *testing.T) { - if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag) - } - if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag) - } - if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag) - } - if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag) - } - if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag) - } - if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag) - } -} - -func TestGetResolvConf(t *testing.T) { - resolvConfUtils, err := GetResolvConf() - if err != nil { - t.Fatal(err) - } - resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") - if err != nil { - t.Fatal(err) - } - if string(resolvConfUtils) != string(resolvConfSystem) { - t.Fatalf("/etc/resolv.conf and GetResolvConf have different content.") - } -} - -func TestCheckLocalDns(t *testing.T) { - for resolv, result := range map[string]bool{`# Dynamic -nameserver 10.0.2.3 -search dotcloud.net`: false, - `# Dynamic -#nameserver 127.0.0.1 -nameserver 10.0.2.3 -search dotcloud.net`: false, - `# Dynamic -nameserver 10.0.2.3 #not used 127.0.1.1 -search dotcloud.net`: false, - `# Dynamic -#nameserver 10.0.2.3 -#search dotcloud.net`: true, - `# Dynamic -nameserver 127.0.0.1 -search dotcloud.net`: true, - `# Dynamic -nameserver 127.0.1.1 -search dotcloud.net`: true, - `# Dynamic -`: true, - ``: true, - } { - if CheckLocalDns([]byte(resolv)) != result { - t.Fatalf("Wrong local dns detection: {%s} should be %v", resolv, result) - } - } -} - -func assertParseRelease(t *testing.T, release string, b *KernelVersionInfo, result int) { - var ( - a *KernelVersionInfo - ) - a, _ = ParseRelease(release) - - if r := CompareKernelVersion(a, b); r != result { - t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result) - } - if a.Flavor != b.Flavor { - t.Fatalf("Unexpected parsed kernel flavor. 
Found %s, expected %s", a.Flavor, b.Flavor) - } -} - -func TestParseRelease(t *testing.T) { - assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) - assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) - assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) - assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0) - assertParseRelease(t, "3.12.8tag", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0) - assertParseRelease(t, "3.12-1-amd64", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0) -} - -func TestParsePortMapping(t *testing.T) { - data, err := PartParser("ip:public:private", "192.168.1.1:80:8080") - if err != nil { - t.Fatal(err) - } - - if len(data) != 3 { - t.FailNow() - } - if data["ip"] != "192.168.1.1" { - t.Fail() - } - if data["public"] != "80" { - t.Fail() - } - if data["private"] != "8080" { - t.Fail() - } -} - -func TestGetNameserversAsCIDR(t *testing.T) { - for resolv, result := range map[string][]string{` -nameserver 1.2.3.4 -nameserver 40.3.200.10 -search example.com`: {"1.2.3.4/32", "40.3.200.10/32"}, - `search example.com`: {}, - `nameserver 1.2.3.4 -search example.com -nameserver 4.30.20.100`: {"1.2.3.4/32", "4.30.20.100/32"}, - ``: {}, - ` nameserver 1.2.3.4 `: {"1.2.3.4/32"}, - `search example.com -nameserver 1.2.3.4 -#nameserver 4.3.2.1`: {"1.2.3.4/32"}, - `search example.com -nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4/32"}, - } { - test := GetNameserversAsCIDR([]byte(resolv)) - if !StrSlicesEqual(test, result) { - t.Fatalf("Wrong nameserver string {%s} should be %v. 
Input: %s", test, result, resolv) - } - } -} - -func StrSlicesEqual(a, b []string) bool { - if len(a) != len(b) { - return false - } - - for i, v := range a { - if v != b[i] { - return false - } - } - - return true -} - func TestReplaceAndAppendEnvVars(t *testing.T) { var ( d = []string{"HOME=/"} diff -Nru docker.io-0.9.1~dfsg1/utils.go docker.io-1.3.2~dfsg1/utils.go --- docker.io-0.9.1~dfsg1/utils.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/utils.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,56 +0,0 @@ -package docker - -import ( - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/nat" - "github.com/dotcloud/docker/pkg/namesgenerator" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" -) - -type Change struct { - archive.Change -} - -func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostConfig) error { - if config.PortSpecs != nil { - ports, bindings, err := nat.ParsePortSpecs(config.PortSpecs) - if err != nil { - return err - } - config.PortSpecs = nil - if len(bindings) > 0 { - if hostConfig == nil { - hostConfig = &runconfig.HostConfig{} - } - hostConfig.PortBindings = bindings - } - - if config.ExposedPorts == nil { - config.ExposedPorts = make(nat.PortSet, len(ports)) - } - for k, v := range ports { - config.ExposedPorts[k] = v - } - } - return nil -} - -// Links come in the format of -// name:alias -func parseLink(rawLink string) (map[string]string, error) { - return utils.PartParser("name:alias", rawLink) -} - -type checker struct { - runtime *Runtime -} - -func (c *checker) Exists(name string) bool { - return c.runtime.containerGraph.Exists("/" + name) -} - -// Generate a random and unique name -func generateRandomName(runtime *Runtime) (string, error) { - return namesgenerator.GenerateRandomName(&checker{runtime}) -} diff -Nru docker.io-0.9.1~dfsg1/utils_test.go docker.io-1.3.2~dfsg1/utils_test.go --- docker.io-0.9.1~dfsg1/utils_test.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/utils_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -package docker - -import ( - "bytes" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "io" -) - -func fakeTar() (io.Reader, error) { - content := []byte("Hello world!\n") - buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} { - hdr := new(tar.Header) - hdr.Size = int64(len(content)) - hdr.Name = name - if err := tw.WriteHeader(hdr); err != nil { - return nil, err - } - tw.Write([]byte(content)) - } - tw.Close() - return buf, nil -} diff -Nru docker.io-0.9.1~dfsg1/VERSION docker.io-1.3.2~dfsg1/VERSION --- docker.io-0.9.1~dfsg1/VERSION 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/VERSION 2014-11-24 17:38:01.000000000 +0000 @@ -1 +1 @@ -0.9.1 +1.3.2 diff -Nru docker.io-0.9.1~dfsg1/version.go docker.io-1.3.2~dfsg1/version.go --- docker.io-0.9.1~dfsg1/version.go 2014-03-25 22:00:37.000000000 +0000 +++ docker.io-1.3.2~dfsg1/version.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -package docker - -import ( - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/utils" - "runtime" -) - -func GetVersion(job *engine.Job) engine.Status { - if _, err := dockerVersion().WriteTo(job.Stdout); err != nil { - job.Errorf("%s", err) - return engine.StatusErr - } - return engine.StatusOK -} - -// dockerVersion returns 
diff -Nru docker.io-0.9.1~dfsg1/utils_test.go docker.io-1.3.2~dfsg1/utils_test.go
--- docker.io-0.9.1~dfsg1/utils_test.go	2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/utils_test.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,24 +0,0 @@
-package docker
-
-import (
-	"bytes"
-	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
-	"io"
-)
-
-func fakeTar() (io.Reader, error) {
-	content := []byte("Hello world!\n")
-	buf := new(bytes.Buffer)
-	tw := tar.NewWriter(buf)
-	for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} {
-		hdr := new(tar.Header)
-		hdr.Size = int64(len(content))
-		hdr.Name = name
-		if err := tw.WriteHeader(hdr); err != nil {
-			return nil, err
-		}
-		tw.Write([]byte(content))
-	}
-	tw.Close()
-	return buf, nil
-}
diff -Nru docker.io-0.9.1~dfsg1/VERSION docker.io-1.3.2~dfsg1/VERSION
--- docker.io-0.9.1~dfsg1/VERSION	2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/VERSION	2014-11-24 17:38:01.000000000 +0000
@@ -1 +1 @@
-0.9.1
+1.3.2
diff -Nru docker.io-0.9.1~dfsg1/version.go docker.io-1.3.2~dfsg1/version.go
--- docker.io-0.9.1~dfsg1/version.go	2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/version.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,32 +0,0 @@
-package docker
-
-import (
-	"github.com/dotcloud/docker/dockerversion"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/utils"
-	"runtime"
-)
-
-func GetVersion(job *engine.Job) engine.Status {
-	if _, err := dockerVersion().WriteTo(job.Stdout); err != nil {
-		job.Errorf("%s", err)
-		return engine.StatusErr
-	}
-	return engine.StatusOK
-}
-
-// dockerVersion returns detailed version information in the form of a queriable
-// environment.
-func dockerVersion() *engine.Env {
-	v := &engine.Env{}
-	v.Set("Version", dockerversion.VERSION)
-	v.Set("GitCommit", dockerversion.GITCOMMIT)
-	v.Set("GoVersion", runtime.Version())
-	v.Set("Os", runtime.GOOS)
-	v.Set("Arch", runtime.GOARCH)
-	// FIXME:utils.GetKernelVersion should only be needed here
-	if kernelVersion, err := utils.GetKernelVersion(); err == nil {
-		v.Set("KernelVersion", kernelVersion.String())
-	}
-	return v
-}
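The removed GetVersion answered the version job with an engine.Env, a set of key/value pairs that can be written to a stream. The toy below shows only that shape; env is a hypothetical stand-in, not the engine package's Env type, which also supports JSON encoding and typed getters:

package main

import (
	"fmt"
	"io"
	"os"
	"runtime"
)

// env is an ordered list of key/value pairs that serializes as
// "KEY=value" lines, approximating how dockerVersion's result was
// streamed to the job's stdout.
type env [][2]string

func (e *env) Set(k, v string) { *e = append(*e, [2]string{k, v}) }

func (e env) WriteTo(w io.Writer) (int64, error) {
	var n int64
	for _, kv := range e {
		c, err := fmt.Fprintf(w, "%s=%s\n", kv[0], kv[1])
		n += int64(c)
		if err != nil {
			return n, err
		}
	}
	return n, nil
}

func main() {
	v := &env{}
	v.Set("Version", "1.3.2")
	v.Set("GoVersion", runtime.Version())
	v.Set("Os", runtime.GOOS)
	v.Set("Arch", runtime.GOARCH)
	v.WriteTo(os.Stdout)
}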
diff -Nru docker.io-0.9.1~dfsg1/volumes/MAINTAINERS docker.io-1.3.2~dfsg1/volumes/MAINTAINERS
--- docker.io-0.9.1~dfsg1/volumes/MAINTAINERS	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/volumes/MAINTAINERS	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1 @@
+Brian Goff (@cpuguy83)
diff -Nru docker.io-0.9.1~dfsg1/volumes/repository.go docker.io-1.3.2~dfsg1/volumes/repository.go
--- docker.io-0.9.1~dfsg1/volumes/repository.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/volumes/repository.go	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,217 @@
+package volumes
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/utils"
+)
+
+type Repository struct {
+	configPath string
+	driver     graphdriver.Driver
+	volumes    map[string]*Volume
+	lock       sync.Mutex
+}
+
+func NewRepository(configPath string, driver graphdriver.Driver) (*Repository, error) {
+	abspath, err := filepath.Abs(configPath)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create the config path
+	if err := os.MkdirAll(abspath, 0700); err != nil && !os.IsExist(err) {
+		return nil, err
+	}
+
+	repo := &Repository{
+		driver:     driver,
+		configPath: abspath,
+		volumes:    make(map[string]*Volume),
+	}
+
+	return repo, repo.restore()
+}
+
+func (r *Repository) newVolume(path string, writable bool) (*Volume, error) {
+	var (
+		isBindMount bool
+		err         error
+		id          = utils.GenerateRandomID()
+	)
+	if path != "" {
+		isBindMount = true
+	}
+
+	if path == "" {
+		path, err = r.createNewVolumePath(id)
+		if err != nil {
+			return nil, err
+		}
+	}
+	path = filepath.Clean(path)
+
+	path, err = filepath.EvalSymlinks(path)
+	if err != nil {
+		return nil, err
+	}
+
+	v := &Volume{
+		ID:          id,
+		Path:        path,
+		repository:  r,
+		Writable:    writable,
+		containers:  make(map[string]struct{}),
+		configPath:  r.configPath + "/" + id,
+		IsBindMount: isBindMount,
+	}
+
+	if err := v.initialize(); err != nil {
+		return nil, err
+	}
+
+	return v, r.add(v)
+}
+
+func (r *Repository) restore() error {
+	dir, err := ioutil.ReadDir(r.configPath)
+	if err != nil {
+		return err
+	}
+
+	for _, v := range dir {
+		id := v.Name()
+		path, err := r.driver.Get(id, "")
+		if err != nil {
+			log.Debugf("Could not find volume for %s: %v", id, err)
+			continue
+		}
+		vol := &Volume{
+			ID:         id,
+			configPath: r.configPath + "/" + id,
+			containers: make(map[string]struct{}),
+			Path:       path,
+		}
+		if err := vol.FromDisk(); err != nil {
+			if !os.IsNotExist(err) {
+				log.Debugf("Error restoring volume: %v", err)
+				continue
+			}
+			if err := vol.initialize(); err != nil {
+				log.Debugf("%s", err)
+				continue
+			}
+		}
+		if err := r.add(vol); err != nil {
+			log.Debugf("Error restoring volume: %v", err)
+		}
+	}
+	return nil
+}
+
+func (r *Repository) Get(path string) *Volume {
+	r.lock.Lock()
+	vol := r.get(path)
+	r.lock.Unlock()
+	return vol
+}
+
+func (r *Repository) get(path string) *Volume {
+	path, err := filepath.EvalSymlinks(path)
+	if err != nil {
+		return nil
+	}
+	return r.volumes[filepath.Clean(path)]
+}
+
+func (r *Repository) Add(volume *Volume) error {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+	return r.add(volume)
+}
+
+func (r *Repository) add(volume *Volume) error {
+	if vol := r.get(volume.Path); vol != nil {
+		return fmt.Errorf("Volume exists: %s", volume.ID)
+	}
+	r.volumes[volume.Path] = volume
+	return nil
+}
+
+func (r *Repository) Remove(volume *Volume) {
+	r.lock.Lock()
+	r.remove(volume)
+	r.lock.Unlock()
+}
+
+func (r *Repository) remove(volume *Volume) {
+	delete(r.volumes, volume.Path)
+}
+
+func (r *Repository) Delete(path string) error {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+	path, err := filepath.EvalSymlinks(path)
+	if err != nil {
+		return err
+	}
+	volume := r.get(filepath.Clean(path))
+	if volume == nil {
+		return fmt.Errorf("Volume %s does not exist", path)
+	}
+
+	if volume.IsBindMount {
+		return fmt.Errorf("Volume %s is a bind-mount and cannot be removed", volume.Path)
+	}
+	containers := volume.Containers()
+	if len(containers) > 0 {
+		return fmt.Errorf("Volume %s is being used and cannot be removed: used by containers %s", volume.Path, containers)
+	}
+
+	if err := os.RemoveAll(volume.configPath); err != nil {
+		return err
+	}
+
+	if err := r.driver.Remove(volume.ID); err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+	}
+
+	r.remove(volume)
+	return nil
+}
+
+func (r *Repository) createNewVolumePath(id string) (string, error) {
+	if err := r.driver.Create(id, ""); err != nil {
+		return "", err
+	}
+
+	path, err := r.driver.Get(id, "")
+	if err != nil {
+		return "", fmt.Errorf("Driver %s failed to get volume rootfs %s: %v", r.driver, id, err)
+	}
+
+	return path, nil
+}
+
+func (r *Repository) FindOrCreateVolume(path string, writable bool) (*Volume, error) {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+
+	if path == "" {
+		return r.newVolume(path, writable)
+	}
+
+	if v := r.get(path); v != nil {
+		return v, nil
+	}
+
+	return r.newVolume(path, writable)
+}
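FindOrCreateVolume above performs its lookup and its create while holding the repository lock, so two concurrent callers asking for the same path cannot race each other into creating two volumes. A reduced sketch of that get-or-create pattern; store, findOrCreate, and the string volume IDs are illustrative stand-ins for the Repository and *Volume types:

package main

import (
	"fmt"
	"path/filepath"
	"sync"
)

// store keys volumes by cleaned path, as the Repository does, and
// guards the map with a single mutex.
type store struct {
	mu      sync.Mutex
	volumes map[string]string // path -> volume ID
}

// findOrCreate takes the lock once, so the lookup and the create are
// atomic with respect to other callers.
func (s *store) findOrCreate(path string, create func() string) string {
	s.mu.Lock()
	defer s.mu.Unlock()

	path = filepath.Clean(path)
	if id, ok := s.volumes[path]; ok {
		return id // reuse the volume already registered at this path
	}
	id := create()
	s.volumes[path] = id
	return id
}

func main() {
	s := &store{volumes: map[string]string{}}
	n := 0
	mk := func() string { n++; return fmt.Sprintf("vol-%d", n) }

	a := s.findOrCreate("/data", mk)
	b := s.findOrCreate("/data/", mk) // cleaned to the same key
	fmt.Println(a == b, n)            // true 1
}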
diff -Nru docker.io-0.9.1~dfsg1/volumes/volume.go docker.io-1.3.2~dfsg1/volumes/volume.go
--- docker.io-0.9.1~dfsg1/volumes/volume.go	1970-01-01 00:00:00.000000000 +0000
+++ docker.io-1.3.2~dfsg1/volumes/volume.go	2014-11-24 17:38:01.000000000 +0000
@@ -0,0 +1,139 @@
+package volumes
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/docker/docker/pkg/symlink"
+)
+
+type Volume struct {
+	ID          string
+	Path        string
+	IsBindMount bool
+	Writable    bool
+	containers  map[string]struct{}
+	configPath  string
+	repository  *Repository
+	lock        sync.Mutex
+}
+
+func (v *Volume) IsDir() (bool, error) {
+	stat, err := os.Stat(v.Path)
+	if err != nil {
+		return false, err
+	}
+
+	return stat.IsDir(), nil
+}
+
+func (v *Volume) Containers() []string {
+	v.lock.Lock()
+
+	var containers []string
+	for c := range v.containers {
+		containers = append(containers, c)
+	}
+
+	v.lock.Unlock()
+	return containers
+}
+
+func (v *Volume) RemoveContainer(containerId string) {
+	v.lock.Lock()
+	delete(v.containers, containerId)
+	v.lock.Unlock()
+}
+
+func (v *Volume) AddContainer(containerId string) {
+	v.lock.Lock()
+	v.containers[containerId] = struct{}{}
+	v.lock.Unlock()
+}
+
+func (v *Volume) createIfNotExist() error {
+	if stat, err := os.Stat(v.Path); err != nil && os.IsNotExist(err) {
+		if stat.IsDir() {
+			os.MkdirAll(v.Path, 0755)
+		}
+
+		if err := os.MkdirAll(filepath.Dir(v.Path), 0755); err != nil {
+			return err
+		}
+		f, err := os.OpenFile(v.Path, os.O_CREATE, 0755)
+		if err != nil {
+			return err
+		}
+		f.Close()
+	}
+
+	return nil
+}
+
+func (v *Volume) initialize() error {
+	v.lock.Lock()
+	defer v.lock.Unlock()
+
+	if err := v.createIfNotExist(); err != nil {
+		return err
+	}
+
+	if err := os.MkdirAll(v.configPath, 0755); err != nil {
+		return err
+	}
+	jsonPath, err := v.jsonPath()
+	if err != nil {
+		return err
+	}
+	f, err := os.Create(jsonPath)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	return v.toDisk()
+}
+
+func (v *Volume) ToDisk() error {
+	v.lock.Lock()
+	defer v.lock.Unlock()
+	return v.toDisk()
+}
+func (v *Volume) toDisk() error {
+	data, err := json.Marshal(v)
+	if err != nil {
+		return err
+	}
+
+	pth, err := v.jsonPath()
+	if err != nil {
+		return err
+	}
+
+	return ioutil.WriteFile(pth, data, 0666)
+}
+func (v *Volume) FromDisk() error {
+	v.lock.Lock()
+	defer v.lock.Unlock()
+	pth, err := v.jsonPath()
+	if err != nil {
+		return err
+	}
+
+	data, err := ioutil.ReadFile(pth)
+	if err != nil {
+		return err
+	}
+
+	return json.Unmarshal(data, v)
+}
+
+func (v *Volume) jsonPath() (string, error) {
+	return v.getRootResourcePath("config.json")
+}
+func (v *Volume) getRootResourcePath(path string) (string, error) {
+	cleanPath := filepath.Join("/", path)
+	return symlink.FollowSymlinkInScope(filepath.Join(v.configPath, cleanPath), v.configPath)
+}
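getRootResourcePath above joins the requested name onto "/" before anchoring it under configPath; that lexical step alone strips any "../" prefix, and symlink.FollowSymlinkInScope then keeps resolved symlinks inside the scope as well. A sketch of the lexical half only; scopedPath is an illustrative name and symlink resolution is deliberately omitted:

package main

import (
	"fmt"
	"path/filepath"
)

// scopedPath roots the user-supplied name at "/" first, so a
// traversal attempt like "../../etc/passwd" collapses to
// "/etc/passwd" and the final join cannot climb out of configPath.
func scopedPath(configPath, name string) string {
	clean := filepath.Join("/", name)
	return filepath.Join(configPath, clean)
}

func main() {
	fmt.Println(scopedPath("/var/lib/docker/vfs", "config.json"))
	fmt.Println(scopedPath("/var/lib/docker/vfs", "../../../etc/passwd"))
	// both results stay under /var/lib/docker/vfs
}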
diff -Nru docker.io-0.9.1~dfsg1/volumes.go docker.io-1.3.2~dfsg1/volumes.go
--- docker.io-0.9.1~dfsg1/volumes.go	2014-03-25 22:00:37.000000000 +0000
+++ docker.io-1.3.2~dfsg1/volumes.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,332 +0,0 @@
-package docker
-
-import (
-	"fmt"
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/pkg/mount"
-	"github.com/dotcloud/docker/utils"
-	"io/ioutil"
-	"log"
-	"os"
-	"path/filepath"
-	"strings"
-	"syscall"
-)
-
-type BindMap struct {
-	SrcPath string
-	DstPath string
-	Mode    string
-}
-
-func prepareVolumesForContainer(container *Container) error {
-	if container.Volumes == nil || len(container.Volumes) == 0 {
-		container.Volumes = make(map[string]string)
-		container.VolumesRW = make(map[string]bool)
-		if err := applyVolumesFrom(container); err != nil {
-			return err
-		}
-	}
-
-	if err := createVolumes(container); err != nil {
-		return err
-	}
-	return nil
-}
-
-func mountVolumesForContainer(container *Container, envPath string) error {
-	// Setup the root fs as a bind mount of the base fs
-	var (
-		root    = container.RootfsPath()
-		runtime = container.runtime
-	)
-	if err := os.MkdirAll(root, 0755); err != nil && !os.IsExist(err) {
-		return nil
-	}
-
-	// Create a bind mount of the base fs as a place where we can add mounts
-	// without affecting the ability to access the base fs
-	if err := mount.Mount(container.basefs, root, "none", "bind,rw"); err != nil {
-		return err
-	}
-
-	// Make sure the root fs is private so the mounts here don't propagate to basefs
-	if err := mount.ForceMount(root, root, "none", "private"); err != nil {
-		return err
-	}
-
-	// Mount docker specific files into the containers root fs
-	if err := mount.Mount(runtime.sysInitPath, filepath.Join(root, "/.dockerinit"), "none", "bind,ro"); err != nil {
-		return err
-	}
-	if err := mount.Mount(envPath, filepath.Join(root, "/.dockerenv"), "none", "bind,ro"); err != nil {
-		return err
-	}
-	if err := mount.Mount(container.ResolvConfPath, filepath.Join(root, "/etc/resolv.conf"), "none", "bind,ro"); err != nil {
-		return err
-	}
-
-	if container.HostnamePath != "" && container.HostsPath != "" {
-		if err := mount.Mount(container.HostnamePath, filepath.Join(root, "/etc/hostname"), "none", "bind,ro"); err != nil {
-			return err
-		}
-		if err := mount.Mount(container.HostsPath, filepath.Join(root, "/etc/hosts"), "none", "bind,ro"); err != nil {
-			return err
-		}
-	}
-
-	// Mount user specified volumes
-	for r, v := range container.Volumes {
-		mountAs := "ro"
-		if container.VolumesRW[r] {
-			mountAs = "rw"
-		}
-
-		r = filepath.Join(root, r)
-		if p, err := utils.FollowSymlinkInScope(r, root); err != nil {
-			return err
-		} else {
-			r = p
-		}
-
-		if err := mount.Mount(v, r, "none", fmt.Sprintf("bind,%s", mountAs)); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func unmountVolumesForContainer(container *Container) {
-	var (
-		root   = container.RootfsPath()
-		mounts = []string{
-			root,
-			filepath.Join(root, "/.dockerinit"),
-			filepath.Join(root, "/.dockerenv"),
-			filepath.Join(root, "/etc/resolv.conf"),
-		}
-	)
-
-	if container.HostnamePath != "" && container.HostsPath != "" {
-		mounts = append(mounts, filepath.Join(root, "/etc/hostname"), filepath.Join(root, "/etc/hosts"))
-	}
-
-	for r := range container.Volumes {
-		mounts = append(mounts, filepath.Join(root, r))
-	}
-
-	for i := len(mounts) - 1; i >= 0; i-- {
-		if lastError := mount.Unmount(mounts[i]); lastError != nil {
-			log.Printf("Failed to umount %v: %v", mounts[i], lastError)
-		}
-	}
-}
-
-func applyVolumesFrom(container *Container) error {
-	if container.Config.VolumesFrom != "" {
-		for _, containerSpec := range strings.Split(container.Config.VolumesFrom, ",") {
-			var (
-				mountRW   = true
-				specParts = strings.SplitN(containerSpec, ":", 2)
-			)
-
-			switch len(specParts) {
-			case 0:
-				return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom)
-			case 2:
-				switch specParts[1] {
-				case "ro":
-					mountRW = false
-				case "rw": // mountRW is already true
-				default:
-					return fmt.Errorf("Malformed volumes-from specification: %s", containerSpec)
-				}
-			}
-
-			c := container.runtime.Get(specParts[0])
-			if c == nil {
-				return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID)
-			}
-
-			for volPath, id := range c.Volumes {
-				if _, exists := container.Volumes[volPath]; exists {
-					continue
-				}
-				if err := os.MkdirAll(filepath.Join(container.basefs, volPath), 0755); err != nil {
-					return err
-				}
-				container.Volumes[volPath] = id
-				if isRW, exists := c.VolumesRW[volPath]; exists {
-					container.VolumesRW[volPath] = isRW && mountRW
-				}
-			}
-
-		}
-	}
-	return nil
-}
-
-func getBindMap(container *Container) (map[string]BindMap, error) {
-	var (
-		// Create the requested bind mounts
-		binds = make(map[string]BindMap)
-		// Define illegal container destinations
-		illegalDsts = []string{"/", "."}
-	)
-
-	for _, bind := range container.hostConfig.Binds {
-		// FIXME: factorize bind parsing in parseBind
-		var (
-			src, dst, mode string
-			arr            = strings.Split(bind, ":")
-		)
-
-		if len(arr) == 2 {
-			src = arr[0]
-			dst = arr[1]
-			mode = "rw"
-		} else if len(arr) == 3 {
-			src = arr[0]
-			dst = arr[1]
-			mode = arr[2]
-		} else {
-			return nil, fmt.Errorf("Invalid bind specification: %s", bind)
-		}
-
-		// Bail if trying to mount to an illegal destination
-		for _, illegal := range illegalDsts {
-			if dst == illegal {
-				return nil, fmt.Errorf("Illegal bind destination: %s", dst)
-			}
-		}
-
-		bindMap := BindMap{
-			SrcPath: src,
-			DstPath: dst,
-			Mode:    mode,
-		}
-		binds[filepath.Clean(dst)] = bindMap
-	}
-	return binds, nil
-}
-
-func createVolumes(container *Container) error {
-	binds, err := getBindMap(container)
-	if err != nil {
-		return err
-	}
-
-	volumesDriver := container.runtime.volumes.driver
-	// Create the requested volumes if they don't exist
-	for volPath := range container.Config.Volumes {
-		volPath = filepath.Clean(volPath)
-		volIsDir := true
-		// Skip existing volumes
-		if _, exists := container.Volumes[volPath]; exists {
-			continue
-		}
-		var srcPath string
-		var isBindMount bool
-		srcRW := false
-		// If an external bind is defined for this volume, use that as a source
-		if bindMap, exists := binds[volPath]; exists {
-			isBindMount = true
-			srcPath = bindMap.SrcPath
-			if strings.ToLower(bindMap.Mode) == "rw" {
-				srcRW = true
-			}
-			if stat, err := os.Stat(bindMap.SrcPath); err != nil {
-				return err
-			} else {
-				volIsDir = stat.IsDir()
-			}
-			// Otherwise create an directory in $ROOT/volumes/ and use that
-		} else {
-
-			// Do not pass a container as the parameter for the volume creation.
-			// The graph driver using the container's information ( Image ) to
-			// create the parent.
-			c, err := container.runtime.volumes.Create(nil, nil, "", "", nil)
-			if err != nil {
-				return err
-			}
-			srcPath, err = volumesDriver.Get(c.ID)
-			if err != nil {
-				return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err)
-			}
-			srcRW = true // RW by default
-		}
-
-		if p, err := filepath.EvalSymlinks(srcPath); err != nil {
-			return err
-		} else {
-			srcPath = p
-		}
-
-		container.Volumes[volPath] = srcPath
-		container.VolumesRW[volPath] = srcRW
-
-		// Create the mountpoint
-		volPath = filepath.Join(container.basefs, volPath)
-		rootVolPath, err := utils.FollowSymlinkInScope(volPath, container.basefs)
-		if err != nil {
-			return err
-		}
-
-		if _, err := os.Stat(rootVolPath); err != nil {
-			if os.IsNotExist(err) {
-				if volIsDir {
-					if err := os.MkdirAll(rootVolPath, 0755); err != nil {
-						return err
-					}
-				} else {
-					if err := os.MkdirAll(filepath.Dir(rootVolPath), 0755); err != nil {
-						return err
-					}
-					if f, err := os.OpenFile(rootVolPath, os.O_CREATE, 0755); err != nil {
-						return err
-					} else {
-						f.Close()
-					}
-				}
-			}
-		}
-
-		// Do not copy or change permissions if we are mounting from the host
-		if srcRW && !isBindMount {
-			volList, err := ioutil.ReadDir(rootVolPath)
-			if err != nil {
-				return err
-			}
-			if len(volList) > 0 {
-				srcList, err := ioutil.ReadDir(srcPath)
-				if err != nil {
-					return err
-				}
-				if len(srcList) == 0 {
-					// If the source volume is empty copy files from the root into the volume
-					if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil {
-						return err
-					}
-
-					var stat syscall.Stat_t
-					if err := syscall.Stat(rootVolPath, &stat); err != nil {
-						return err
-					}
-					var srcStat syscall.Stat_t
-					if err := syscall.Stat(srcPath, &srcStat); err != nil {
-						return err
-					}
-					// Change the source volume's ownership if it differs from the root
-					// files that were just copied
-					if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid {
-						if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil {
-							return err
-						}
-					}
-				}
-			}
-		}
-	}
-	return nil
-}
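The removed getBindMap parsed each HostConfig.Binds entry as "src:dst" or "src:dst:mode" inline, and carried a FIXME to factor that out into a parseBind helper. A sketch of what that helper would look like; parseBind here is hypothetical, since the FIXME was never resolved in this file:

package main

import (
	"fmt"
	"strings"
)

// parseBind splits a bind specification into its source path,
// destination path, and mode, defaulting the mode to "rw" exactly as
// the removed getBindMap did.
func parseBind(bind string) (src, dst, mode string, err error) {
	arr := strings.Split(bind, ":")
	switch len(arr) {
	case 2:
		return arr[0], arr[1], "rw", nil
	case 3:
		return arr[0], arr[1], arr[2], nil
	default:
		return "", "", "", fmt.Errorf("Invalid bind specification: %s", bind)
	}
}

func main() {
	src, dst, mode, err := parseBind("/host/data:/data:ro")
	fmt.Println(src, dst, mode, err) // /host/data /data ro <nil>
}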
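Similarly, the removed applyVolumesFrom accepted "container" or "container:ro|rw" specifications, defaulting to read-write. A self-contained sketch of just that parsing step; parseVolumesFrom is an illustrative name, not a function from the original source:

package main

import (
	"fmt"
	"strings"
)

// parseVolumesFrom extracts the source container reference and the
// requested mount mode from a volumes-from specification.
func parseVolumesFrom(spec string) (id string, mountRW bool, err error) {
	parts := strings.SplitN(spec, ":", 2)
	switch len(parts) {
	case 1:
		return parts[0], true, nil // mode defaults to read-write
	case 2:
		switch parts[1] {
		case "ro":
			return parts[0], false, nil
		case "rw":
			return parts[0], true, nil
		}
	}
	return "", false, fmt.Errorf("Malformed volumes-from specification: %s", spec)
}

func main() {
	id, rw, err := parseVolumesFrom("datastore:ro")
	fmt.Println(id, rw, err) // datastore false <nil>
}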