Merge pull request #5921 from thaJeztah/bump_engine

vendor: github.com/docker/docker 185651d26bc6 (master, v28.0-dev)
This commit is contained in:
Sebastiaan van Stijn 2025-04-09 13:12:32 +02:00 committed by GitHub
commit 6714b50288
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
31 changed files with 501 additions and 284 deletions

View File

@ -12,8 +12,8 @@ import (
"sort"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/atomicwriter"
"github.com/fvbommel/sortorder"
"github.com/moby/sys/atomicwriter"
"github.com/pkg/errors"
)

View File

@ -5,7 +5,7 @@ import (
"path/filepath"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/atomicwriter"
"github.com/moby/sys/atomicwriter"
"github.com/pkg/errors"
)

View File

@ -14,7 +14,7 @@ require (
github.com/distribution/reference v0.6.0
github.com/docker/cli-docs-tool v0.9.0
github.com/docker/distribution v2.8.3+incompatible
github.com/docker/docker v28.0.4+incompatible
github.com/docker/docker v28.0.5-0.20250407171702-185651d26bc6+incompatible // master / v28.x-dev
github.com/docker/docker-credential-helpers v0.9.2
github.com/docker/go-connections v0.5.0
github.com/docker/go-units v0.5.0
@ -28,6 +28,7 @@ require (
github.com/mattn/go-runewidth v0.0.16
github.com/moby/patternmatcher v0.6.0
github.com/moby/swarmkit/v2 v2.0.0-20250103191802-8c1959736554
github.com/moby/sys/atomicwriter v0.1.0
github.com/moby/sys/capability v0.4.0
github.com/moby/sys/sequential v0.6.0
github.com/moby/sys/signal v0.7.1

View File

@ -52,8 +52,8 @@ github.com/docker/cli-docs-tool v0.9.0/go.mod h1:ClrwlNW+UioiRyH9GiAOe1o3J/TsY3T
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v28.0.4+incompatible h1:JNNkBctYKurkw6FrHfKqY0nKIDf5nrbxjVBtS+cdcok=
github.com/docker/docker v28.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v28.0.5-0.20250407171702-185651d26bc6+incompatible h1:LERt8odt82duY89hcP1zpsTK19omD2bfJZJe6U//89c=
github.com/docker/docker v28.0.5-0.20250407171702-185651d26bc6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.9.2 h1:50JF7ADQiHdAVBRtg/vy883Y4U5+5GmPOBNtUU+X+6A=
github.com/docker/docker-credential-helpers v0.9.2/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
@ -169,6 +169,8 @@ github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkV
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/swarmkit/v2 v2.0.0-20250103191802-8c1959736554 h1:DMHJbgyNZWyrPKYjCYt2IxEO7KA0eSd4fo6KQsv2W84=
github.com/moby/swarmkit/v2 v2.0.0-20250103191802-8c1959736554/go.mod h1:mTTGIAz/59OGZR5Qe+QByIe3Nxc+sSuJkrsStFhr6Lg=
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
github.com/moby/sys/capability v0.4.0 h1:4D4mI6KlNtWMCM1Z/K0i7RV1FkX+DBDHKVJpCndZoHk=
github.com/moby/sys/capability v0.4.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=

View File

@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api"
// Common constants for daemon and client.
const (
// DefaultVersion of the current REST API.
DefaultVersion = "1.48"
DefaultVersion = "1.49"
// MinSupportedAPIVersion is the minimum API version that can be supported
// by the API server, specified as "major.minor". Note that the daemon

View File

@ -19,10 +19,10 @@ produces:
consumes:
- "application/json"
- "text/plain"
basePath: "/v1.48"
basePath: "/v1.49"
info:
title: "Docker Engine API"
version: "1.48"
version: "1.49"
x-logo:
url: "https://docs.docker.com/assets/images/logo-docker-main.png"
description: |
@ -55,8 +55,8 @@ info:
the URL is not supported by the daemon, an HTTP `400 Bad Request` error message
is returned.
If you omit the version-prefix, the current version of the API (v1.48) is used.
For example, calling `/info` is the same as calling `/v1.48/info`. Using the
If you omit the version-prefix, the current version of the API (v1.49) is used.
For example, calling `/info` is the same as calling `/v1.49/info`. Using the
API without a version-prefix is deprecated and will be removed in a future release.
Engine releases in the near future should support this version of the API,
@ -6856,6 +6856,8 @@ definitions:
description: "The network pool size"
type: "integer"
example: "24"
FirewallBackend:
$ref: "#/definitions/FirewallInfo"
Warnings:
description: |
List of warnings / informational messages about missing features, or
@ -6939,6 +6941,37 @@ definitions:
default: "plugins.moby"
example: "plugins.moby"
FirewallInfo:
description: |
Information about the daemon's firewalling configuration.
This field is currently only used on Linux, and is omitted on other platforms.
type: "object"
x-nullable: true
properties:
Driver:
description: |
The name of the firewall backend driver.
type: "string"
example: "nftables"
Info:
description: |
Information about the firewall backend, provided as
"label" / "value" pairs.
<p><br /></p>
> **Note**: The information returned in this field, including the
> formatting of values and labels, should not be considered stable,
> and may change without notice.
type: "array"
items:
type: "array"
items:
type: "string"
example:
- ["ReloadedAt", "2025-01-01T00:00:00Z"]
# PluginsInfo is a temp struct holding Plugins name
# registered with docker daemon. It is used by Info struct
PluginsInfo:
@ -10537,6 +10570,7 @@ paths:
If not provided, the full multi-platform image will be saved.
Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
tags: ["Image"]
/images/get:
get:
summary: "Export several images"
@ -10571,6 +10605,16 @@ paths:
type: "array"
items:
type: "string"
- name: "platform"
type: "string"
in: "query"
description: |
JSON-encoded OCI platform describing the platform that will be used
to select a platform-specific image to be saved if the image is
multi-platform.
If not provided, the full multi-platform image will be saved.
Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
tags: ["Image"]
/images/load:
post:

View File

@ -128,11 +128,12 @@ type InspectResponse struct {
// compatibility.
Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"`
// Manifests is a list of image manifests available in this image. It
// provides a more detailed view of the platform-specific image manifests or
// other image-attached data like build attestations.
//
// Only available if the daemon provides a multi-platform image store.
// Only available if the daemon provides a multi-platform image store, the client
// requests manifests, AND does not request a specific platform.
//
// WARNING: This is experimental and may change at any time without any backward
// compatibility.

View File

@ -106,6 +106,11 @@ type LoadOptions struct {
type InspectOptions struct {
// Manifests returns the image manifests.
Manifests bool
// Platform selects the specific platform of a multi-platform image to inspect.
//
// This option is only available for API version 1.49 and up.
Platform *ocispec.Platform
}
// SaveOptions holds parameters to save images.

View File

@ -73,6 +73,7 @@ type Info struct {
SecurityOptions []string
ProductLicense string `json:",omitempty"`
DefaultAddressPools []NetworkAddressPool `json:",omitempty"`
FirewallBackend *FirewallInfo `json:"FirewallBackend,omitempty"`
CDISpecDirs []string
Containerd *ContainerdInfo `json:",omitempty"`
@ -151,3 +152,11 @@ type NetworkAddressPool struct {
Base string
Size int
}
// FirewallInfo describes the firewall backend.
type FirewallInfo struct {
// Driver is the name of the firewall backend driver.
Driver string `json:"Driver"`
// Info is a list of label/value pairs, containing information related to the firewall.
Info [][2]string `json:"Info,omitempty"`
}
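
For reference only (not part of this diff): a minimal sketch of how a client could surface the new FirewallBackend field through the standard Info call; the output formatting is illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	info, err := cli.Info(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// FirewallBackend is nil on older daemons and on non-Linux platforms.
	if fw := info.FirewallBackend; fw != nil {
		fmt.Println("firewall driver:", fw.Driver)
		for _, kv := range fw.Info {
			fmt.Printf("  %s: %s\n", kv[0], kv[1])
		}
	}
}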

View File

@ -32,7 +32,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, containerID string, opti
if tagged, ok := ref.(reference.Tagged); ok {
tag = tagged.Tag()
}
repository = reference.FamiliarName(ref)
repository = ref.Name()
}
query := url.Values{}
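
Context for this and the other call sites below that switch from reference.FamiliarName(ref) to ref.Name(): the daemon now receives the fully-qualified repository name. A small standalone sketch of the difference, using the already-vendored github.com/distribution/reference (the image reference is an example):

package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("alpine:3.21")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.FamiliarName(ref)) // "alpine"
	fmt.Println(ref.Name())                  // "docker.io/library/alpine"
}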

View File

@ -21,7 +21,7 @@ func (cli *Client) ImageCreate(ctx context.Context, parentReference string, opti
}
query := url.Values{}
query.Set("fromImage", reference.FamiliarName(ref))
query.Set("fromImage", ref.Name())
query.Set("tag", getAPITagFromNamedRef(ref))
if options.Platform != "" {
query.Set("platform", strings.ToLower(options.Platform))

View File

@ -32,6 +32,17 @@ func (cli *Client) ImageInspect(ctx context.Context, imageID string, inspectOpts
query.Set("manifests", "1")
}
if opts.apiOptions.Platform != nil {
if err := cli.NewVersionError(ctx, "1.49", "platform"); err != nil {
return image.InspectResponse{}, err
}
platform, err := encodePlatform(opts.apiOptions.Platform)
if err != nil {
return image.InspectResponse{}, err
}
query.Set("platform", platform)
}
resp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil)
defer ensureReaderClosed(resp)
if err != nil {

View File

@ -4,6 +4,7 @@ import (
"bytes"
"github.com/docker/docker/api/types/image"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// ImageInspectOption is a type representing functional options for the image inspect operation.
@ -36,6 +37,17 @@ func ImageInspectWithManifests(manifests bool) ImageInspectOption {
})
}
// ImageInspectWithPlatform sets the platform API option for the image inspect operation.
// This option is only available for API version 1.49 and up.
// With this option set, the image inspect operation will return information for the
// specified platform variant of the multi-platform image.
func ImageInspectWithPlatform(platform *ocispec.Platform) ImageInspectOption {
return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error {
clientOpts.apiOptions.Platform = platform
return nil
})
}
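
A minimal usage sketch for the new option (image name and platform are illustrative); it relies on the variadic ImageInspect signature shown earlier in this diff and requires a daemon speaking API 1.49 or later:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/client"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Inspect the linux/arm64 variant of a multi-platform image.
	resp, err := cli.ImageInspect(context.Background(), "alpine:latest",
		client.ImageInspectWithPlatform(&ocispec.Platform{OS: "linux", Architecture: "arm64"}),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.ID)
}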
// ImageInspectWithAPIOpts sets the API options for the image inspect operation.
func ImageInspectWithAPIOpts(opts image.InspectOptions) ImageInspectOption {
return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error {

View File

@ -26,7 +26,7 @@ func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.P
}
query := url.Values{}
query.Set("fromImage", reference.FamiliarName(ref))
query.Set("fromImage", ref.Name())
if !options.All {
query.Set("tag", getAPITagFromNamedRef(ref))
}

View File

@ -29,7 +29,6 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu
return nil, errors.New("cannot push a digest reference")
}
name := reference.FamiliarName(ref)
query := url.Values{}
if !options.All {
ref = reference.TagNameOnly(ref)
@ -52,13 +51,13 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu
query.Set("platform", string(pJson))
}
resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth)
resp, err := cli.tryImagePush(ctx, ref.Name(), query, options.RegistryAuth)
if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx)
if privilegeErr != nil {
return nil, privilegeErr
}
resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader)
resp, err = cli.tryImagePush(ctx, ref.Name(), query, newAuthHeader)
}
if err != nil {
return nil, err

View File

@ -26,7 +26,7 @@ func (cli *Client) ImageTag(ctx context.Context, source, target string) error {
ref = reference.TagNameOnly(ref)
query := url.Values{}
query.Set("repo", reference.FamiliarName(ref))
query.Set("repo", ref.Name())
if tagged, ok := ref.(reference.Tagged); ok {
query.Set("tag", tagged.Tag())
}

View File

@ -75,7 +75,7 @@ func sameFsTime(a, b time.Time) bool {
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
return collectChanges(layers, rw, aufsDeletedFile, aufsMetadataSkip)
}
func aufsMetadataSkip(path string) (skip bool, err error) {
@ -103,7 +103,7 @@ type (
deleteChange func(string, string, os.FileInfo) (string, error)
)
func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
func collectChanges(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
var (
changes []Change
changedDirs = make(map[string]struct{})

View File

@ -3,11 +3,15 @@ package idtools
import (
"fmt"
"os"
"github.com/moby/sys/user"
)
// IDMap contains a single entry for user namespace range remapping. An array
// of IDMap entries represents the structure that will be provided to the Linux
// kernel for creating a user namespace.
//
// Deprecated: use [user.IDMap] instead.
type IDMap struct {
ContainerID int `json:"container_id"`
HostID int `json:"host_id"`
@ -17,28 +21,42 @@ type IDMap struct {
// MkdirAllAndChown creates a directory (include any along the path) and then modifies
// ownership to the requested uid/gid. If the directory already exists, this
// function will still change ownership and permissions.
//
// Deprecated: use [user.MkdirAllAndChown] instead.
func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error {
return mkdirAs(path, mode, owner, true, true)
return user.MkdirAllAndChown(path, mode, owner.UID, owner.GID)
}
// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
// If the directory already exists, this function still changes ownership and permissions.
// Note that, unlike os.Mkdir(), this function does not return an IsExist error
// if the path already exists.
//
// Deprecated: use [user.MkdirAndChown] instead.
func MkdirAndChown(path string, mode os.FileMode, owner Identity) error {
return mkdirAs(path, mode, owner, false, true)
return user.MkdirAndChown(path, mode, owner.UID, owner.GID)
}
// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies
// ownership ONLY of newly created directories to the requested uid/gid. If the
// directories along the path exist, no change of ownership or permissions will be performed
//
// Deprecated: use [user.MkdirAllAndChown] with the [user.WithOnlyNew] option instead.
func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error {
return mkdirAs(path, mode, owner, true, false)
return user.MkdirAllAndChown(path, mode, owner.UID, owner.GID, user.WithOnlyNew)
}
// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
// If the maps are empty, then the root uid/gid will default to "real" 0/0
//
// Deprecated: use [(user.IdentityMapping).RootPair] instead.
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
return getRootUIDGID(uidMap, gidMap)
}
// getRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
// If the maps are empty, then the root uid/gid will default to "real" 0/0
func getRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
uid, err := toHost(0, uidMap)
if err != nil {
return -1, -1, err
@ -101,11 +119,36 @@ type IdentityMapping struct {
GIDMaps []IDMap `json:"GIDMaps"`
}
// FromUserIdentityMapping converts a [user.IdentityMapping] to an [idtools.IdentityMapping].
//
// Deprecated: use [user.IdentityMapping] directly, this is transitioning to user package.
func FromUserIdentityMapping(u user.IdentityMapping) IdentityMapping {
return IdentityMapping{
UIDMaps: fromUserIDMap(u.UIDMaps),
GIDMaps: fromUserIDMap(u.GIDMaps),
}
}
func fromUserIDMap(u []user.IDMap) []IDMap {
if u == nil {
return nil
}
m := make([]IDMap, len(u))
for i := range u {
m[i] = IDMap{
ContainerID: int(u[i].ID),
HostID: int(u[i].ParentID),
Size: int(u[i].Count),
}
}
return m
}
// RootPair returns a uid and gid pair for the root user. The error is ignored
// because a root user always exists, and the defaults are correct when the uid
// and gid maps are empty.
func (i IdentityMapping) RootPair() Identity {
uid, gid, _ := GetRootUIDGID(i.UIDMaps, i.GIDMaps)
uid, gid, _ := getRootUIDGID(i.UIDMaps, i.GIDMaps)
return Identity{UID: uid, GID: gid}
}
@ -144,6 +187,8 @@ func (i IdentityMapping) Empty() bool {
}
// CurrentIdentity returns the identity of the current process
//
// Deprecated: use [os.Getuid] and [os.Getegid] instead.
func CurrentIdentity() Identity {
return Identity{UID: os.Getuid(), GID: os.Getegid()}
}
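
The deprecations above all point at github.com/moby/sys/user. A small migration sketch, with made-up path and uid/gid values:

package main

import (
	"log"
	"os"

	"github.com/moby/sys/user"
)

func main() {
	// Formerly idtools.MkdirAllAndChown(path, mode, idtools.Identity{UID: 1000, GID: 1000}).
	if err := user.MkdirAllAndChown("/tmp/example-data", 0o755, 1000, 1000); err != nil {
		log.Fatal(err)
	}
	// Formerly idtools.MkdirAllAndChownNew: only newly created directories are chowned.
	if err := user.MkdirAllAndChown("/tmp/example-data/new", 0o755, 1000, 1000, user.WithOnlyNew); err != nil {
		log.Fatal(err)
	}
	// Formerly idtools.CurrentIdentity().
	log.Printf("running as uid=%d gid=%d", os.Getuid(), os.Getegid())
}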

View File

@ -1,166 +0,0 @@
//go:build !windows
package idtools
import (
"fmt"
"os"
"path/filepath"
"strconv"
"syscall"
"github.com/moby/sys/user"
)
func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
path, err := filepath.Abs(path)
if err != nil {
return err
}
stat, err := os.Stat(path)
if err == nil {
if !stat.IsDir() {
return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
}
if !chownExisting {
return nil
}
// short-circuit -- we were called with an existing directory and chown was requested
return setPermissions(path, mode, owner, stat)
}
// make an array containing the original path asked for, plus (for mkAll == true)
// all path components leading up to the complete path that don't exist before we MkdirAll
// so that we can chown all of them properly at the end. If chownExisting is false, we won't
// chown the full directory path if it exists
var paths []string
if os.IsNotExist(err) {
paths = []string{path}
}
if mkAll {
// walk back to "/" looking for directories which do not exist
// and add them to the paths array for chown after creation
dirPath := path
for {
dirPath = filepath.Dir(dirPath)
if dirPath == "/" {
break
}
if _, err = os.Stat(dirPath); err != nil && os.IsNotExist(err) {
paths = append(paths, dirPath)
}
}
if err = os.MkdirAll(path, mode); err != nil {
return err
}
} else if err = os.Mkdir(path, mode); err != nil {
return err
}
// even if it existed, we will chown the requested path + any subpaths that
// didn't exist when we called MkdirAll
for _, pathComponent := range paths {
if err = setPermissions(pathComponent, mode, owner, nil); err != nil {
return err
}
}
return nil
}
// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username
//
// Deprecated: use [user.LookupUser] instead
func LookupUser(name string) (user.User, error) {
return user.LookupUser(name)
}
// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid
//
// Deprecated: use [user.LookupUid] instead
func LookupUID(uid int) (user.User, error) {
return user.LookupUid(uid)
}
// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
//
// Deprecated: use [user.LookupGroup] instead
func LookupGroup(name string) (user.Group, error) {
return user.LookupGroup(name)
}
// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested
// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the
// dir is on an NFS share, so don't call chown unless we absolutely must.
// Likewise for setting permissions.
func setPermissions(p string, mode os.FileMode, owner Identity, stat os.FileInfo) error {
if stat == nil {
var err error
stat, err = os.Stat(p)
if err != nil {
return err
}
}
if stat.Mode().Perm() != mode.Perm() {
if err := os.Chmod(p, mode.Perm()); err != nil {
return err
}
}
ssi := stat.Sys().(*syscall.Stat_t)
if ssi.Uid == uint32(owner.UID) && ssi.Gid == uint32(owner.GID) {
return nil
}
return os.Chown(p, owner.UID, owner.GID)
}
// LoadIdentityMapping takes a requested username and
// using the data from /etc/sub{uid,gid} ranges, creates the
// proper uid and gid remapping ranges for that user/group pair
func LoadIdentityMapping(name string) (IdentityMapping, error) {
// TODO: Consider adding support for calling out to "getent"
usr, err := user.LookupUser(name)
if err != nil {
return IdentityMapping{}, fmt.Errorf("could not get user for username %s: %v", name, err)
}
subuidRanges, err := lookupSubRangesFile("/etc/subuid", usr)
if err != nil {
return IdentityMapping{}, err
}
subgidRanges, err := lookupSubRangesFile("/etc/subgid", usr)
if err != nil {
return IdentityMapping{}, err
}
return IdentityMapping{
UIDMaps: subuidRanges,
GIDMaps: subgidRanges,
}, nil
}
func lookupSubRangesFile(path string, usr user.User) ([]IDMap, error) {
uidstr := strconv.Itoa(usr.Uid)
rangeList, err := user.ParseSubIDFileFilter(path, func(sid user.SubID) bool {
return sid.Name == usr.Name || sid.Name == uidstr
})
if err != nil {
return nil, err
}
if len(rangeList) == 0 {
return nil, fmt.Errorf("no subuid ranges found for user %q", usr.Name)
}
idMap := []IDMap{}
containerID := 0
for _, idrange := range rangeList {
idMap = append(idMap, IDMap{
ContainerID: containerID,
HostID: int(idrange.SubID),
Size: int(idrange.Count),
})
containerID = containerID + int(idrange.Count)
}
return idMap, nil
}

View File

@ -1,9 +1,5 @@
package idtools
import (
"os"
)
const (
SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege"
)
@ -14,11 +10,3 @@ const (
ContainerUserSidString = "S-1-5-93-2-2"
)
// This is currently a wrapper around [os.MkdirAll] since currently
// permissions aren't set through this path, the identity isn't utilized.
// Ownership is handled elsewhere, but in the future could be support here
// too.
func mkdirAs(path string, _ os.FileMode, _ Identity, _, _ bool) error {
return os.MkdirAll(path, 0)
}

View File

@ -4,7 +4,7 @@ import (
"io"
"os"
"github.com/docker/docker/pkg/atomicwriter"
"github.com/moby/sys/atomicwriter"
)
// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a

View File

@ -294,12 +294,15 @@ func isCIDRMatch(cidrs []*registry.NetIPNet, URLHost string) bool {
return false
}
// ValidateMirror validates an HTTP(S) registry mirror. It is used by the daemon
// to validate the daemon configuration.
func ValidateMirror(val string) (string, error) {
uri, err := url.Parse(val)
// ValidateMirror validates and normalizes an HTTP(S) registry mirror. It
// returns an error if the given mirrorURL is invalid, or the normalized
// format for the URL otherwise.
//
// It is used by the daemon to validate the daemon configuration.
func ValidateMirror(mirrorURL string) (string, error) {
uri, err := url.Parse(mirrorURL)
if err != nil {
return "", invalidParamWrapf(err, "invalid mirror: %q is not a valid URI", val)
return "", invalidParamWrapf(err, "invalid mirror: %q is not a valid URI", mirrorURL)
}
if uri.Scheme != "http" && uri.Scheme != "https" {
return "", invalidParamf("invalid mirror: unsupported scheme %q in %q", uri.Scheme, uri)
@ -312,7 +315,7 @@ func ValidateMirror(val string) (string, error) {
uri.User = url.UserPassword(uri.User.Username(), "xxxxx")
return "", invalidParamf("invalid mirror: username/password not allowed in URI %q", uri)
}
return strings.TrimSuffix(val, "/") + "/", nil
return strings.TrimSuffix(mirrorURL, "/") + "/", nil
}
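
A quick sketch of the normalization described above (the mirror URL is an example value):

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/registry"
)

func main() {
	normalized, err := registry.ValidateMirror("https://mirror.example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(normalized) // "https://mirror.example.com/" (trailing "/" appended)
}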
// ValidateIndexName validates an index name. It is used by the daemon to
@ -414,7 +417,6 @@ func newRepositoryInfo(config *serviceConfig, name reference.Named) *RepositoryI
func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) {
indexName := normalizeIndexName(reference.Domain(reposName))
if indexName == IndexName {
officialRepo := !strings.ContainsRune(reference.FamiliarName(reposName), '/')
return &RepositoryInfo{
Name: reference.TrimNamed(reposName),
Index: &registry.IndexInfo{
@ -423,21 +425,16 @@ func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) {
Secure: true,
Official: true,
},
Official: officialRepo,
Official: !strings.ContainsRune(reference.FamiliarName(reposName), '/'),
}, nil
}
insecure := false
if isInsecure(indexName) {
insecure = true
}
return &RepositoryInfo{
Name: reference.TrimNamed(reposName),
Index: &registry.IndexInfo{
Name: indexName,
Mirrors: []string{},
Secure: !insecure,
Secure: !isInsecure(indexName),
},
}, nil
}

View File

@ -29,15 +29,15 @@ func hostCertsDir(hostname string) string {
}
// newTLSConfig constructs a client TLS configuration based on server defaults
func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) {
func newTLSConfig(ctx context.Context, hostname string, isSecure bool) (*tls.Config, error) {
// PreferredServerCipherSuites should have no effect
tlsConfig := tlsconfig.ServerDefault()
tlsConfig.InsecureSkipVerify = !isSecure
if isSecure {
hostDir := hostCertsDir(hostname)
log.G(context.TODO()).Debugf("hostDir: %s", hostDir)
if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil {
log.G(ctx).Debugf("hostDir: %s", hostDir)
if err := loadTLSConfig(ctx, hostDir, tlsConfig); err != nil {
return nil, err
}
}
@ -58,12 +58,24 @@ func hasFile(files []os.DirEntry, name string) bool {
// including roots and certificate pairs and updates the
// provided TLS configuration.
func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error {
return loadTLSConfig(context.TODO(), directory, tlsConfig)
}
// loadTLSConfig reads the directory for TLS certificates including roots and
// certificate pairs, and updates the provided TLS configuration.
func loadTLSConfig(ctx context.Context, directory string, tlsConfig *tls.Config) error {
fs, err := os.ReadDir(directory)
if err != nil && !os.IsNotExist(err) {
if err != nil {
if os.IsNotExist(err) {
return nil
}
return invalidParam(err)
}
for _, f := range fs {
if ctx.Err() != nil {
return ctx.Err()
}
switch filepath.Ext(f.Name()) {
case ".crt":
if tlsConfig.RootCAs == nil {
@ -74,7 +86,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error {
tlsConfig.RootCAs = systemPool
}
fileName := filepath.Join(directory, f.Name())
log.G(context.TODO()).Debugf("crt: %s", fileName)
log.G(ctx).Debugf("crt: %s", fileName)
data, err := os.ReadFile(fileName)
if err != nil {
return err
@ -83,7 +95,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error {
case ".cert":
certName := f.Name()
keyName := certName[:len(certName)-5] + ".key"
log.G(context.TODO()).Debugf("cert: %s", filepath.Join(directory, certName))
log.G(ctx).Debugf("cert: %s", filepath.Join(directory, certName))
if !hasFile(fs, keyName) {
return invalidParamf("missing key %s for client certificate %s. CA certificates must use the extension .crt", keyName, certName)
}
@ -95,7 +107,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error {
case ".key":
keyName := f.Name()
certName := keyName[:len(keyName)-4] + ".cert"
log.G(context.TODO()).Debugf("key: %s", filepath.Join(directory, keyName))
log.G(ctx).Debugf("key: %s", filepath.Join(directory, keyName))
if !hasFile(fs, certName) {
return invalidParamf("missing client certificate %s for key %s", certName, keyName)
}

View File

@ -84,7 +84,6 @@ func (s *Service) Search(ctx context.Context, searchFilters filters.Args, term s
}
func (s *Service) searchUnfiltered(ctx context.Context, term string, limit int, authConfig *registry.AuthConfig, headers http.Header) (*registry.SearchResults, error) {
// TODO Use ctx when searching for repositories
if hasScheme(term) {
return nil, invalidParamf("invalid repository name: repository name (%s) should not have a scheme", term)
}
@ -100,7 +99,7 @@ func (s *Service) searchUnfiltered(ctx context.Context, term string, limit int,
remoteName = strings.TrimPrefix(remoteName, "library/")
}
endpoint, err := newV1Endpoint(index, headers)
endpoint, err := newV1Endpoint(ctx, index, headers)
if err != nil {
return nil, err
}
@ -131,7 +130,7 @@ func (s *Service) searchUnfiltered(ctx context.Context, term string, limit int,
}
}
return newSession(client, endpoint).searchRepositories(remoteName, limit)
return newSession(client, endpoint).searchRepositories(ctx, remoteName, limit)
}
// splitReposSearchTerm breaks a search term into an index name and remote name

View File

@ -4,6 +4,7 @@ import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"net/http"
"net/url"
"strings"
@ -31,8 +32,8 @@ type v1Endpoint struct {
// newV1Endpoint parses the given address to return a registry endpoint.
// TODO: remove. This is only used by search.
func newV1Endpoint(index *registry.IndexInfo, headers http.Header) (*v1Endpoint, error) {
tlsConfig, err := newTLSConfig(index.Name, index.Secure)
func newV1Endpoint(ctx context.Context, index *registry.IndexInfo, headers http.Header) (*v1Endpoint, error) {
tlsConfig, err := newTLSConfig(ctx, index.Name, index.Secure)
if err != nil {
return nil, err
}
@ -50,7 +51,10 @@ func newV1Endpoint(index *registry.IndexInfo, headers http.Header) (*v1Endpoint,
// Try HTTPS ping to registry
endpoint.URL.Scheme = "https"
if _, err := endpoint.ping(); err != nil {
if _, err := endpoint.ping(ctx); err != nil {
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return nil, err
}
if endpoint.IsSecure {
// If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry`
// in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fall back to HTTP.
@ -58,9 +62,9 @@ func newV1Endpoint(index *registry.IndexInfo, headers http.Header) (*v1Endpoint,
}
// registry is insecure and HTTPS failed, fallback to HTTP.
log.G(context.TODO()).WithError(err).Debugf("error from registry %q marked as insecure - insecurely falling back to HTTP", endpoint)
log.G(ctx).WithError(err).Debugf("error from registry %q marked as insecure - insecurely falling back to HTTP", endpoint)
endpoint.URL.Scheme = "http"
if _, err2 := endpoint.ping(); err2 != nil {
if _, err2 := endpoint.ping(ctx); err2 != nil {
return nil, invalidParamf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2)
}
}
@ -109,7 +113,7 @@ func (e *v1Endpoint) String() string {
}
// ping returns a v1PingResult which indicates whether the registry is standalone or not.
func (e *v1Endpoint) ping() (v1PingResult, error) {
func (e *v1Endpoint) ping(ctx context.Context) (v1PingResult, error) {
if e.String() == IndexServer {
// Skip the check, we know this one is valid
// (and we never want to fallback to http in case of error)
@ -117,14 +121,17 @@ func (e *v1Endpoint) ping() (v1PingResult, error) {
}
pingURL := e.String() + "_ping"
log.G(context.TODO()).WithField("url", pingURL).Debug("attempting v1 ping for registry endpoint")
req, err := http.NewRequest(http.MethodGet, pingURL, nil)
log.G(ctx).WithField("url", pingURL).Debug("attempting v1 ping for registry endpoint")
req, err := http.NewRequestWithContext(ctx, http.MethodGet, pingURL, nil)
if err != nil {
return v1PingResult{}, invalidParam(err)
}
resp, err := e.client.Do(req)
if err != nil {
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return v1PingResult{}, err
}
return v1PingResult{}, invalidParam(err)
}
@ -136,7 +143,7 @@ func (e *v1Endpoint) ping() (v1PingResult, error) {
if v == "1" || strings.EqualFold(v, "true") {
info.Standalone = true
}
log.G(context.TODO()).Debugf("v1PingResult.Standalone (from X-Docker-Registry-Standalone header): %t", info.Standalone)
log.G(ctx).Debugf("v1PingResult.Standalone (from X-Docker-Registry-Standalone header): %t", info.Standalone)
return info, nil
}
@ -146,11 +153,11 @@ func (e *v1Endpoint) ping() (v1PingResult, error) {
Standalone: true,
}
if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
log.G(context.TODO()).WithError(err).Debug("error unmarshaling _ping response")
log.G(ctx).WithError(err).Debug("error unmarshaling _ping response")
// don't stop here. Just assume sane defaults
}
log.G(context.TODO()).Debugf("v1PingResult.Standalone: %t", info.Standalone)
log.G(ctx).Debugf("v1PingResult.Standalone: %t", info.Standalone)
return info, nil
}

View File

@ -26,8 +26,8 @@ type session struct {
}
type authTransport struct {
http.RoundTripper
*registry.AuthConfig
base http.RoundTripper
authConfig *registry.AuthConfig
alwaysSetBasicAuth bool
token []string
@ -54,8 +54,8 @@ func newAuthTransport(base http.RoundTripper, authConfig *registry.AuthConfig, a
base = http.DefaultTransport
}
return &authTransport{
RoundTripper: base,
AuthConfig: authConfig,
base: base,
authConfig: authConfig,
alwaysSetBasicAuth: alwaysSetBasicAuth,
modReq: make(map[*http.Request]*http.Request),
}
@ -114,7 +114,7 @@ func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) {
// a 302 redirect is detected by looking at the Referrer header as the Go http package adds said header.
// This is safe as Docker doesn't set Referrer in other scenarios.
if orig.Header.Get("Referer") != "" && !trustedLocation(orig) {
return tr.RoundTripper.RoundTrip(orig)
return tr.base.RoundTrip(orig)
}
req := cloneRequest(orig)
@ -123,22 +123,22 @@ func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) {
tr.mu.Unlock()
if tr.alwaysSetBasicAuth {
if tr.AuthConfig == nil {
if tr.authConfig == nil {
return nil, errors.New("unexpected error: empty auth config")
}
req.SetBasicAuth(tr.Username, tr.Password)
return tr.RoundTripper.RoundTrip(req)
req.SetBasicAuth(tr.authConfig.Username, tr.authConfig.Password)
return tr.base.RoundTrip(req)
}
// Don't override
if req.Header.Get("Authorization") == "" {
if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 {
req.SetBasicAuth(tr.Username, tr.Password)
if req.Header.Get("X-Docker-Token") == "true" && tr.authConfig != nil && len(tr.authConfig.Username) > 0 {
req.SetBasicAuth(tr.authConfig.Username, tr.authConfig.Password)
} else if len(tr.token) > 0 {
req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ","))
}
}
resp, err := tr.RoundTripper.RoundTrip(req)
resp, err := tr.base.RoundTrip(req)
if err != nil {
tr.mu.Lock()
delete(tr.modReq, orig)
@ -164,7 +164,7 @@ func (tr *authTransport) CancelRequest(req *http.Request) {
type canceler interface {
CancelRequest(*http.Request)
}
if cr, ok := tr.RoundTripper.(canceler); ok {
if cr, ok := tr.base.(canceler); ok {
tr.mu.Lock()
modReq := tr.modReq[req]
delete(tr.modReq, req)
@ -179,7 +179,7 @@ func authorizeClient(client *http.Client, authConfig *registry.AuthConfig, endpo
// If we're working with a standalone private registry over HTTPS, send Basic Auth headers
// alongside all our requests.
if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" {
info, err := endpoint.ping()
info, err := endpoint.ping(context.TODO())
if err != nil {
return err
}
@ -213,7 +213,7 @@ func newSession(client *http.Client, endpoint *v1Endpoint) *session {
const defaultSearchLimit = 25
// searchRepositories performs a search against the remote repository
func (r *session) searchRepositories(term string, limit int) (*registry.SearchResults, error) {
func (r *session) searchRepositories(ctx context.Context, term string, limit int) (*registry.SearchResults, error) {
if limit == 0 {
limit = defaultSearchLimit
}
@ -221,9 +221,9 @@ func (r *session) searchRepositories(term string, limit int) (*registry.SearchRe
return nil, invalidParamf("limit %d is outside the range of [1, 100]", limit)
}
u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit))
log.G(context.TODO()).WithField("url", u).Debug("searchRepositories")
log.G(ctx).WithField("url", u).Debug("searchRepositories")
req, err := http.NewRequest(http.MethodGet, u, nil)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
if err != nil {
return nil, invalidParamWrapf(err, "error building request")
}

View File

@ -24,6 +24,9 @@ type Service struct {
// an engine.
func NewService(options ServiceOptions) (*Service, error) {
config, err := newServiceConfig(options)
if err != nil {
return nil, err
}
return &Service{config: config}, err
}
@ -103,6 +106,8 @@ func (s *Service) Auth(ctx context.Context, authConfig *registry.AuthConfig, use
// ResolveRepository splits a repository name into its components
// and configuration of the associated registry.
//
// Deprecated: this function was only used internally and is no longer used. It will be removed in the next release.
func (s *Service) ResolveRepository(name reference.Named) (*RepositoryInfo, error) {
s.mu.RLock()
defer s.mu.RUnlock()
@ -110,12 +115,30 @@ func (s *Service) ResolveRepository(name reference.Named) (*RepositoryInfo, erro
return newRepositoryInfo(s.config, name), nil
}
// ResolveAuthConfig looks up authentication for the given reference from the
// given authConfigs.
//
// IMPORTANT: This function is for internal use and should not be used by external projects.
func (s *Service) ResolveAuthConfig(authConfigs map[string]registry.AuthConfig, ref reference.Named) registry.AuthConfig {
s.mu.RLock()
defer s.mu.RUnlock()
// Simplified version of "newIndexInfo" without handling of insecure
// registries and mirrors, as we don't need that information to resolve
// the auth-config.
indexName := normalizeIndexName(reference.Domain(ref))
registryInfo, ok := s.config.IndexConfigs[indexName]
if !ok {
registryInfo = &registry.IndexInfo{Name: indexName}
}
return ResolveAuthConfig(authConfigs, registryInfo)
}
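
A minimal sketch of calling the new Service.ResolveAuthConfig; the credentials map below is illustrative and assumes entries are keyed the way the CLI's config file stores them (Docker Hub under "https://index.docker.io/v1/"):

package main

import (
	"fmt"
	"log"

	"github.com/distribution/reference"
	registrytypes "github.com/docker/docker/api/types/registry"
	"github.com/docker/docker/registry"
)

func main() {
	svc, err := registry.NewService(registry.ServiceOptions{})
	if err != nil {
		log.Fatal(err)
	}
	ref, err := reference.ParseNormalizedNamed("alpine:latest")
	if err != nil {
		log.Fatal(err)
	}
	auth := svc.ResolveAuthConfig(map[string]registrytypes.AuthConfig{
		"https://index.docker.io/v1/": {Username: "example-user", Password: "example-secret"},
	}, ref)
	fmt.Println("resolved username:", auth.Username)
}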
// APIEndpoint represents a remote API endpoint
type APIEndpoint struct {
Mirror bool
URL *url.URL
AllowNondistributableArtifacts bool // Deprecated: non-distributable artifacts are deprecated and enabled by default. This field will be removed in the next release.
Official bool
Official bool // Deprecated: this field was only used internally, and will be removed in the next release.
TrimHostname bool // Deprecated: hostname is now trimmed unconditionally for remote names. This field will be removed in the next release.
TLSConfig *tls.Config
}

View File

@ -1,6 +1,7 @@
package registry // import "github.com/docker/docker/registry"
import (
"context"
"net/url"
"strings"
@ -8,6 +9,7 @@ import (
)
func (s *Service) lookupV2Endpoints(hostname string, includeMirrors bool) ([]APIEndpoint, error) {
ctx := context.TODO()
var endpoints []APIEndpoint
if hostname == DefaultNamespace || hostname == IndexHostname {
if includeMirrors {
@ -19,7 +21,8 @@ func (s *Service) lookupV2Endpoints(hostname string, includeMirrors bool) ([]API
if err != nil {
return nil, invalidParam(err)
}
mirrorTLSConfig, err := newTLSConfig(mirrorURL.Host, s.config.isSecureIndex(mirrorURL.Host))
// TODO(thaJeztah): this should all be memoized when loading the config. We're resolving mirrors and loading TLS config every time.
mirrorTLSConfig, err := newTLSConfig(ctx, mirrorURL.Host, s.config.isSecureIndex(mirrorURL.Host))
if err != nil {
return nil, err
}
@ -39,7 +42,7 @@ func (s *Service) lookupV2Endpoints(hostname string, includeMirrors bool) ([]API
return endpoints, nil
}
tlsConfig, err := newTLSConfig(hostname, s.config.isSecureIndex(hostname))
tlsConfig, err := newTLSConfig(ctx, hostname, s.config.isSecureIndex(hostname))
if err != nil {
return nil, err
}

vendor/github.com/moby/sys/atomicwriter/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,3 +1,5 @@
// Package atomicwriter provides utilities to perform atomic writes to a
// file or set of files.
package atomicwriter
import (
@ -6,41 +8,42 @@ import (
"io"
"os"
"path/filepath"
"syscall"
"github.com/moby/sys/sequential"
)
func validateDestination(fileName string) error {
if fileName == "" {
return errors.New("file name is empty")
}
if dir := filepath.Dir(fileName); dir != "" && dir != "." && dir != ".." {
di, err := os.Stat(dir)
if err != nil {
return fmt.Errorf("invalid output path: %w", err)
}
if !di.IsDir() {
return fmt.Errorf("invalid output path: %w", &os.PathError{Op: "stat", Path: dir, Err: syscall.ENOTDIR})
}
}
// Deliberately using Lstat here to match the behavior of [os.Rename],
// which is used when completing the write and does not resolve symlinks.
//
// TODO(thaJeztah): decide whether we want to disallow symlinks or to follow them.
if fi, err := os.Lstat(fileName); err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("failed to stat output path: %w", err)
fi, err := os.Lstat(fileName)
if err != nil {
if os.IsNotExist(err) {
return nil
}
} else if err := validateFileMode(fi.Mode()); err != nil {
return err
return fmt.Errorf("failed to stat output path: %w", err)
}
if dir := filepath.Dir(fileName); dir != "" && dir != "." {
if _, err := os.Stat(dir); errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("invalid file path: %w", err)
}
}
return nil
}
func validateFileMode(mode os.FileMode) error {
switch {
switch mode := fi.Mode(); {
case mode.IsRegular():
return nil // Regular file
case mode&os.ModeDir != 0:
return errors.New("cannot write to a directory")
// TODO(thaJeztah): decide whether we want to disallow symlinks or to follow them.
// case mode&os.ModeSymlink != 0:
// return errors.New("cannot write to a symbolic link directly")
case mode&os.ModeSymlink != 0:
return errors.New("cannot write to a symbolic link directly")
case mode&os.ModeNamedPipe != 0:
return errors.New("cannot write to a named pipe (FIFO)")
case mode&os.ModeSocket != 0:
@ -57,8 +60,7 @@ func validateFileMode(mode os.FileMode) error {
case mode&os.ModeSticky != 0:
return errors.New("cannot write to a sticky bit file")
default:
// Unknown file mode; let's assume it works
return nil
return fmt.Errorf("unknown file mode: %[1]s (%#[1]o)", mode)
}
}
@ -66,6 +68,13 @@ func validateFileMode(mode os.FileMode) error {
// temporary file and closing it atomically changes the temporary file to
// destination path. Writing and closing concurrently is not allowed.
// NOTE: umask is not considered for the file's permissions.
//
// New uses [sequential.CreateTemp] to use sequential file access on Windows,
// avoiding depleting the standby list unnecessarily. On Linux, this equates to
// a regular [os.CreateTemp]. Refer to the [Win32 API documentation] for details
// on sequential file access.
//
// [Win32 API documentation]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN
func New(filename string, perm os.FileMode) (io.WriteCloser, error) {
if err := validateDestination(filename); err != nil {
return nil, err
@ -75,7 +84,7 @@ func New(filename string, perm os.FileMode) (io.WriteCloser, error) {
return nil, err
}
f, err := os.CreateTemp(filepath.Dir(abspath), ".tmp-"+filepath.Base(filename))
f, err := sequential.CreateTemp(filepath.Dir(abspath), ".tmp-"+filepath.Base(filename))
if err != nil {
return nil, err
}
@ -86,7 +95,12 @@ func New(filename string, perm os.FileMode) (io.WriteCloser, error) {
}, nil
}
// WriteFile atomically writes data to a file named by filename and with the specified permission bits.
// WriteFile atomically writes data to a file named by filename and with the
// specified permission bits. The given filename is created if it does not exist,
// but the destination directory must exist. It can be used as a drop-in replacement
// for [os.WriteFile], but currently does not allow the destination path to be
// a symlink. WriteFile uses [New] for its implementation.
//
// NOTE: umask is not considered for the file's permissions.
func WriteFile(filename string, data []byte, perm os.FileMode) error {
f, err := New(filename, perm)
@ -197,8 +211,15 @@ func (w syncFileCloser) Close() error {
// FileWriter opens a file writer inside the set. The file
// should be synced and closed before calling commit.
//
// FileWriter uses [sequential.OpenFile] to use sequential file access on Windows,
// avoiding depleting the standby list unnecessarily. On Linux, this equates to
// a regular [os.OpenFile]. Refer to the [Win32 API documentation] for details
// on sequential file access.
//
// [Win32 API documentation]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN
func (ws *WriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) {
f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm)
f, err := sequential.OpenFile(filepath.Join(ws.root, name), flag, perm)
if err != nil {
return nil, err
}
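
To round out the documented behaviour of the vendored package, a minimal WriteFile sketch (the destination path is an example; its parent directory must already exist):

package main

import (
	"log"

	"github.com/moby/sys/atomicwriter"
)

func main() {
	// Data is staged in a temp file and renamed over the destination,
	// so readers never observe a partially written file.
	data := []byte(`{"debug": true}`)
	if err := atomicwriter.WriteFile("/tmp/example/config.json", data, 0o644); err != nil {
		log.Fatal(err)
	}
}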

vendor/modules.txt vendored
View File

@ -60,7 +60,7 @@ github.com/docker/distribution/registry/client/transport
github.com/docker/distribution/registry/storage/cache
github.com/docker/distribution/registry/storage/cache/memory
github.com/docker/distribution/uuid
# github.com/docker/docker v28.0.4+incompatible
# github.com/docker/docker v28.0.5-0.20250407171702-185651d26bc6+incompatible
## explicit
github.com/docker/docker/api
github.com/docker/docker/api/types
@ -90,7 +90,6 @@ github.com/docker/docker/errdefs
github.com/docker/docker/internal/lazyregexp
github.com/docker/docker/internal/multierror
github.com/docker/docker/pkg/archive
github.com/docker/docker/pkg/atomicwriter
github.com/docker/docker/pkg/homedir
github.com/docker/docker/pkg/idtools
github.com/docker/docker/pkg/ioutils
@ -212,6 +211,9 @@ github.com/moby/swarmkit/v2/api/defaults
github.com/moby/swarmkit/v2/api/genericresource
github.com/moby/swarmkit/v2/manager/raftselector
github.com/moby/swarmkit/v2/protobuf/plugin
# github.com/moby/sys/atomicwriter v0.1.0
## explicit; go 1.18
github.com/moby/sys/atomicwriter
# github.com/moby/sys/capability v0.4.0
## explicit; go 1.21
github.com/moby/sys/capability