Diffstat (limited to 'vendor/github.com/docker')
-rw-r--r--  vendor/github.com/docker/docker/AUTHORS | 4
-rw-r--r--  vendor/github.com/docker/docker/api/common.go | 2
-rw-r--r--  vendor/github.com/docker/docker/api/swagger.yaml | 93
-rw-r--r--  vendor/github.com/docker/docker/api/types/image/image_inspect.go | 5
-rw-r--r--  vendor/github.com/docker/docker/api/types/image/opts.go | 5
-rw-r--r--  vendor/github.com/docker/docker/api/types/registry/registry.go | 28
-rw-r--r--  vendor/github.com/docker/docker/api/types/system/info.go | 11
-rw-r--r--  vendor/github.com/docker/docker/client/container_commit.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/image_create.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/image_inspect.go | 11
-rw-r--r--  vendor/github.com/docker/docker/client/image_inspect_opts.go | 12
-rw-r--r--  vendor/github.com/docker/docker/client/image_pull.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/image_push.go | 5
-rw-r--r--  vendor/github.com/docker/docker/client/image_tag.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/request.go | 2
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive.go | 1507
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_deprecated.go | 259
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_linux.go | 107
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_other.go | 7
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_unix.go | 126
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_windows.go | 69
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes.go | 430
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_deprecated.go | 56
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_linux.go | 281
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_other.go | 95
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_unix.go | 43
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_windows.go | 33
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/copy.go | 497
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/copy_deprecated.go | 130
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/copy_unix.go | 11
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/copy_windows.go | 9
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/dev_freebsd.go | 7
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/dev_unix.go | 9
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/diff.go | 258
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/diff_deprecated.go | 37
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/diff_unix.go | 21
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/diff_windows.go | 6
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/path.go | 20
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/path_deprecated.go | 10
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/path_unix.go | 9
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/path_windows.go | 22
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/time.go | 38
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/time_nonwindows.go | 41
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/time_windows.go | 32
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/utils.go | 42
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/whiteouts.go | 23
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/whiteouts_deprecated.go | 10
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/wrap.go | 59
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/wrap_deprecated.go | 14
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/xattr_supported.go | 52
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/xattr_supported_linux.go | 5
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/xattr_supported_unix.go | 7
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/xattr_unsupported.go | 11
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/idtools.go | 78
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go | 166
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go | 12
56 files changed, 752 insertions(+), 4083 deletions(-)
diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS
index 88032de..a2e972e 100644
--- a/vendor/github.com/docker/docker/AUTHORS
+++ b/vendor/github.com/docker/docker/AUTHORS
@@ -293,6 +293,7 @@ Brandon Liu <bdon@bdon.org>
Brandon Philips <brandon.philips@coreos.com>
Brandon Rhodes <brandon@rhodesmill.org>
Brendan Dixon <brendand@microsoft.com>
+Brendon Smith <bws@bws.bio>
Brennan Kinney <5098581+polarathene@users.noreply.github.com>
Brent Salisbury <brent.salisbury@docker.com>
Brett Higgins <brhiggins@arbor.net>
@@ -347,6 +348,7 @@ Casey Bisson <casey.bisson@joyent.com>
Catalin Pirvu <pirvu.catalin94@gmail.com>
Ce Gao <ce.gao@outlook.com>
Cedric Davies <cedricda@microsoft.com>
+Cesar Talledo <cesar.talledo@docker.com>
Cezar Sa Espinola <cezarsa@gmail.com>
Chad Swenson <chadswen@gmail.com>
Chance Zibolski <chance.zibolski@gmail.com>
@@ -1281,6 +1283,7 @@ Krasi Georgiev <krasi@vip-consult.solutions>
Krasimir Georgiev <support@vip-consult.co.uk>
Kris-Mikael Krister <krismikael@protonmail.com>
Kristian Haugene <kristian.haugene@capgemini.com>
+Kristian Heljas <kristian@kristian.ee>
Kristina Zabunova <triara.xiii@gmail.com>
Krystian Wojcicki <kwojcicki@sympatico.ca>
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
@@ -1712,6 +1715,7 @@ Patrick Hemmer <patrick.hemmer@gmail.com>
Patrick St. laurent <patrick@saint-laurent.us>
Patrick Stapleton <github@gdi2290.com>
Patrik Cyvoct <patrik@ptrk.io>
+Patrik Leifert <patrikleifert@hotmail.com>
pattichen <craftsbear@gmail.com>
Paul "TBBle" Hampson <Paul.Hampson@Pobox.com>
Paul <paul9869@gmail.com>
diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go
index 2c62cd4..d75c43d 100644
--- a/vendor/github.com/docker/docker/api/common.go
+++ b/vendor/github.com/docker/docker/api/common.go
@@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api"
// Common constants for daemon and client.
const (
// DefaultVersion of the current REST API.
- DefaultVersion = "1.48"
+ DefaultVersion = "1.49"
// MinSupportedAPIVersion is the minimum API version that can be supported
// by the API server, specified as "major.minor". Note that the daemon
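The version bump above only changes the default; callers can still pin or negotiate the API version explicitly. A minimal sketch using the existing client options (usage illustration, not part of this diff):

package main

import (
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// Pin the client to API 1.49, or use client.WithAPIVersionNegotiation()
	// to let the client downgrade automatically for older daemons.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.49"))
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	fmt.Println("client API version:", cli.ClientVersion())
}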
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
index 646032d..1183aaf 100644
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -19,10 +19,10 @@ produces:
consumes:
- "application/json"
- "text/plain"
-basePath: "/v1.48"
+basePath: "/v1.49"
info:
title: "Docker Engine API"
- version: "1.48"
+ version: "1.49"
x-logo:
url: "https://docs.docker.com/assets/images/logo-docker-main.png"
description: |
@@ -55,8 +55,8 @@ info:
the URL is not supported by the daemon, a HTTP `400 Bad Request` error message
is returned.
- If you omit the version-prefix, the current version of the API (v1.48) is used.
- For example, calling `/info` is the same as calling `/v1.48/info`. Using the
+ If you omit the version-prefix, the current version of the API (v1.49) is used.
+ For example, calling `/info` is the same as calling `/v1.49/info`. Using the
API without a version-prefix is deprecated and will be removed in a future release.
Engine releases in the near future should support this version of the API,
@@ -6856,6 +6856,8 @@ definitions:
description: "The network pool size"
type: "integer"
example: "24"
+ FirewallBackend:
+ $ref: "#/definitions/FirewallInfo"
Warnings:
description: |
List of warnings / informational messages about missing features, or
@@ -6939,6 +6941,37 @@ definitions:
default: "plugins.moby"
example: "plugins.moby"
+ FirewallInfo:
+ description: |
+ Information about the daemon's firewalling configuration.
+
+ This field is currently only used on Linux, and omitted on other platforms.
+ type: "object"
+ x-nullable: true
+ properties:
+ Driver:
+ description: |
+ The name of the firewall backend driver.
+ type: "string"
+ example: "nftables"
+ Info:
+ description: |
+ Information about the firewall backend, provided as
+ "label" / "value" pairs.
+
+ <p><br /></p>
+
+ > **Note**: The information returned in this field, including the
+ > formatting of values and labels, should not be considered stable,
+ > and may change without notice.
+ type: "array"
+ items:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - ["ReloadedAt", "2025-01-01T00:00:00Z"]
+
# PluginsInfo is a temp struct holding Plugins name
# registered with docker daemon. It is used by Info struct
PluginsInfo:
@@ -6984,32 +7017,6 @@ definitions:
type: "object"
x-nullable: true
properties:
- AllowNondistributableArtifactsCIDRs:
- description: |
- List of IP ranges to which nondistributable artifacts can be pushed,
- using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632).
-
- <p><br /></p>
-
- > **Deprecated**: Pushing nondistributable artifacts is now always enabled
- > and this field is always `null`. This field will be removed in a API v1.49.
- type: "array"
- items:
- type: "string"
- example: []
- AllowNondistributableArtifactsHostnames:
- description: |
- List of registry hostnames to which nondistributable artifacts can be
- pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`.
-
- <p><br /></p>
-
- > **Deprecated**: Pushing nondistributable artifacts is now always enabled
- > and this field is always `null`. This field will be removed in a API v1.49.
- type: "array"
- items:
- type: "string"
- example: []
InsecureRegistryCIDRs:
description: |
List of IP ranges of insecure registries, using the CIDR syntax
@@ -7179,13 +7186,6 @@ definitions:
description: "Actual commit ID of external tool."
type: "string"
example: "cfb82a876ecc11b5ca0977d1733adbe58599088a"
- Expected:
- description: |
- Commit ID of external tool expected by dockerd as set at build time.
-
- **Deprecated**: This field is deprecated and will be omitted in a API v1.49.
- type: "string"
- example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4"
SwarmInfo:
description: |
@@ -10491,13 +10491,9 @@ paths:
### Image tarball format
- An image tarball contains one directory per image layer (named using its long ID), each containing these files:
-
- - `VERSION`: currently `1.0` - the file format version
- - `json`: detailed layer information, similar to `docker inspect layer_id`
- - `layer.tar`: A tarfile containing the filesystem changes in this layer
+ An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content).
- The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions.
+ Additionally, it includes the manifest.json file associated with the backwards-compatible docker save format.
If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs.
@@ -10537,6 +10533,7 @@ paths:
If not provided, the full multi-platform image will be saved.
Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
+ tags: ["Image"]
/images/get:
get:
summary: "Export several images"
@@ -10571,6 +10568,16 @@ paths:
type: "array"
items:
type: "string"
+ - name: "platform"
+ type: "string"
+ in: "query"
+ description: |
+ JSON encoded OCI platform describing a platform which will be used
+ to select a platform-specific image to be saved if the image is
+ multi-platform.
+ If not provided, the full multi-platform image will be saved.
+
+ Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
tags: ["Image"]
/images/load:
post:
diff --git a/vendor/github.com/docker/docker/api/types/image/image_inspect.go b/vendor/github.com/docker/docker/api/types/image/image_inspect.go
index 78e81f0..40d1f97 100644
--- a/vendor/github.com/docker/docker/api/types/image/image_inspect.go
+++ b/vendor/github.com/docker/docker/api/types/image/image_inspect.go
@@ -128,11 +128,12 @@ type InspectResponse struct {
// compatibility.
Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"`
- // Manifests is a list of image manifests available in this image. It
+ // Manifests is a list of image manifests available in this image. It
// provides a more detailed view of the platform-specific image manifests or
// other image-attached data like build attestations.
//
- // Only available if the daemon provides a multi-platform image store.
+ // Only available if the daemon provides a multi-platform image store, the client
+ // requests manifests AND does not request a specific platform.
//
// WARNING: This is experimental and may change at any time without any backward
// compatibility.
diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go
index 919510f..57800e0 100644
--- a/vendor/github.com/docker/docker/api/types/image/opts.go
+++ b/vendor/github.com/docker/docker/api/types/image/opts.go
@@ -106,6 +106,11 @@ type LoadOptions struct {
type InspectOptions struct {
// Manifests returns the image manifests.
Manifests bool
+
+ // Platform selects the specific platform of a multi-platform image to inspect.
+ //
+ // This option is only available for API version 1.49 and up.
+ Platform *ocispec.Platform
}
// SaveOptions holds parameters to save images.
diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go
index 8117cb0..14c82aa 100644
--- a/vendor/github.com/docker/docker/api/types/registry/registry.go
+++ b/vendor/github.com/docker/docker/api/types/registry/registry.go
@@ -1,3 +1,6 @@
+// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
+//go:build go1.23
+
package registry // import "github.com/docker/docker/api/types/registry"
import (
@@ -15,23 +18,26 @@ type ServiceConfig struct {
InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
Mirrors []string
+
+ // ExtraFields is for internal use to include deprecated fields on older API versions.
+ ExtraFields map[string]any `json:"-"`
}
// MarshalJSON implements a custom marshaler to include legacy fields
// in API responses.
-func (sc ServiceConfig) MarshalJSON() ([]byte, error) {
- tmp := map[string]interface{}{
- "InsecureRegistryCIDRs": sc.InsecureRegistryCIDRs,
- "IndexConfigs": sc.IndexConfigs,
- "Mirrors": sc.Mirrors,
- }
- if sc.AllowNondistributableArtifactsCIDRs != nil {
- tmp["AllowNondistributableArtifactsCIDRs"] = nil
+func (sc *ServiceConfig) MarshalJSON() ([]byte, error) {
+ type tmp ServiceConfig
+ base, err := json.Marshal((*tmp)(sc))
+ if err != nil {
+ return nil, err
}
- if sc.AllowNondistributableArtifactsHostnames != nil {
- tmp["AllowNondistributableArtifactsHostnames"] = nil
+ var merged map[string]any
+ _ = json.Unmarshal(base, &merged)
+
+ for k, v := range sc.ExtraFields {
+ merged[k] = v
}
- return json.Marshal(tmp)
+ return json.Marshal(merged)
}
// NetIPNet is the net.IPNet type, which can be marshalled and
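The rewritten MarshalJSON above replaces a hand-built map with a marshal-and-merge pattern: a local type alias strips the method set (avoiding infinite recursion), the result is unmarshalled into a generic map, and ExtraFields is merged in before re-marshalling. A self-contained sketch of the same pattern, using a hypothetical Config type rather than the real ServiceConfig:

package main

import (
	"encoding/json"
	"fmt"
)

type Config struct {
	Mirrors []string

	// ExtraFields is merged into the JSON output at marshal time.
	ExtraFields map[string]any `json:"-"`
}

func (c *Config) MarshalJSON() ([]byte, error) {
	type alias Config // alias has no MarshalJSON method, so no recursion
	base, err := json.Marshal((*alias)(c))
	if err != nil {
		return nil, err
	}
	var merged map[string]any
	if err := json.Unmarshal(base, &merged); err != nil {
		return nil, err
	}
	for k, v := range c.ExtraFields {
		merged[k] = v
	}
	return json.Marshal(merged)
}

func main() {
	c := &Config{
		Mirrors:     []string{"https://mirror.example.com"},
		ExtraFields: map[string]any{"AllowNondistributableArtifactsCIDRs": nil},
	}
	out, _ := json.Marshal(c)
	fmt.Println(string(out))
}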
diff --git a/vendor/github.com/docker/docker/api/types/system/info.go b/vendor/github.com/docker/docker/api/types/system/info.go
index 8a2444d..27173d4 100644
--- a/vendor/github.com/docker/docker/api/types/system/info.go
+++ b/vendor/github.com/docker/docker/api/types/system/info.go
@@ -73,6 +73,7 @@ type Info struct {
SecurityOptions []string
ProductLicense string `json:",omitempty"`
DefaultAddressPools []NetworkAddressPool `json:",omitempty"`
+ FirewallBackend *FirewallInfo `json:"FirewallBackend,omitempty"`
CDISpecDirs []string
Containerd *ContainerdInfo `json:",omitempty"`
@@ -143,7 +144,7 @@ type Commit struct {
// Expected is the commit ID of external tool expected by dockerd as set at build time.
//
// Deprecated: this field is no longer used in API v1.49, but kept for backward-compatibility with older API versions.
- Expected string
+ Expected string `json:",omitempty"`
}
// NetworkAddressPool is a temp struct used by [Info] struct.
@@ -151,3 +152,11 @@ type NetworkAddressPool struct {
Base string
Size int
}
+
+// FirewallInfo describes the firewall backend.
+type FirewallInfo struct {
+ // Driver is the name of the firewall backend driver.
+ Driver string `json:"Driver"`
+ // Info is a list of label/value pairs, containing information related to the firewall.
+ Info [][2]string `json:"Info,omitempty"`
+}
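A sketch of reading the new field from the client side; per the swagger definition above the field is nullable, so it is nil on non-Linux daemons and on older API versions:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	info, err := cli.Info(context.Background())
	if err != nil {
		panic(err)
	}
	// FirewallBackend is nil when the daemon does not report it.
	if fw := info.FirewallBackend; fw != nil {
		fmt.Println("firewall driver:", fw.Driver)
		for _, kv := range fw.Info {
			fmt.Printf("  %s = %s\n", kv[0], kv[1])
		}
	}
}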
diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go
index 9b46a1f..4838ac7 100644
--- a/vendor/github.com/docker/docker/client/container_commit.go
+++ b/vendor/github.com/docker/docker/client/container_commit.go
@@ -32,7 +32,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, containerID string, opti
if tagged, ok := ref.(reference.Tagged); ok {
tag = tagged.Tag()
}
- repository = reference.FamiliarName(ref)
+ repository = ref.Name()
}
query := url.Values{}
diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go
index 0357051..1aa061e 100644
--- a/vendor/github.com/docker/docker/client/image_create.go
+++ b/vendor/github.com/docker/docker/client/image_create.go
@@ -21,7 +21,7 @@ func (cli *Client) ImageCreate(ctx context.Context, parentReference string, opti
}
query := url.Values{}
- query.Set("fromImage", reference.FamiliarName(ref))
+ query.Set("fromImage", ref.Name())
query.Set("tag", getAPITagFromNamedRef(ref))
if options.Platform != "" {
query.Set("platform", strings.ToLower(options.Platform))
diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go
index 1161195..d88f0f1 100644
--- a/vendor/github.com/docker/docker/client/image_inspect.go
+++ b/vendor/github.com/docker/docker/client/image_inspect.go
@@ -32,6 +32,17 @@ func (cli *Client) ImageInspect(ctx context.Context, imageID string, inspectOpts
query.Set("manifests", "1")
}
+ if opts.apiOptions.Platform != nil {
+ if err := cli.NewVersionError(ctx, "1.49", "platform"); err != nil {
+ return image.InspectResponse{}, err
+ }
+ platform, err := encodePlatform(opts.apiOptions.Platform)
+ if err != nil {
+ return image.InspectResponse{}, err
+ }
+ query.Set("platform", platform)
+ }
+
resp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil)
defer ensureReaderClosed(resp)
if err != nil {
diff --git a/vendor/github.com/docker/docker/client/image_inspect_opts.go b/vendor/github.com/docker/docker/client/image_inspect_opts.go
index 2607f36..655cbf0 100644
--- a/vendor/github.com/docker/docker/client/image_inspect_opts.go
+++ b/vendor/github.com/docker/docker/client/image_inspect_opts.go
@@ -4,6 +4,7 @@ import (
"bytes"
"github.com/docker/docker/api/types/image"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// ImageInspectOption is a type representing functional options for the image inspect operation.
@@ -36,6 +37,17 @@ func ImageInspectWithManifests(manifests bool) ImageInspectOption {
})
}
+// ImageInspectWithPlatform sets platform API option for the image inspect operation.
+// This option is only available for API version 1.49 and up.
+// With this option set, the image inspect operation will return information for the
+// specified platform variant of the multi-platform image.
+func ImageInspectWithPlatform(platform *ocispec.Platform) ImageInspectOption {
+ return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error {
+ clientOpts.apiOptions.Platform = platform
+ return nil
+ })
+}
+
// ImageInspectWithAPIOpts sets the API options for the image inspect operation.
func ImageInspectWithAPIOpts(opts image.InspectOptions) ImageInspectOption {
return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error {
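A sketch of the new option in use; it requires a daemon supporting API 1.49+ (older daemons hit the NewVersionError path shown above), and the image name is illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Inspect the linux/arm64 variant of a multi-platform image.
	resp, err := cli.ImageInspect(context.Background(), "alpine:3.21",
		client.ImageInspectWithPlatform(&ocispec.Platform{OS: "linux", Architecture: "arm64"}),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.ID, resp.Architecture)
}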
diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go
index 4286942..f5fe85d 100644
--- a/vendor/github.com/docker/docker/client/image_pull.go
+++ b/vendor/github.com/docker/docker/client/image_pull.go
@@ -26,7 +26,7 @@ func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.P
}
query := url.Values{}
- query.Set("fromImage", reference.FamiliarName(ref))
+ query.Set("fromImage", ref.Name())
if !options.All {
query.Set("tag", getAPITagFromNamedRef(ref))
}
diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go
index b340bc4..1a343f4 100644
--- a/vendor/github.com/docker/docker/client/image_push.go
+++ b/vendor/github.com/docker/docker/client/image_push.go
@@ -29,7 +29,6 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu
return nil, errors.New("cannot push a digest reference")
}
- name := reference.FamiliarName(ref)
query := url.Values{}
if !options.All {
ref = reference.TagNameOnly(ref)
@@ -52,13 +51,13 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu
query.Set("platform", string(pJson))
}
- resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth)
+ resp, err := cli.tryImagePush(ctx, ref.Name(), query, options.RegistryAuth)
if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx)
if privilegeErr != nil {
return nil, privilegeErr
}
- resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader)
+ resp, err = cli.tryImagePush(ctx, ref.Name(), query, newAuthHeader)
}
if err != nil {
return nil, err
diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go
index ea6b4a1..25c7360 100644
--- a/vendor/github.com/docker/docker/client/image_tag.go
+++ b/vendor/github.com/docker/docker/client/image_tag.go
@@ -26,7 +26,7 @@ func (cli *Client) ImageTag(ctx context.Context, source, target string) error {
ref = reference.TagNameOnly(ref)
query := url.Values{}
- query.Set("repo", reference.FamiliarName(ref))
+ query.Set("repo", ref.Name())
if tagged, ok := ref.(reference.Tagged); ok {
query.Set("tag", tagged.Tag())
}
diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go
index 2b913aa..4cc6435 100644
--- a/vendor/github.com/docker/docker/client/request.go
+++ b/vendor/github.com/docker/docker/client/request.go
@@ -237,7 +237,7 @@ func (cli *Client) checkResponseErr(serverResp *http.Response) (retErr error) {
}
var daemonErr error
- if serverResp.Header.Get("Content-Type") == "application/json" && (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) {
+ if serverResp.Header.Get("Content-Type") == "application/json" {
var errorResponse types.ErrorResponse
if err := json.Unmarshal(body, &errorResponse); err != nil {
return errors.Wrap(err, "Error reading JSON")
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go
deleted file mode 100644
index 9bbb11c..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/archive.go
+++ /dev/null
@@ -1,1507 +0,0 @@
-// Package archive provides helper functions for dealing with archive files.
-package archive
-
-import (
- "archive/tar"
- "bufio"
- "bytes"
- "compress/bzip2"
- "compress/gzip"
- "context"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "os"
- "os/exec"
- "path/filepath"
- "runtime"
- "runtime/debug"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "syscall"
- "time"
-
- "github.com/containerd/log"
- "github.com/docker/docker/pkg/idtools"
- "github.com/klauspost/compress/zstd"
- "github.com/moby/patternmatcher"
- "github.com/moby/sys/sequential"
-)
-
-// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a
-// tar, but that do not have their own header entry.
-//
-// The permissions mask is stored in a constant instead of locally to ensure that magic numbers do not
-// proliferate in the codebase. The default value 0755 has been selected based on the default umask of 0022, and
-// a convention of mkdir(1) calling mkdir(2) with permissions of 0777, resulting in a final value of 0755.
-//
-// This value is currently implementation-defined, and not captured in any cross-runtime specification. Thus, it is
-// subject to change in Moby at any time -- image authors who require consistent or known directory permissions
-// should explicitly control them by ensuring that header entries exist for any applicable path.
-const ImpliedDirectoryMode = 0o755
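The 0o755 derivation stated in the comment can be checked directly: mkdir(2)'s conventional 0o777 mode masked by the default 0o022 umask leaves 0o755. A one-line verification (not part of the removed file):

package main

import "fmt"

func main() {
	// 0o777 with the umask bits cleared yields 0o755.
	fmt.Printf("%o\n", 0o777&^0o022) // 755
}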
-
-type (
- // Compression is the state represents if compressed or not.
- Compression int
- // WhiteoutFormat is the format of whiteouts unpacked
- WhiteoutFormat int
-
- // TarOptions wraps the tar options.
- TarOptions struct {
- IncludeFiles []string
- ExcludePatterns []string
- Compression Compression
- NoLchown bool
- IDMap idtools.IdentityMapping
- ChownOpts *idtools.Identity
- IncludeSourceDir bool
- // WhiteoutFormat is the expected on disk format for whiteout files.
- // This format will be converted to the standard format on pack
- // and from the standard format on unpack.
- WhiteoutFormat WhiteoutFormat
- // When unpacking, specifies whether overwriting a directory with a
- // non-directory is allowed and vice versa.
- NoOverwriteDirNonDir bool
- // For each include when creating an archive, the included name will be
- // replaced with the matching name from this map.
- RebaseNames map[string]string
- InUserNS bool
- // Allow unpacking to succeed in spite of failures to set extended
- // attributes on the unpacked files due to the destination filesystem
- // not supporting them or a lack of permissions. Extended attributes
- // were probably in the archive for a reason, so set this option at
- // your own peril.
- BestEffortXattrs bool
- }
-)
-
-// Archiver implements the Archiver interface and allows the reuse of most utility functions of
-// this package with a pluggable Untar function. Also, to facilitate the passing of specific id
-// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
-type Archiver struct {
- Untar func(io.Reader, string, *TarOptions) error
- IDMapping idtools.IdentityMapping
-}
-
-// NewDefaultArchiver returns a new Archiver without any IdentityMapping
-func NewDefaultArchiver() *Archiver {
- return &Archiver{Untar: Untar}
-}
-
-// breakoutError is used to differentiate errors related to breaking out
-// When testing archive breakout in the unit tests, this error is expected
-// in order for the test to pass.
-type breakoutError error
-
-const (
- Uncompressed Compression = 0 // Uncompressed represents the uncompressed.
- Bzip2 Compression = 1 // Bzip2 is bzip2 compression algorithm.
- Gzip Compression = 2 // Gzip is gzip compression algorithm.
- Xz Compression = 3 // Xz is xz compression algorithm.
- Zstd Compression = 4 // Zstd is zstd compression algorithm.
-)
-
-const (
- AUFSWhiteoutFormat WhiteoutFormat = 0 // AUFSWhiteoutFormat is the default format for whiteouts
- OverlayWhiteoutFormat WhiteoutFormat = 1 // OverlayWhiteoutFormat formats whiteout according to the overlay standard.
-)
-
-// IsArchivePath checks if the (possibly compressed) file at the given path
-// starts with a tar file header.
-func IsArchivePath(path string) bool {
- file, err := os.Open(path)
- if err != nil {
- return false
- }
- defer file.Close()
- rdr, err := DecompressStream(file)
- if err != nil {
- return false
- }
- defer rdr.Close()
- r := tar.NewReader(rdr)
- _, err = r.Next()
- return err == nil
-}
-
-const (
- zstdMagicSkippableStart = 0x184D2A50
- zstdMagicSkippableMask = 0xFFFFFFF0
-)
-
-var (
- bzip2Magic = []byte{0x42, 0x5A, 0x68}
- gzipMagic = []byte{0x1F, 0x8B, 0x08}
- xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}
- zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
-)
-
-type matcher = func([]byte) bool
-
-func magicNumberMatcher(m []byte) matcher {
- return func(source []byte) bool {
- return bytes.HasPrefix(source, m)
- }
-}
-
-// zstdMatcher detects zstd compression algorithm.
-// Zstandard compressed data is made of one or more frames.
-// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames.
-// See https://datatracker.ietf.org/doc/html/rfc8878#section-3 for more details.
-func zstdMatcher() matcher {
- return func(source []byte) bool {
- if bytes.HasPrefix(source, zstdMagic) {
- // Zstandard frame
- return true
- }
- // skippable frame
- if len(source) < 8 {
- return false
- }
- // magic number from 0x184D2A50 to 0x184D2A5F.
- if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart {
- return true
- }
- return false
- }
-}
-
-// DetectCompression detects the compression algorithm of the source.
-func DetectCompression(source []byte) Compression {
- compressionMap := map[Compression]matcher{
- Bzip2: magicNumberMatcher(bzip2Magic),
- Gzip: magicNumberMatcher(gzipMagic),
- Xz: magicNumberMatcher(xzMagic),
- Zstd: zstdMatcher(),
- }
- for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} {
- fn := compressionMap[compression]
- if fn(source) {
- return compression
- }
- }
- return Uncompressed
-}
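DetectCompression above is pure magic-byte sniffing. A standalone check equivalent to its gzip branch, with the magic values taken from the constants above (a sketch, not the removed API):

package main

import (
	"bytes"
	"fmt"
)

func isGzip(header []byte) bool {
	// gzip streams start with 0x1F 0x8B, followed by the deflate method byte 0x08.
	return bytes.HasPrefix(header, []byte{0x1F, 0x8B, 0x08})
}

func main() {
	fmt.Println(isGzip([]byte{0x1F, 0x8B, 0x08, 0x00})) // true
	fmt.Println(isGzip([]byte("plain tar data")))       // false
}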
-
-func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) {
- args := []string{"xz", "-d", "-c", "-q"}
-
- return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive)
-}
-
-func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
- if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" {
- noPigz, err := strconv.ParseBool(noPigzEnv)
- if err != nil {
- log.G(ctx).WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var")
- }
- if noPigz {
- log.G(ctx).Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv)
- return gzip.NewReader(buf)
- }
- }
-
- unpigzPath, err := exec.LookPath("unpigz")
- if err != nil {
- log.G(ctx).Debugf("unpigz binary not found, falling back to go gzip library")
- return gzip.NewReader(buf)
- }
-
- log.G(ctx).Debugf("Using %s to decompress", unpigzPath)
-
- return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
-}
-
-type readCloserWrapper struct {
- io.Reader
- closer func() error
- closed atomic.Bool
-}
-
-func (r *readCloserWrapper) Close() error {
- if !r.closed.CompareAndSwap(false, true) {
- log.G(context.TODO()).Error("subsequent attempt to close readCloserWrapper")
- if log.GetLevel() >= log.DebugLevel {
- log.G(context.TODO()).Errorf("stack trace: %s", string(debug.Stack()))
- }
-
- return nil
- }
- if r.closer != nil {
- return r.closer()
- }
- return nil
-}
-
-var bufioReader32KPool = &sync.Pool{
- New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) },
-}
-
-type bufferedReader struct {
- buf *bufio.Reader
-}
-
-func newBufferedReader(r io.Reader) *bufferedReader {
- buf := bufioReader32KPool.Get().(*bufio.Reader)
- buf.Reset(r)
- return &bufferedReader{buf}
-}
-
-func (r *bufferedReader) Read(p []byte) (int, error) {
- if r.buf == nil {
- return 0, io.EOF
- }
- n, err := r.buf.Read(p)
- if err == io.EOF {
- r.buf.Reset(nil)
- bufioReader32KPool.Put(r.buf)
- r.buf = nil
- }
- return n, err
-}
-
-func (r *bufferedReader) Peek(n int) ([]byte, error) {
- if r.buf == nil {
- return nil, io.EOF
- }
- return r.buf.Peek(n)
-}
-
-// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
-func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
- buf := newBufferedReader(archive)
- bs, err := buf.Peek(10)
- if err != nil && err != io.EOF {
- // Note: we'll ignore any io.EOF error because there are some odd
- // cases where the layer.tar file will be empty (zero bytes) and
- // that results in an io.EOF from the Peek() call. So, in those
- // cases we'll just treat it as a non-compressed stream and
- // that means just create an empty layer.
- // See Issue 18170
- return nil, err
- }
-
- compression := DetectCompression(bs)
- switch compression {
- case Uncompressed:
- return &readCloserWrapper{
- Reader: buf,
- }, nil
- case Gzip:
- ctx, cancel := context.WithCancel(context.Background())
-
- gzReader, err := gzDecompress(ctx, buf)
- if err != nil {
- cancel()
- return nil, err
- }
- return &readCloserWrapper{
- Reader: gzReader,
- closer: func() error {
- cancel()
- return gzReader.Close()
- },
- }, nil
- case Bzip2:
- bz2Reader := bzip2.NewReader(buf)
- return &readCloserWrapper{
- Reader: bz2Reader,
- }, nil
- case Xz:
- ctx, cancel := context.WithCancel(context.Background())
-
- xzReader, err := xzDecompress(ctx, buf)
- if err != nil {
- cancel()
- return nil, err
- }
-
- return &readCloserWrapper{
- Reader: xzReader,
- closer: func() error {
- cancel()
- return xzReader.Close()
- },
- }, nil
- case Zstd:
- zstdReader, err := zstd.NewReader(buf)
- if err != nil {
- return nil, err
- }
- return &readCloserWrapper{
- Reader: zstdReader,
- closer: func() error {
- zstdReader.Close()
- return nil
- },
- }, nil
- default:
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
- }
-}
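Typical usage of the removed DecompressStream helper was: sniff, decompress, then iterate tar entries. A sketch against the package as it existed before this change (file name hypothetical):

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"

	"github.com/docker/docker/pkg/archive" // import path as before this change
)

func main() {
	f, err := os.Open("layer.tar.gz") // hypothetical input
	if err != nil {
		panic(err)
	}
	defer f.Close()

	rdr, err := archive.DecompressStream(f) // detects gzip/bzip2/xz/zstd from magic bytes
	if err != nil {
		panic(err)
	}
	defer rdr.Close()

	tr := tar.NewReader(rdr)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(hdr.Name)
	}
}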
-
-type nopWriteCloser struct {
- io.Writer
-}
-
-func (nopWriteCloser) Close() error { return nil }
-
-// CompressStream compresses the dest with specified compression algorithm.
-func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
- switch compression {
- case Uncompressed:
- return nopWriteCloser{dest}, nil
- case Gzip:
- return gzip.NewWriter(dest), nil
- case Bzip2, Xz:
- // archive/bzip2 does not support writing, and there is no xz support at all
- // However, this is not a problem as docker only currently generates gzipped tars
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
- default:
- return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
- }
-}
-
-// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to
-// modify the contents or header of an entry in the archive. If the file already
-// exists in the archive the TarModifierFunc will be called with the Header and
-// a reader which will return the files content. If the file does not exist both
-// header and content will be nil.
-type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)
-
-// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the
-// tar stream are modified if they match any of the keys in mods.
-func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser {
- pipeReader, pipeWriter := io.Pipe()
-
- go func() {
- tarReader := tar.NewReader(inputTarStream)
- tarWriter := tar.NewWriter(pipeWriter)
- defer inputTarStream.Close()
- defer tarWriter.Close()
-
- modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error {
- header, data, err := modifier(name, original, tarReader)
- switch {
- case err != nil:
- return err
- case header == nil:
- return nil
- }
-
- if header.Name == "" {
- header.Name = name
- }
- header.Size = int64(len(data))
- if err := tarWriter.WriteHeader(header); err != nil {
- return err
- }
- if len(data) != 0 {
- if _, err := tarWriter.Write(data); err != nil {
- return err
- }
- }
- return nil
- }
-
- var err error
- var originalHeader *tar.Header
- for {
- originalHeader, err = tarReader.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- pipeWriter.CloseWithError(err)
- return
- }
-
- modifier, ok := mods[originalHeader.Name]
- if !ok {
- // No modifiers for this file, copy the header and data
- if err := tarWriter.WriteHeader(originalHeader); err != nil {
- pipeWriter.CloseWithError(err)
- return
- }
- if err := copyWithBuffer(tarWriter, tarReader); err != nil {
- pipeWriter.CloseWithError(err)
- return
- }
- continue
- }
- delete(mods, originalHeader.Name)
-
- if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil {
- pipeWriter.CloseWithError(err)
- return
- }
- }
-
- // Apply the modifiers that haven't matched any files in the archive
- for name, modifier := range mods {
- if err := modify(name, nil, modifier, nil); err != nil {
- pipeWriter.CloseWithError(err)
- return
- }
- }
-
- pipeWriter.Close()
- }()
- return pipeReader
-}
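A sketch of the wrapper in use: replacing (or creating) a single entry in a tar stream. ReplaceFileTarWrapper sets the header size from the returned content, so the modifier only supplies the header and bytes (helper name and target path are illustrative):

package tarutil

import (
	"archive/tar"
	"io"

	"github.com/docker/docker/pkg/archive" // import path as before this change
)

// rewriteHostname returns a tar stream identical to in, except that
// etc/hostname is replaced (or created) with the given value.
func rewriteHostname(in io.ReadCloser, hostname string) io.ReadCloser {
	mods := map[string]archive.TarModifierFunc{
		"etc/hostname": func(path string, hdr *tar.Header, _ io.Reader) (*tar.Header, []byte, error) {
			if hdr == nil { // entry absent from the archive; create it
				hdr = &tar.Header{Name: path, Mode: 0o644, Typeflag: tar.TypeReg}
			}
			return hdr, []byte(hostname + "\n"), nil
		},
	}
	return archive.ReplaceFileTarWrapper(in, mods)
}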
-
-// Extension returns the extension of a file that uses the specified compression algorithm.
-func (compression *Compression) Extension() string {
- switch *compression {
- case Uncompressed:
- return "tar"
- case Bzip2:
- return "tar.bz2"
- case Gzip:
- return "tar.gz"
- case Xz:
- return "tar.xz"
- case Zstd:
- return "tar.zst"
- }
- return ""
-}
-
-// assert that we implement [tar.FileInfoNames].
-//
-// TODO(thaJeztah): disabled to allow compiling on < go1.23. un-comment once we drop support for older versions of go.
-// var _ tar.FileInfoNames = (*nosysFileInfo)(nil)
-
-// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to
-// prevent tar.FileInfoHeader from introspecting it and potentially calling into
-// glibc.
-//
-// It implements [tar.FileInfoNames] to further prevent [tar.FileInfoHeader]
-// from performing any lookups on go1.23 and up. see https://go.dev/issue/50102
-type nosysFileInfo struct {
- os.FileInfo
-}
-
-// Uname stubs out looking up username. It implements [tar.FileInfoNames]
-// to prevent [tar.FileInfoHeader] from loading libraries to perform
-// username lookups.
-func (fi nosysFileInfo) Uname() (string, error) {
- return "", nil
-}
-
-// Gname stubs out looking up group-name. It implements [tar.FileInfoNames]
-// to prevent [tar.FileInfoHeader] from loading libraries to perform
-// username lookups.
-func (fi nosysFileInfo) Gname() (string, error) {
- return "", nil
-}
-
-func (fi nosysFileInfo) Sys() interface{} {
- // A Sys value of type *tar.Header is safe as it is system-independent.
- // The tar.FileInfoHeader function copies the fields into the returned
- // header without performing any OS lookups.
- if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok {
- return sys
- }
- return nil
-}
-
-// sysStat, if non-nil, populates hdr from system-dependent fields of fi.
-var sysStat func(fi os.FileInfo, hdr *tar.Header) error
-
-// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi.
-//
-// Compared to the archive/tar.FileInfoHeader function, this function is safe to
-// call from a chrooted process as it does not populate fields which would
-// require operating system lookups. It behaves identically to
-// tar.FileInfoHeader when fi is a FileInfo value returned from
-// tar.Header.FileInfo().
-//
-// When fi is a FileInfo for a native file, such as returned from os.Stat() and
-// os.Lstat(), the returned Header value differs from one returned from
-// tar.FileInfoHeader in the following ways. The Uname and Gname fields are not
-// set as OS lookups would be required to populate them. The AccessTime and
-// ChangeTime fields are not currently set (not yet implemented) although that
-// is subject to change. Callers which require the AccessTime or ChangeTime
-// fields to be zeroed should explicitly zero them out in the returned Header
-// value to avoid any compatibility issues in the future.
-func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) {
- hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link)
- if err != nil {
- return nil, err
- }
- if sysStat != nil {
- return hdr, sysStat(fi, hdr)
- }
- return hdr, nil
-}
-
-// FileInfoHeader creates a populated Header from fi.
-//
-// Compared to the archive/tar package, this function fills in less information
-// but is safe to call from a chrooted process. The AccessTime and ChangeTime
-// fields are not set in the returned header, ModTime is truncated to one-second
-// precision, and the Uname and Gname fields are only set when fi is a FileInfo
-// value returned from tar.Header.FileInfo().
-func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
- hdr, err := FileInfoHeaderNoLookups(fi, link)
- if err != nil {
- return nil, err
- }
- hdr.Format = tar.FormatPAX
- hdr.ModTime = hdr.ModTime.Truncate(time.Second)
- hdr.AccessTime = time.Time{}
- hdr.ChangeTime = time.Time{}
- hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
- hdr.Name = canonicalTarName(name, fi.IsDir())
- return hdr, nil
-}
-
-const paxSchilyXattr = "SCHILY.xattr."
-
-// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
-// to a tar header
-func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
- const (
- // Values based on linux/include/uapi/linux/capability.h
- xattrCapsSz2 = 20
- versionOffset = 3
- vfsCapRevision2 = 2
- vfsCapRevision3 = 3
- )
- capability, _ := lgetxattr(path, "security.capability")
- if capability != nil {
- if capability[versionOffset] == vfsCapRevision3 {
- // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no
- // sense outside the user namespace the archive is built in.
- capability[versionOffset] = vfsCapRevision2
- capability = capability[:xattrCapsSz2]
- }
- if hdr.PAXRecords == nil {
- hdr.PAXRecords = make(map[string]string)
- }
- hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability)
- }
- return nil
-}
-
-type tarWhiteoutConverter interface {
- ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
- ConvertRead(*tar.Header, string) (bool, error)
-}
-
-type tarAppender struct {
- TarWriter *tar.Writer
-
- // for hardlink mapping
- SeenFiles map[uint64]string
- IdentityMapping idtools.IdentityMapping
- ChownOpts *idtools.Identity
-
- // For packing and unpacking whiteout files in the
- // non standard format. The whiteout files defined
- // by the AUFS standard are used as the tar whiteout
- // standard.
- WhiteoutConverter tarWhiteoutConverter
-}
-
-func newTarAppender(idMapping idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender {
- return &tarAppender{
- SeenFiles: make(map[uint64]string),
- TarWriter: tar.NewWriter(writer),
- IdentityMapping: idMapping,
- ChownOpts: chownOpts,
- }
-}
-
-// canonicalTarName provides a platform-independent and consistent POSIX-style
-// path for files and directories to be archived regardless of the platform.
-func canonicalTarName(name string, isDir bool) string {
- name = filepath.ToSlash(name)
-
- // suffix with '/' for directories
- if isDir && !strings.HasSuffix(name, "/") {
- name += "/"
- }
- return name
-}
-
-// addTarFile adds to the tar archive a file from `path` as `name`
-func (ta *tarAppender) addTarFile(path, name string) error {
- fi, err := os.Lstat(path)
- if err != nil {
- return err
- }
-
- var link string
- if fi.Mode()&os.ModeSymlink != 0 {
- var err error
- link, err = os.Readlink(path)
- if err != nil {
- return err
- }
- }
-
- hdr, err := FileInfoHeader(name, fi, link)
- if err != nil {
- return err
- }
- if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
- return err
- }
-
- // if it's not a directory and has more than 1 link,
- // it's hard linked, so set the type flag accordingly
- if !fi.IsDir() && hasHardlinks(fi) {
- inode, err := getInodeFromStat(fi.Sys())
- if err != nil {
- return err
- }
- // a link should have a name that it links too
- // and that linked name should be first in the tar archive
- if oldpath, ok := ta.SeenFiles[inode]; ok {
- hdr.Typeflag = tar.TypeLink
- hdr.Linkname = oldpath
- hdr.Size = 0 // This Must be here for the writer math to add up!
- } else {
- ta.SeenFiles[inode] = name
- }
- }
-
- // check whether the file is overlayfs whiteout
- // if yes, skip re-mapping container ID mappings.
- isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0
-
- // handle re-mapping container ID mappings back to host ID mappings before
- // writing tar headers/files. We skip whiteout files because they were written
- // by the kernel and already have proper ownership relative to the host
- if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() {
- fileIDPair, err := getFileUIDGID(fi.Sys())
- if err != nil {
- return err
- }
- hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair)
- if err != nil {
- return err
- }
- }
-
- // explicitly override with ChownOpts
- if ta.ChownOpts != nil {
- hdr.Uid = ta.ChownOpts.UID
- hdr.Gid = ta.ChownOpts.GID
- }
-
- if ta.WhiteoutConverter != nil {
- wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
- if err != nil {
- return err
- }
-
- // If a new whiteout file exists, write original hdr, then
- // replace hdr with wo to be written after. Whiteouts should
- // always be written after the original. Note the original
- // hdr may have been updated to be a whiteout with returning
- // a whiteout header
- if wo != nil {
- if err := ta.TarWriter.WriteHeader(hdr); err != nil {
- return err
- }
- if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
- return fmt.Errorf("tar: cannot use whiteout for non-empty file")
- }
- hdr = wo
- }
- }
-
- if err := ta.TarWriter.WriteHeader(hdr); err != nil {
- return err
- }
-
- if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
- // We use sequential file access to avoid depleting the standby list on
- // Windows. On Linux, this equates to a regular os.Open.
- file, err := sequential.Open(path)
- if err != nil {
- return err
- }
-
- err = copyWithBuffer(ta.TarWriter, file)
- file.Close()
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, opts *TarOptions) error {
- var (
- Lchown = true
- inUserns, bestEffortXattrs bool
- chownOpts *idtools.Identity
- )
-
- // TODO(thaJeztah): make opts a required argument.
- if opts != nil {
- Lchown = !opts.NoLchown
- inUserns = opts.InUserNS // TODO(thaJeztah): consider deprecating opts.InUserNS and detect locally.
- chownOpts = opts.ChownOpts
- bestEffortXattrs = opts.BestEffortXattrs
- }
-
- // hdr.Mode is in linux format, which we can use for sycalls,
- // but for os.Foo() calls we need the mode converted to os.FileMode,
- // so use hdrInfo.Mode() (they differ for e.g. setuid bits)
- hdrInfo := hdr.FileInfo()
-
- switch hdr.Typeflag {
- case tar.TypeDir:
- // Create directory unless it exists as a directory already.
- // In that case we just want to merge the two
- if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
- if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
- return err
- }
- }
-
- case tar.TypeReg:
- // Source is regular file. We use sequential file access to avoid depleting
- // the standby list on Windows. On Linux, this equates to a regular os.OpenFile.
- file, err := sequential.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
- if err != nil {
- return err
- }
- if err := copyWithBuffer(file, reader); err != nil {
- _ = file.Close()
- return err
- }
- _ = file.Close()
-
- case tar.TypeBlock, tar.TypeChar:
- if inUserns { // cannot create devices in a userns
- log.G(context.TODO()).WithFields(log.Fields{"path": path, "type": hdr.Typeflag}).Debug("skipping device nodes in a userns")
- return nil
- }
- // Handle this is an OS-specific way
- if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
- return err
- }
-
- case tar.TypeFifo:
- // Handle this is an OS-specific way
- if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
- if inUserns && errors.Is(err, syscall.EPERM) {
- // In most cases, cannot create a fifo if running in user namespace
- log.G(context.TODO()).WithFields(log.Fields{"error": err, "path": path, "type": hdr.Typeflag}).Debug("creating fifo node in a userns")
- return nil
- }
- return err
- }
-
- case tar.TypeLink:
- // #nosec G305 -- The target path is checked for path traversal.
- targetPath := filepath.Join(extractDir, hdr.Linkname)
- // check for hardlink breakout
- if !strings.HasPrefix(targetPath, extractDir) {
- return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
- }
- if err := os.Link(targetPath, path); err != nil {
- return err
- }
-
- case tar.TypeSymlink:
- // path -> hdr.Linkname = targetPath
- // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
- targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // #nosec G305 -- The target path is checked for path traversal.
-
- // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
- // that symlink would first have to be created, which would be caught earlier, at this very check:
- if !strings.HasPrefix(targetPath, extractDir) {
- return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
- }
- if err := os.Symlink(hdr.Linkname, path); err != nil {
- return err
- }
-
- case tar.TypeXGlobalHeader:
- log.G(context.TODO()).Debug("PAX Global Extended Headers found and ignored")
- return nil
-
- default:
- return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
- }
-
- // Lchown is not supported on Windows.
- if Lchown && runtime.GOOS != "windows" {
- if chownOpts == nil {
- chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}
- }
- if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
- var msg string
- if inUserns && errors.Is(err, syscall.EINVAL) {
- msg = " (try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)"
- }
- return fmt.Errorf("failed to Lchown %q for UID %d, GID %d%s: %w", path, hdr.Uid, hdr.Gid, msg, err)
- }
- }
-
- var xattrErrs []string
- for key, value := range hdr.PAXRecords {
- xattr, ok := strings.CutPrefix(key, paxSchilyXattr)
- if !ok {
- continue
- }
- if err := lsetxattr(path, xattr, []byte(value), 0); err != nil {
- if bestEffortXattrs && errors.Is(err, syscall.ENOTSUP) || errors.Is(err, syscall.EPERM) {
- // EPERM occurs if modifying xattrs is not allowed. This can
- // happen when running in userns with restrictions (ChromeOS).
- xattrErrs = append(xattrErrs, err.Error())
- continue
- }
- return err
- }
- }
-
- if len(xattrErrs) > 0 {
- log.G(context.TODO()).WithFields(log.Fields{
- "errors": xattrErrs,
- }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
- }
-
- // There is no LChmod, so ignore mode for symlink. Also, this
- // must happen after chown, as that can modify the file mode
- if err := handleLChmod(hdr, path, hdrInfo); err != nil {
- return err
- }
-
- aTime := boundTime(latestTime(hdr.AccessTime, hdr.ModTime))
- mTime := boundTime(hdr.ModTime)
-
- // chtimes doesn't support a NOFOLLOW flag atm
- if hdr.Typeflag == tar.TypeLink {
- if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
- if err := chtimes(path, aTime, mTime); err != nil {
- return err
- }
- }
- } else if hdr.Typeflag != tar.TypeSymlink {
- if err := chtimes(path, aTime, mTime); err != nil {
- return err
- }
- } else {
- if err := lchtimes(path, aTime, mTime); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Tar creates an archive from the directory at `path`, and returns it as a
-// stream of bytes.
-func Tar(path string, compression Compression) (io.ReadCloser, error) {
- return TarWithOptions(path, &TarOptions{Compression: compression})
-}
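Typical usage of the removed Tar helper: stream a directory as a gzipped tarball into a file. A sketch against the pre-removal API (paths hypothetical):

package tarutil

import (
	"io"
	"os"

	"github.com/docker/docker/pkg/archive" // import path as before this change
)

// tarDirToFile writes srcDir as a gzip-compressed tarball at outPath.
func tarDirToFile(srcDir, outPath string) error {
	rc, err := archive.Tar(srcDir, archive.Gzip)
	if err != nil {
		return err
	}
	defer rc.Close()

	out, err := os.Create(outPath)
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, rc)
	return err
}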
-
-// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
-// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
-func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
- tb, err := NewTarballer(srcPath, options)
- if err != nil {
- return nil, err
- }
- go tb.Do()
- return tb.Reader(), nil
-}
-
-// Tarballer is a lower-level interface to TarWithOptions which gives the caller
-// control over which goroutine the archiving operation executes on.
-type Tarballer struct {
- srcPath string
- options *TarOptions
- pm *patternmatcher.PatternMatcher
- pipeReader *io.PipeReader
- pipeWriter *io.PipeWriter
- compressWriter io.WriteCloser
- whiteoutConverter tarWhiteoutConverter
-}
-
-// NewTarballer constructs a new tarballer. The arguments are the same as for
-// TarWithOptions.
-func NewTarballer(srcPath string, options *TarOptions) (*Tarballer, error) {
- pm, err := patternmatcher.New(options.ExcludePatterns)
- if err != nil {
- return nil, err
- }
-
- pipeReader, pipeWriter := io.Pipe()
-
- compressWriter, err := CompressStream(pipeWriter, options.Compression)
- if err != nil {
- return nil, err
- }
-
- return &Tarballer{
- // Fix the source path to work with long path names. This is a no-op
- // on platforms other than Windows.
- srcPath: addLongPathPrefix(srcPath),
- options: options,
- pm: pm,
- pipeReader: pipeReader,
- pipeWriter: pipeWriter,
- compressWriter: compressWriter,
- whiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat),
- }, nil
-}
-
-// Reader returns the reader for the created archive.
-func (t *Tarballer) Reader() io.ReadCloser {
- return t.pipeReader
-}
-
-// Do performs the archiving operation in the background. The resulting archive
-// can be read from t.Reader(). Do should only be called once on each Tarballer
-// instance.
-func (t *Tarballer) Do() {
- ta := newTarAppender(
- t.options.IDMap,
- t.compressWriter,
- t.options.ChownOpts,
- )
- ta.WhiteoutConverter = t.whiteoutConverter
-
- defer func() {
- // Make sure to check the error on Close.
- if err := ta.TarWriter.Close(); err != nil {
- log.G(context.TODO()).Errorf("Can't close tar writer: %s", err)
- }
- if err := t.compressWriter.Close(); err != nil {
- log.G(context.TODO()).Errorf("Can't close compress writer: %s", err)
- }
- if err := t.pipeWriter.Close(); err != nil {
- log.G(context.TODO()).Errorf("Can't close pipe writer: %s", err)
- }
- }()
-
- // In general we log errors here but ignore them because
- // during e.g. a diff operation the container can continue
- // mutating the filesystem and we can see transient errors
- // from this
-
- stat, err := os.Lstat(t.srcPath)
- if err != nil {
- return
- }
-
- if !stat.IsDir() {
- // We can't later join a non-dir with any includes because the
- // 'walk' will error if "file/." is stat-ed and "file" is not a
- // directory. So, we must split the source path and use the
- // basename as the include.
- if len(t.options.IncludeFiles) > 0 {
- log.G(context.TODO()).Warn("Tar: Can't archive a file with includes")
- }
-
- dir, base := SplitPathDirEntry(t.srcPath)
- t.srcPath = dir
- t.options.IncludeFiles = []string{base}
- }
-
- if len(t.options.IncludeFiles) == 0 {
- t.options.IncludeFiles = []string{"."}
- }
-
- seen := make(map[string]bool)
-
- for _, include := range t.options.IncludeFiles {
- rebaseName := t.options.RebaseNames[include]
-
- var (
- parentMatchInfo []patternmatcher.MatchInfo
- parentDirs []string
- )
-
- walkRoot := getWalkRoot(t.srcPath, include)
- filepath.WalkDir(walkRoot, func(filePath string, f os.DirEntry, err error) error {
- if err != nil {
- log.G(context.TODO()).Errorf("Tar: Can't stat file %s to tar: %s", t.srcPath, err)
- return nil
- }
-
- relFilePath, err := filepath.Rel(t.srcPath, filePath)
- if err != nil || (!t.options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
- // Error getting relative path OR we are looking
- // at the source directory path. Skip in both situations.
- return nil
- }
-
- if t.options.IncludeSourceDir && include == "." && relFilePath != "." {
- relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
- }
-
- skip := false
-
- // If "include" is an exact match for the current file
- // then even if there's an "excludePatterns" pattern that
- // matches it, don't skip it. IOW, assume an explicit 'include'
- // is asking for that file no matter what - which is true
- // for some files, like .dockerignore and Dockerfile (sometimes)
- if include != relFilePath {
- for len(parentDirs) != 0 {
- lastParentDir := parentDirs[len(parentDirs)-1]
- if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) {
- break
- }
- parentDirs = parentDirs[:len(parentDirs)-1]
- parentMatchInfo = parentMatchInfo[:len(parentMatchInfo)-1]
- }
-
- var matchInfo patternmatcher.MatchInfo
- if len(parentMatchInfo) != 0 {
- skip, matchInfo, err = t.pm.MatchesUsingParentResults(relFilePath, parentMatchInfo[len(parentMatchInfo)-1])
- } else {
- skip, matchInfo, err = t.pm.MatchesUsingParentResults(relFilePath, patternmatcher.MatchInfo{})
- }
- if err != nil {
- log.G(context.TODO()).Errorf("Error matching %s: %v", relFilePath, err)
- return err
- }
-
- if f.IsDir() {
- parentDirs = append(parentDirs, relFilePath)
- parentMatchInfo = append(parentMatchInfo, matchInfo)
- }
- }
-
- if skip {
-			// If we want to skip this file and it's a directory
-			// then we should first check to see if there's an
-			// excludes pattern (e.g. !dir/file) that starts with this
-			// dir. If so then we can't skip this dir.
-
-			// It's not a dir, so we can just return/skip.
- if !f.IsDir() {
- return nil
- }
-
- // No exceptions (!...) in patterns so just skip dir
- if !t.pm.Exclusions() {
- return filepath.SkipDir
- }
-
- dirSlash := relFilePath + string(filepath.Separator)
-
- for _, pat := range t.pm.Patterns() {
- if !pat.Exclusion() {
- continue
- }
- if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
- // found a match - so can't skip this dir
- return nil
- }
- }
-
- // No matching exclusion dir so just skip dir
- return filepath.SkipDir
- }
-
- if seen[relFilePath] {
- return nil
- }
- seen[relFilePath] = true
-
- // Rename the base resource.
- if rebaseName != "" {
- var replacement string
- if rebaseName != string(filepath.Separator) {
- // Special case the root directory to replace with an
- // empty string instead so that we don't end up with
- // double slashes in the paths.
- replacement = rebaseName
- }
-
- relFilePath = strings.Replace(relFilePath, include, replacement, 1)
- }
-
- if err := ta.addTarFile(filePath, relFilePath); err != nil {
- log.G(context.TODO()).Errorf("Can't add file %s to tar: %s", filePath, err)
- // if pipe is broken, stop writing tar stream to it
- if err == io.ErrClosedPipe {
- return err
- }
- }
- return nil
- })
- }
-}
-
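
The include/exclude walk above delegates matching to github.com/moby/patternmatcher, threading a parent directory's MatchInfo down so each child is matched incrementally instead of re-evaluating every pattern from the root. A minimal, runnable sketch of that API (the pattern strings are illustrative):

package main

import (
	"fmt"

	"github.com/moby/patternmatcher"
)

func main() {
	// Exclusion patterns with a re-include exception, .dockerignore-style.
	pm, err := patternmatcher.New([]string{"*.log", "!important.log"})
	if err != nil {
		panic(err)
	}

	// Top-level entries are matched against an empty parent MatchInfo,
	// mirroring the walk above; errors are ignored here for brevity.
	skip, _, _ := pm.MatchesUsingParentResults("debug.log", patternmatcher.MatchInfo{})
	keep, _, _ := pm.MatchesUsingParentResults("important.log", patternmatcher.MatchInfo{})
	fmt.Println(skip, keep) // true false
}
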
-// Unpack unpacks the decompressedArchive to dest with options.
-func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
- tr := tar.NewReader(decompressedArchive)
-
- var dirs []*tar.Header
- whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
-
- // Iterate through the files in the archive.
-loop:
- for {
- hdr, err := tr.Next()
- if err == io.EOF {
- // end of tar archive
- break
- }
- if err != nil {
- return err
- }
-
- // ignore XGlobalHeader early to avoid creating parent directories for them
- if hdr.Typeflag == tar.TypeXGlobalHeader {
- log.G(context.TODO()).Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name)
- continue
- }
-
- // Normalize name, for safety and for a simple is-root check
-		// This keeps "../" as-is, but normalizes "/../" to "/". On Windows:
-		// This keeps "..\" as-is, but normalizes "\..\" to "\".
- hdr.Name = filepath.Clean(hdr.Name)
-
- for _, exclude := range options.ExcludePatterns {
- if strings.HasPrefix(hdr.Name, exclude) {
- continue loop
- }
- }
-
- // Ensure that the parent directory exists.
- err = createImpliedDirectories(dest, hdr, options)
- if err != nil {
- return err
- }
-
- // #nosec G305 -- The joined path is checked for path traversal.
- path := filepath.Join(dest, hdr.Name)
- rel, err := filepath.Rel(dest, path)
- if err != nil {
- return err
- }
- if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
- return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
- }
-
-		// If path exists, we almost always just want to remove and replace it.
- // The only exception is when it is a directory *and* the file from
- // the layer is also a directory. Then we want to merge them (i.e.
- // just apply the metadata from the layer).
- if fi, err := os.Lstat(path); err == nil {
- if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
- // If NoOverwriteDirNonDir is true then we cannot replace
- // an existing directory with a non-directory from the archive.
- return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
- }
-
- if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
- // If NoOverwriteDirNonDir is true then we cannot replace
- // an existing non-directory with a directory from the archive.
- return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
- }
-
- if fi.IsDir() && hdr.Name == "." {
- continue
- }
-
- if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
- if err := os.RemoveAll(path); err != nil {
- return err
- }
- }
- }
-
- if err := remapIDs(options.IDMap, hdr); err != nil {
- return err
- }
-
- if whiteoutConverter != nil {
- writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
- if err != nil {
- return err
- }
- if !writeFile {
- continue
- }
- }
-
- if err := createTarFile(path, dest, hdr, tr, options); err != nil {
- return err
- }
-
-		// Directory mtimes must be handled at the end so that further
-		// file creation inside them does not modify the directory mtime.
- if hdr.Typeflag == tar.TypeDir {
- dirs = append(dirs, hdr)
- }
- }
-
- for _, hdr := range dirs {
- // #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice.
- path := filepath.Join(dest, hdr.Name)
-
- if err := chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)); err != nil {
- return err
- }
- }
- return nil
-}
-
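
The filepath.Rel check in Unpack above is the breakout guard: it rejects any entry name that would resolve outside dest. A self-contained sketch of the same idea; securePath is an illustrative helper name, not part of this package:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// securePath joins an archive entry name onto dest and rejects any result
// that resolves outside dest, mirroring the check in Unpack above.
func securePath(dest, name string) (string, error) {
	path := filepath.Join(dest, filepath.Clean(name))
	rel, err := filepath.Rel(dest, path)
	if err != nil {
		return "", err
	}
	if rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
		return "", fmt.Errorf("%q is outside of %q", name, dest)
	}
	return path, nil
}

func main() {
	fmt.Println(securePath("/tmp/dest", "a/b.txt"))          // /tmp/dest/a/b.txt <nil>
	fmt.Println(securePath("/tmp/dest", "../../etc/passwd")) // error
}
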
-// createImpliedDirectories will create all parent directories of the current path with default permissions, if they do
-// not already exist. This is possible as the tar format supports 'implicit' directories, where their existence is
-// defined by the paths of files in the tar, but there are no header entries for the directories themselves, and thus
-// we must both create them and choose metadata like permissions.
-//
-// The caller should have performed filepath.Clean(hdr.Name), so hdr.Name will now be in the filepath format for the OS
-// on which the daemon is running. This precondition is required because this function assumes an OS-specific path
-// separator when checking that a path is not the root.
-func createImpliedDirectories(dest string, hdr *tar.Header, options *TarOptions) error {
- // Not the root directory, ensure that the parent directory exists
- if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
- parent := filepath.Dir(hdr.Name)
- parentPath := filepath.Join(dest, parent)
- if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
-			// RootPair() is confined inside this branch as most cases will not require a call, so we can spend some
-			// unneeded function calls in the uncommon case to encapsulate logic -- implied directories are a niche
-			// usage that reduces the portability of an image.
- rootIDs := options.IDMap.RootPair()
-
- err = idtools.MkdirAllAndChownNew(parentPath, ImpliedDirectoryMode, rootIDs)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
-// and unpacks it into the directory at `dest`.
-// The archive may be compressed with one of the following algorithms:
-// identity (uncompressed), gzip, bzip2, xz.
-//
-// FIXME: specify behavior when target path exists vs. doesn't exist.
-func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
- return untarHandler(tarArchive, dest, options, true)
-}
-
-// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
-// and unpacks it into the directory at `dest`.
-// The archive must be an uncompressed stream.
-func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
- return untarHandler(tarArchive, dest, options, false)
-}
-
-// Handler for teasing out the automatic decompression
-func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
- if tarArchive == nil {
- return fmt.Errorf("Empty archive")
- }
- dest = filepath.Clean(dest)
- if options == nil {
- options = &TarOptions{}
- }
- if options.ExcludePatterns == nil {
- options.ExcludePatterns = []string{}
- }
-
- r := tarArchive
- if decompress {
- decompressedArchive, err := DecompressStream(tarArchive)
- if err != nil {
- return err
- }
- defer decompressedArchive.Close()
- r = decompressedArchive
- }
-
- return Unpack(r, dest, options)
-}
-
-// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
-// If either Tar or Untar fails, TarUntar aborts and returns the error.
-func (archiver *Archiver) TarUntar(src, dst string) error {
- archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
- if err != nil {
- return err
- }
- defer archive.Close()
- options := &TarOptions{
- IDMap: archiver.IDMapping,
- }
- return archiver.Untar(archive, dst, options)
-}
-
-// UntarPath untars a file from path to a destination; src is the source tar file path.
-func (archiver *Archiver) UntarPath(src, dst string) error {
- archive, err := os.Open(src)
- if err != nil {
- return err
- }
- defer archive.Close()
- options := &TarOptions{
- IDMap: archiver.IDMapping,
- }
- return archiver.Untar(archive, dst, options)
-}
-
-// CopyWithTar creates a tar archive of filesystem path `src`, and
-// unpacks it at filesystem path `dst`.
-// The archive is streamed directly with fixed buffering and no
-// intermediary disk IO.
-func (archiver *Archiver) CopyWithTar(src, dst string) error {
- srcSt, err := os.Stat(src)
- if err != nil {
- return err
- }
- if !srcSt.IsDir() {
- return archiver.CopyFileWithTar(src, dst)
- }
-
- // if this Archiver is set up with ID mapping we need to create
- // the new destination directory with the remapped root UID/GID pair
- // as owner
- rootIDs := archiver.IDMapping.RootPair()
- // Create dst, copy src's content into it
- if err := idtools.MkdirAllAndChownNew(dst, 0o755, rootIDs); err != nil {
- return err
- }
- return archiver.TarUntar(src, dst)
-}
-
-// CopyFileWithTar emulates the behavior of the 'cp' command-line
-// for a single file. It copies a regular file from path `src` to
-// path `dst`, and preserves all its metadata.
-func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
- srcSt, err := os.Stat(src)
- if err != nil {
- return err
- }
-
- if srcSt.IsDir() {
- return fmt.Errorf("Can't copy a directory")
- }
-
- // Clean up the trailing slash. This must be done in an operating
- // system specific manner.
- if dst[len(dst)-1] == os.PathSeparator {
- dst = filepath.Join(dst, filepath.Base(src))
- }
- // Create the holding directory if necessary
- if err := os.MkdirAll(filepath.Dir(dst), 0o700); err != nil {
- return err
- }
-
- r, w := io.Pipe()
- errC := make(chan error, 1)
-
- go func() {
- defer close(errC)
-
- errC <- func() error {
- defer w.Close()
-
- srcF, err := os.Open(src)
- if err != nil {
- return err
- }
- defer srcF.Close()
-
- hdr, err := FileInfoHeaderNoLookups(srcSt, "")
- if err != nil {
- return err
- }
- hdr.Format = tar.FormatPAX
- hdr.ModTime = hdr.ModTime.Truncate(time.Second)
- hdr.AccessTime = time.Time{}
- hdr.ChangeTime = time.Time{}
- hdr.Name = filepath.Base(dst)
- hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
-
- if err := remapIDs(archiver.IDMapping, hdr); err != nil {
- return err
- }
-
- tw := tar.NewWriter(w)
- defer tw.Close()
- if err := tw.WriteHeader(hdr); err != nil {
- return err
- }
- if err := copyWithBuffer(tw, srcF); err != nil {
- return err
- }
- return nil
- }()
- }()
- defer func() {
- if er := <-errC; err == nil && er != nil {
- err = er
- }
- }()
-
- err = archiver.Untar(r, filepath.Dir(dst), nil)
- if err != nil {
- r.CloseWithError(err)
- }
- return err
-}
-
-// IdentityMapping returns the IdentityMapping of the archiver.
-func (archiver *Archiver) IdentityMapping() idtools.IdentityMapping {
- return archiver.IDMapping
-}
-
-func remapIDs(idMapping idtools.IdentityMapping, hdr *tar.Header) error {
- ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid})
- hdr.Uid, hdr.Gid = ids.UID, ids.GID
- return err
-}
-
-// cmdStream executes a command, and returns its stdout as a stream.
-// If the command fails to run or doesn't complete successfully, an error
-// will be returned, including anything written on stderr.
-func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
- cmd.Stdin = input
- pipeR, pipeW := io.Pipe()
- cmd.Stdout = pipeW
- var errBuf bytes.Buffer
- cmd.Stderr = &errBuf
-
- // Run the command and return the pipe
- if err := cmd.Start(); err != nil {
- return nil, err
- }
-
- // Ensure the command has exited before we clean anything up
- done := make(chan struct{})
-
- // Copy stdout to the returned pipe
- go func() {
- if err := cmd.Wait(); err != nil {
- pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
- } else {
- pipeW.Close()
- }
- close(done)
- }()
-
- return &readCloserWrapper{
- Reader: pipeR,
- closer: func() error {
- // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as
- // cmd.Wait waits for any non-file stdout/stderr/stdin to close.
- err := pipeR.Close()
- <-done
- return err
- },
- }, nil
-}
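
cmdStream above is the pattern behind shelling out to external (de)compressors: stdin is wired to the input, stdout is returned through an io.Pipe, and stderr is folded into the pipe's error on a non-zero exit. A runnable miniature of the same pattern, assuming a Unix-like system with cat on PATH:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"os/exec"
	"strings"
)

// pipeThrough mirrors the cmdStream pattern above in miniature: stdin is fed
// from input, stdout comes back as a stream, and anything on stderr is folded
// into the error delivered through the pipe when the command fails.
func pipeThrough(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
	cmd.Stdin = input
	pr, pw := io.Pipe()
	cmd.Stdout = pw
	var errBuf bytes.Buffer
	cmd.Stderr = &errBuf

	if err := cmd.Start(); err != nil {
		return nil, err
	}
	go func() {
		if err := cmd.Wait(); err != nil {
			pw.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
		} else {
			pw.Close()
		}
	}()
	return pr, nil
}

func main() {
	out, err := pipeThrough(exec.Command("cat"), strings.NewReader("hello\n"))
	if err != nil {
		panic(err)
	}
	defer out.Close()
	io.Copy(os.Stdout, out) // prints "hello"
}
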
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/archive_deprecated.go
new file mode 100644
index 0000000..5bdbdef
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_deprecated.go
@@ -0,0 +1,259 @@
+// Package archive provides helper functions for dealing with archive files.
+package archive
+
+import (
+ "archive/tar"
+ "io"
+ "os"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/moby/go-archive"
+ "github.com/moby/go-archive/compression"
+ "github.com/moby/go-archive/tarheader"
+)
+
+// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a
+// tar, but that do not have their own header entry.
+//
+// Deprecated: use [archive.ImpliedDirectoryMode] instead.
+const ImpliedDirectoryMode = archive.ImpliedDirectoryMode
+
+type (
+	// Compression is the state that represents whether content is compressed or not.
+ //
+ // Deprecated: use [compression.Compression] instead.
+ Compression = compression.Compression
+	// WhiteoutFormat is the format of whiteouts to be unpacked.
+ //
+ // Deprecated: use [archive.WhiteoutFormat] instead.
+ WhiteoutFormat = archive.WhiteoutFormat
+
+ // TarOptions wraps the tar options.
+ //
+ // Deprecated: use [archive.TarOptions] instead.
+ TarOptions struct {
+ IncludeFiles []string
+ ExcludePatterns []string
+ Compression compression.Compression
+ NoLchown bool
+ IDMap idtools.IdentityMapping
+ ChownOpts *idtools.Identity
+ IncludeSourceDir bool
+ // WhiteoutFormat is the expected on disk format for whiteout files.
+ // This format will be converted to the standard format on pack
+ // and from the standard format on unpack.
+ WhiteoutFormat archive.WhiteoutFormat
+ // When unpacking, specifies whether overwriting a directory with a
+ // non-directory is allowed and vice versa.
+ NoOverwriteDirNonDir bool
+ // For each include when creating an archive, the included name will be
+ // replaced with the matching name from this map.
+ RebaseNames map[string]string
+ InUserNS bool
+ // Allow unpacking to succeed in spite of failures to set extended
+ // attributes on the unpacked files due to the destination filesystem
+ // not supporting them or a lack of permissions. Extended attributes
+ // were probably in the archive for a reason, so set this option at
+ // your own peril.
+ BestEffortXattrs bool
+ }
+)
+
+// Archiver implements the Archiver interface and allows the reuse of most utility functions of
+// this package with a pluggable Untar function. Also, to facilitate the passing of specific id
+// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
+//
+// Deprecated: use [archive.Archiver] instead.
+type Archiver struct {
+ Untar func(io.Reader, string, *TarOptions) error
+ IDMapping idtools.IdentityMapping
+}
+
+// NewDefaultArchiver returns a new Archiver without any IdentityMapping
+//
+// Deprecated: use [archive.NewDefaultArchiver] instead.
+func NewDefaultArchiver() *Archiver {
+ return &Archiver{Untar: Untar}
+}
+
+const (
+ Uncompressed = compression.None // Deprecated: use [compression.None] instead.
+ Bzip2 = compression.Bzip2 // Deprecated: use [compression.Bzip2] instead.
+ Gzip = compression.Gzip // Deprecated: use [compression.Gzip] instead.
+ Xz = compression.Xz // Deprecated: use [compression.Xz] instead.
+ Zstd = compression.Zstd // Deprecated: use [compression.Zstd] instead.
+)
+
+const (
+ AUFSWhiteoutFormat = archive.AUFSWhiteoutFormat // Deprecated: use [archive.AUFSWhiteoutFormat] instead.
+ OverlayWhiteoutFormat = archive.OverlayWhiteoutFormat // Deprecated: use [archive.OverlayWhiteoutFormat] instead.
+)
+
+// IsArchivePath checks if the (possibly compressed) file at the given path
+// starts with a tar file header.
+//
+// Deprecated: use [archive.IsArchivePath] instead.
+func IsArchivePath(path string) bool {
+ return archive.IsArchivePath(path)
+}
+
+// DetectCompression detects the compression algorithm of the source.
+//
+// Deprecated: use [compression.Detect] instead.
+func DetectCompression(source []byte) archive.Compression {
+ return compression.Detect(source)
+}
+
+// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
+//
+// Deprecated: use [compression.DecompressStream] instead.
+func DecompressStream(arch io.Reader) (io.ReadCloser, error) {
+ return compression.DecompressStream(arch)
+}
+
+// CompressStream compresses dest with the specified compression algorithm.
+//
+// Deprecated: use [compression.CompressStream] instead.
+func CompressStream(dest io.Writer, comp compression.Compression) (io.WriteCloser, error) {
+ return compression.CompressStream(dest, comp)
+}
+
+// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper.
+//
+// Deprecated: use [archive.TarModifierFunc] instead.
+type TarModifierFunc = archive.TarModifierFunc
+
+// ReplaceFileTarWrapper converts inputTarStream to a new tar stream.
+//
+// Deprecated: use [archive.ReplaceFileTarWrapper] instead.
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]archive.TarModifierFunc) io.ReadCloser {
+ return archive.ReplaceFileTarWrapper(inputTarStream, mods)
+}
+
+// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi.
+//
+// Deprecated: use [tarheader.FileInfoHeaderNoLookups] instead.
+func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) {
+ return tarheader.FileInfoHeaderNoLookups(fi, link)
+}
+
+// FileInfoHeader creates a populated Header from fi.
+//
+// Deprecated: use [archive.FileInfoHeader] instead.
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
+ return archive.FileInfoHeader(name, fi, link)
+}
+
+// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
+// to a tar header
+//
+// Deprecated: use [archive.ReadSecurityXattrToTarHeader] instead.
+func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
+ return archive.ReadSecurityXattrToTarHeader(path, hdr)
+}
+
+// Tar creates an archive from the directory at `path`, and returns it as a
+// stream of bytes.
+//
+// Deprecated: use [archive.Tar] instead.
+func Tar(path string, compression archive.Compression) (io.ReadCloser, error) {
+ return archive.TarWithOptions(path, &archive.TarOptions{Compression: compression})
+}
+
+// TarWithOptions creates an archive with the given options.
+//
+// Deprecated: use [archive.TarWithOptions] instead.
+func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
+ return archive.TarWithOptions(srcPath, toArchiveOpt(options))
+}
+
+// Tarballer is a lower-level interface to TarWithOptions.
+//
+// Deprecated: use [archive.Tarballer] instead.
+type Tarballer = archive.Tarballer
+
+// NewTarballer constructs a new tarballer using TarWithOptions.
+//
+// Deprecated: use [archive.NewTarballer] instead.
+func NewTarballer(srcPath string, options *TarOptions) (*archive.Tarballer, error) {
+ return archive.NewTarballer(srcPath, toArchiveOpt(options))
+}
+
+// Unpack unpacks the decompressedArchive to dest with options.
+//
+// Deprecated: use [archive.Unpack] instead.
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+ return archive.Unpack(decompressedArchive, dest, toArchiveOpt(options))
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+//
+// Deprecated: use [archive.Untar] instead.
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return archive.Untar(tarArchive, dest, toArchiveOpt(options))
+}
+
+// UntarUncompressed reads a stream of bytes from `tarArchive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+//
+// Deprecated: use [archive.UntarUncompressed] instead.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return archive.UntarUncompressed(tarArchive, dest, toArchiveOpt(options))
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func (archiver *Archiver) TarUntar(src, dst string) error {
+ return (&archive.Archiver{
+ Untar: func(reader io.Reader, s string, options *archive.TarOptions) error {
+ return archiver.Untar(reader, s, &TarOptions{
+ IDMap: archiver.IDMapping,
+ })
+ },
+ IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping),
+ }).TarUntar(src, dst)
+}
+
+// UntarPath untars a file from path to a destination; src is the source tar file path.
+func (archiver *Archiver) UntarPath(src, dst string) error {
+ return (&archive.Archiver{
+ Untar: func(reader io.Reader, s string, options *archive.TarOptions) error {
+ return archiver.Untar(reader, s, &TarOptions{
+ IDMap: archiver.IDMapping,
+ })
+ },
+ IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping),
+ }).UntarPath(src, dst)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+ return (&archive.Archiver{
+ Untar: func(reader io.Reader, s string, options *archive.TarOptions) error {
+ return archiver.Untar(reader, s, nil)
+ },
+ IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping),
+ }).CopyWithTar(src, dst)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+ return (&archive.Archiver{
+ Untar: func(reader io.Reader, s string, options *archive.TarOptions) error {
+ return archiver.Untar(reader, s, nil)
+ },
+ IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping),
+ }).CopyFileWithTar(src, dst)
+}
+
+// IdentityMapping returns the IdentityMapping of the archiver.
+func (archiver *Archiver) IdentityMapping() idtools.IdentityMapping {
+ return archiver.IDMapping
+}
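
Taken together, the shims above forward to github.com/moby/go-archive and its compression subpackage. A hedged migration sketch for callers of the deprecated package (the paths are placeholders, and error handling is kept minimal):

package main

import (
	"github.com/moby/go-archive"
	"github.com/moby/go-archive/compression"
)

func main() {
	// Previously: TarWithOptions(src, &TarOptions{Compression: Gzip})
	// from docker/docker/pkg/archive.
	rc, err := archive.TarWithOptions("/some/src", &archive.TarOptions{
		Compression: compression.Gzip,
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	// Previously: Untar(rc, dst, nil); decompression is still handled
	// automatically, as with the old Untar.
	if err := archive.Untar(rc, "/some/dst", nil); err != nil {
		panic(err)
	}
}
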
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
deleted file mode 100644
index 7b6c3e0..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package archive
-
-import (
- "archive/tar"
- "fmt"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/moby/sys/userns"
- "golang.org/x/sys/unix"
-)
-
-func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
- if format == OverlayWhiteoutFormat {
- return overlayWhiteoutConverter{}
- }
- return nil
-}
-
-type overlayWhiteoutConverter struct{}
-
-func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, _ error) {
- // convert whiteouts to AUFS format
- if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
- // we just rename the file and make it normal
- dir, filename := filepath.Split(hdr.Name)
- hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
- hdr.Mode = 0o600
- hdr.Typeflag = tar.TypeReg
- hdr.Size = 0
- }
-
- if fi.Mode()&os.ModeDir == 0 {
- // FIXME(thaJeztah): return a sentinel error instead of nil, nil
- return nil, nil
- }
-
- opaqueXattrName := "trusted.overlay.opaque"
- if userns.RunningInUserNS() {
- opaqueXattrName = "user.overlay.opaque"
- }
-
- // convert opaque dirs to AUFS format by writing an empty file with the prefix
- opaque, err := lgetxattr(path, opaqueXattrName)
- if err != nil {
- return nil, err
- }
- if len(opaque) != 1 || opaque[0] != 'y' {
- // FIXME(thaJeztah): return a sentinel error instead of nil, nil
- return nil, nil
- }
- delete(hdr.PAXRecords, paxSchilyXattr+opaqueXattrName)
-
- // create a header for the whiteout file
- // it should inherit some properties from the parent, but be a regular file
- return &tar.Header{
- Typeflag: tar.TypeReg,
- Mode: hdr.Mode & int64(os.ModePerm),
- Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), // #nosec G305 -- An archive is being created, not extracted.
- Size: 0,
- Uid: hdr.Uid,
- Uname: hdr.Uname,
- Gid: hdr.Gid,
- Gname: hdr.Gname,
- AccessTime: hdr.AccessTime,
- ChangeTime: hdr.ChangeTime,
- }, nil
-}
-
-func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
- base := filepath.Base(path)
- dir := filepath.Dir(path)
-
- // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
- if base == WhiteoutOpaqueDir {
- opaqueXattrName := "trusted.overlay.opaque"
- if userns.RunningInUserNS() {
- opaqueXattrName = "user.overlay.opaque"
- }
-
- err := unix.Setxattr(dir, opaqueXattrName, []byte{'y'}, 0)
- if err != nil {
- return false, fmt.Errorf("setxattr('%s', %s=y): %w", dir, opaqueXattrName, err)
- }
- // don't write the file itself
- return false, err
- }
-
- // if a file was deleted and we are using overlay, we need to create a character device
- if strings.HasPrefix(base, WhiteoutPrefix) {
- originalBase := base[len(WhiteoutPrefix):]
- originalPath := filepath.Join(dir, originalBase)
-
- if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
- return false, fmt.Errorf("failed to mknod('%s', S_IFCHR, 0): %w", originalPath, err)
- }
- if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
- return false, err
- }
-
- // don't write the file itself
- return false, nil
- }
-
- return true, nil
-}
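
For orientation, the AUFS representation the converter above targets is: a deleted entry becomes a zero-length sibling named with the whiteout prefix, and an opaque directory gains a marker entry inside it. A tiny illustration of the rename ConvertWrite performs; the local constant mirrors the package's WhiteoutPrefix (".wh."):

package main

import (
	"fmt"
	"path/filepath"
)

const whiteoutPrefix = ".wh." // mirrors the package's WhiteoutPrefix constant

// whiteoutFor returns the AUFS whiteout name for a deleted path, the same
// rename ConvertWrite performs above.
func whiteoutFor(name string) string {
	dir, base := filepath.Split(name)
	return filepath.Join(dir, whiteoutPrefix+base)
}

func main() {
	fmt.Println(whiteoutFor("etc/passwd")) // etc/.wh.passwd
}
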
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
deleted file mode 100644
index 6495549..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/archive_other.go
+++ /dev/null
@@ -1,7 +0,0 @@
-//go:build !linux
-
-package archive
-
-func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
- return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
deleted file mode 100644
index bc6b25a..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
+++ /dev/null
@@ -1,126 +0,0 @@
-//go:build !windows
-
-package archive
-
-import (
- "archive/tar"
- "errors"
- "os"
- "path/filepath"
- "runtime"
- "strings"
- "syscall"
-
- "github.com/docker/docker/pkg/idtools"
- "golang.org/x/sys/unix"
-)
-
-func init() {
- sysStat = statUnix
-}
-
-// addLongPathPrefix adds the Windows long path prefix to the path provided if
-// it does not already have it. It is a no-op on platforms other than Windows.
-func addLongPathPrefix(srcPath string) string {
- return srcPath
-}
-
-// getWalkRoot calculates the root path when performing a TarWithOptions.
-// We use a separate function as this is platform specific. On Linux, we
-// can't use filepath.Join(srcPath,include) because this will clean away
-// a trailing "." or "/" which may be important.
-func getWalkRoot(srcPath string, include string) string {
- return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include
-}
-
-// chmodTarEntry is used to adjust the file permissions used in the tar header
-// based on the platform where the archiving is done.
-func chmodTarEntry(perm os.FileMode) os.FileMode {
- return perm // noop for unix as golang APIs provide perm bits correctly
-}
-
-// statUnix populates hdr from system-dependent fields of fi without performing
-// any OS lookups.
-func statUnix(fi os.FileInfo, hdr *tar.Header) error {
- // Devmajor and Devminor are only needed for special devices.
-
- // In FreeBSD, RDev for regular files is -1 (unless overridden by FS):
- // https://cgit.freebsd.org/src/tree/sys/kern/vfs_default.c?h=stable/13#n1531
- // (NODEV is -1: https://cgit.freebsd.org/src/tree/sys/sys/param.h?h=stable/13#n241).
-
- // ZFS in particular does not override the default:
- // https://cgit.freebsd.org/src/tree/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c?h=stable/13#n2027
-
- // Since `Stat_t.Rdev` is uint64, the cast turns -1 into (2^64 - 1).
- // Such large values cannot be encoded in a tar header.
- if runtime.GOOS == "freebsd" && hdr.Typeflag != tar.TypeBlock && hdr.Typeflag != tar.TypeChar {
- return nil
- }
- s, ok := fi.Sys().(*syscall.Stat_t)
- if !ok {
- return nil
- }
-
- hdr.Uid = int(s.Uid)
- hdr.Gid = int(s.Gid)
-
- if s.Mode&unix.S_IFBLK != 0 ||
- s.Mode&unix.S_IFCHR != 0 {
- hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert
- hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert
- }
-
- return nil
-}
-
-func getInodeFromStat(stat interface{}) (uint64, error) {
- s, ok := stat.(*syscall.Stat_t)
- if !ok {
- // FIXME(thaJeztah): this should likely return an error; see https://github.com/moby/moby/pull/49493#discussion_r1979152897
- return 0, nil
- }
- return s.Ino, nil
-}
-
-func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
- s, ok := stat.(*syscall.Stat_t)
-
- if !ok {
- return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t")
- }
- return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil
-}
-
-// handleTarTypeBlockCharFifo is an OS-specific helper function used by
-// createTarFile to handle the following types of header: Block; Char; Fifo.
-//
-// Creating device nodes is not supported when running in a user namespace;
-// it produces a [syscall.EPERM] in most cases.
-func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
- mode := uint32(hdr.Mode & 0o7777)
- switch hdr.Typeflag {
- case tar.TypeBlock:
- mode |= unix.S_IFBLK
- case tar.TypeChar:
- mode |= unix.S_IFCHR
- case tar.TypeFifo:
- mode |= unix.S_IFIFO
- }
-
- return mknod(path, mode, unix.Mkdev(uint32(hdr.Devmajor), uint32(hdr.Devminor)))
-}
-
-func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
- if hdr.Typeflag == tar.TypeLink {
- if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
- if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
- return err
- }
- }
- } else if hdr.Typeflag != tar.TypeSymlink {
- if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
- return err
- }
- }
- return nil
-}
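
A quick round-trip of the device-number helpers that statUnix and handleTarTypeBlockCharFifo above rely on (golang.org/x/sys/unix; builds on Unix-like platforms only, and 8:1 is just an example device):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Pack a major/minor pair the way handleTarTypeBlockCharFifo does,
	// then split it back the way statUnix does.
	dev := unix.Mkdev(8, 1)
	fmt.Println(unix.Major(dev), unix.Minor(dev)) // 8 1
}
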
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
deleted file mode 100644
index fd2546e..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package archive
-
-import (
- "archive/tar"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/docker/docker/pkg/idtools"
-)
-
-// longPathPrefix is the longpath prefix for Windows file paths.
-const longPathPrefix = `\\?\`
-
-// addLongPathPrefix adds the Windows long path prefix to the path provided if
-// it does not already have it. It is a no-op on platforms other than Windows.
-//
-// addLongPathPrefix is a copy of [github.com/docker/docker/pkg/longpath.AddPrefix].
-func addLongPathPrefix(srcPath string) string {
- if strings.HasPrefix(srcPath, longPathPrefix) {
- return srcPath
- }
- if strings.HasPrefix(srcPath, `\\`) {
- // This is a UNC path, so we need to add 'UNC' to the path as well.
- return longPathPrefix + `UNC` + srcPath[1:]
- }
- return longPathPrefix + srcPath
-}
-
-// getWalkRoot calculates the root path when performing a TarWithOptions.
-// We use a separate function as this is platform specific.
-func getWalkRoot(srcPath string, include string) string {
- return filepath.Join(srcPath, include)
-}
-
-// chmodTarEntry is used to adjust the file permissions used in the tar header
-// based on the platform where the archiving is done.
-func chmodTarEntry(perm os.FileMode) os.FileMode {
- // Remove group- and world-writable bits.
- perm &= 0o755
-
- // Add the x bit: make everything +x on Windows
- return perm | 0o111
-}
-
-func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
- // do nothing. no notion of Rdev, Nlink in stat on Windows
- return
-}
-
-func getInodeFromStat(stat interface{}) (uint64, error) {
- // do nothing. no notion of Inode in stat on Windows
- return 0, nil
-}
-
-// handleTarTypeBlockCharFifo is an OS-specific helper function used by
-// createTarFile to handle the following types of header: Block; Char; Fifo
-func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
- return nil
-}
-
-func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
- return nil
-}
-
-func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
- // no notion of file ownership mapping yet on Windows
- return idtools.Identity{UID: 0, GID: 0}, nil
-}
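
The Windows permission normalization above is easy to sanity-check in isolation: group- and world-writable bits are dropped and execute bits are added for every entry. A standalone copy:

package main

import (
	"fmt"
	"os"
)

// chmodTarEntry reproduces the Windows normalization above: drop group- and
// world-writable bits, then mark everything executable.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	perm &= 0o755
	return perm | 0o111
}

func main() {
	fmt.Printf("%o %o\n", chmodTarEntry(0o666), chmodTarEntry(0o400)) // 755 511
}
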
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go
deleted file mode 100644
index 1c0509d..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/changes.go
+++ /dev/null
@@ -1,430 +0,0 @@
-package archive
-
-import (
- "archive/tar"
- "bytes"
- "context"
- "fmt"
- "io"
- "io/fs"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "time"
-
- "github.com/containerd/log"
- "github.com/docker/docker/pkg/idtools"
-)
-
-// ChangeType represents the change type.
-type ChangeType int
-
-const (
- ChangeModify = 0 // ChangeModify represents the modify operation.
- ChangeAdd = 1 // ChangeAdd represents the add operation.
- ChangeDelete = 2 // ChangeDelete represents the delete operation.
-)
-
-func (c ChangeType) String() string {
- switch c {
- case ChangeModify:
- return "C"
- case ChangeAdd:
- return "A"
- case ChangeDelete:
- return "D"
- }
- return ""
-}
-
-// Change represents a change; it wraps the change type and path.
-// It describes changes of the files in the path with respect to the
-// parent layers. The change could be modify, add, or delete.
-// This is used for layer diff.
-type Change struct {
- Path string
- Kind ChangeType
-}
-
-func (change *Change) String() string {
- return fmt.Sprintf("%s %s", change.Kind, change.Path)
-}
-
-// for sort.Sort
-type changesByPath []Change
-
-func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
-func (c changesByPath) Len() int { return len(c) }
-func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
-
-// GNU tar doesn't have sub-second mtime precision. The Go tar
-// writer (1.10+) does when using PAX format, but we round times to seconds
-// to ensure archives have the same hashes for backwards compatibility.
-// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4.
-//
-// Non-sub-second precision is problematic when we apply changes via tar
-// files. We handle this by comparing for exact times, *or* the same
-// second count with either a or b having exactly 0 nanoseconds.
-func sameFsTime(a, b time.Time) bool {
- return a.Equal(b) ||
- (a.Unix() == b.Unix() &&
- (a.Nanosecond() == 0 || b.Nanosecond() == 0))
-}
-
-// Changes walks the path rw and determines changes for the files in the path,
-// with respect to the parent layers
-func Changes(layers []string, rw string) ([]Change, error) {
- return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
-}
-
-func aufsMetadataSkip(path string) (skip bool, err error) {
- skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
- if err != nil {
- skip = true
- }
- return skip, err
-}
-
-func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
- f := filepath.Base(path)
-
- // If there is a whiteout, then the file was removed
- if strings.HasPrefix(f, WhiteoutPrefix) {
- originalFile := f[len(WhiteoutPrefix):]
- return filepath.Join(filepath.Dir(path), originalFile), nil
- }
-
- return "", nil
-}
-
-type (
- skipChange func(string) (bool, error)
- deleteChange func(string, string, os.FileInfo) (string, error)
-)
-
-func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
- var (
- changes []Change
- changedDirs = make(map[string]struct{})
- )
-
- err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
- if err != nil {
- return err
- }
-
- // Rebase path
- path, err = filepath.Rel(rw, path)
- if err != nil {
- return err
- }
-
- // As this runs on the daemon side, file paths are OS specific.
- path = filepath.Join(string(os.PathSeparator), path)
-
- // Skip root
- if path == string(os.PathSeparator) {
- return nil
- }
-
- if sc != nil {
- if skip, err := sc(path); skip {
- return err
- }
- }
-
- change := Change{
- Path: path,
- }
-
- deletedFile, err := dc(rw, path, f)
- if err != nil {
- return err
- }
-
- // Find out what kind of modification happened
- if deletedFile != "" {
- change.Path = deletedFile
- change.Kind = ChangeDelete
- } else {
- // Otherwise, the file was added
- change.Kind = ChangeAdd
-
- // ...Unless it already existed in a top layer, in which case, it's a modification
- for _, layer := range layers {
- stat, err := os.Stat(filepath.Join(layer, path))
- if err != nil && !os.IsNotExist(err) {
- return err
- }
- if err == nil {
- // The file existed in the top layer, so that's a modification
-
- // However, if it's a directory, maybe it wasn't actually modified.
- // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
- if stat.IsDir() && f.IsDir() {
- if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
- // Both directories are the same, don't record the change
- return nil
- }
- }
- change.Kind = ChangeModify
- break
- }
- }
- }
-
- // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
- // This block is here to ensure the change is recorded even if the
- // modify time, mode and size of the parent directory in the rw and ro layers are all equal.
- // Check https://github.com/docker/docker/pull/13590 for details.
- if f.IsDir() {
- changedDirs[path] = struct{}{}
- }
- if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
- parent := filepath.Dir(path)
- if _, ok := changedDirs[parent]; !ok && parent != "/" {
- changes = append(changes, Change{Path: parent, Kind: ChangeModify})
- changedDirs[parent] = struct{}{}
- }
- }
-
- // Record change
- changes = append(changes, change)
- return nil
- })
- if err != nil && !os.IsNotExist(err) {
- return nil, err
- }
- return changes, nil
-}
-
-// FileInfo describes the information of a file.
-type FileInfo struct {
- parent *FileInfo
- name string
- stat fs.FileInfo
- children map[string]*FileInfo
- capability []byte
- added bool
-}
-
-// LookUp looks up the file information of a file.
-func (info *FileInfo) LookUp(path string) *FileInfo {
- // As this runs on the daemon side, file paths are OS specific.
- parent := info
- if path == string(os.PathSeparator) {
- return info
- }
-
- pathElements := strings.Split(path, string(os.PathSeparator))
- for _, elem := range pathElements {
- if elem != "" {
- child := parent.children[elem]
- if child == nil {
- return nil
- }
- parent = child
- }
- }
- return parent
-}
-
-func (info *FileInfo) path() string {
- if info.parent == nil {
- // As this runs on the daemon side, file paths are OS specific.
- return string(os.PathSeparator)
- }
- return filepath.Join(info.parent.path(), info.name)
-}
-
-func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
- sizeAtEntry := len(*changes)
-
- if oldInfo == nil {
- // add
- change := Change{
- Path: info.path(),
- Kind: ChangeAdd,
- }
- *changes = append(*changes, change)
- info.added = true
- }
-
-	// We make a copy so we can modify it to detect additions.
-	// Also, we only recurse on the old dir if the new info is a directory;
-	// otherwise any previous delete/change is considered recursive.
- oldChildren := make(map[string]*FileInfo)
- if oldInfo != nil && info.isDir() {
- for k, v := range oldInfo.children {
- oldChildren[k] = v
- }
- }
-
- for name, newChild := range info.children {
- oldChild := oldChildren[name]
- if oldChild != nil {
- // change?
- oldStat := oldChild.stat
- newStat := newChild.stat
- // Note: We can't compare inode or ctime or blocksize here, because these change
- // when copying a file into a container. However, that is not generally a problem
- // because any content change will change mtime, and any status change should
- // be visible when actually comparing the stat fields. The only time this
- // breaks down is if some code intentionally hides a change by setting
- // back mtime
- if statDifferent(oldStat, newStat) ||
- !bytes.Equal(oldChild.capability, newChild.capability) {
- change := Change{
- Path: newChild.path(),
- Kind: ChangeModify,
- }
- *changes = append(*changes, change)
- newChild.added = true
- }
-
- // Remove from copy so we can detect deletions
- delete(oldChildren, name)
- }
-
- newChild.addChanges(oldChild, changes)
- }
- for _, oldChild := range oldChildren {
- // delete
- change := Change{
- Path: oldChild.path(),
- Kind: ChangeDelete,
- }
- *changes = append(*changes, change)
- }
-
- // If there were changes inside this directory, we need to add it, even if the directory
- // itself wasn't changed. This is needed to properly save and restore filesystem permissions.
- // As this runs on the daemon side, file paths are OS specific.
- if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
- change := Change{
- Path: info.path(),
- Kind: ChangeModify,
- }
- // Let's insert the directory entry before the recently added entries located inside this dir
- *changes = append(*changes, change) // just to resize the slice, will be overwritten
- copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
- (*changes)[sizeAtEntry] = change
- }
-}
-
-// Changes computes the changes between info and oldInfo.
-func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
- var changes []Change
-
- info.addChanges(oldInfo, &changes)
-
- return changes
-}
-
-func newRootFileInfo() *FileInfo {
- // As this runs on the daemon side, file paths are OS specific.
- root := &FileInfo{
- name: string(os.PathSeparator),
- children: make(map[string]*FileInfo),
- }
- return root
-}
-
-// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
-// If oldDir is "", then all files in newDir will be Add-Changes.
-func ChangesDirs(newDir, oldDir string) ([]Change, error) {
- var oldRoot, newRoot *FileInfo
- if oldDir == "" {
- emptyDir, err := os.MkdirTemp("", "empty")
- if err != nil {
- return nil, err
- }
- defer os.Remove(emptyDir)
- oldDir = emptyDir
- }
- oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
- if err != nil {
- return nil, err
- }
-
- return newRoot.Changes(oldRoot), nil
-}
-
-// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
-func ChangesSize(newDir string, changes []Change) int64 {
- var (
- size int64
- sf = make(map[uint64]struct{})
- )
- for _, change := range changes {
- if change.Kind == ChangeModify || change.Kind == ChangeAdd {
- file := filepath.Join(newDir, change.Path)
- fileInfo, err := os.Lstat(file)
- if err != nil {
- log.G(context.TODO()).Errorf("Can not stat %q: %s", file, err)
- continue
- }
-
- if fileInfo != nil && !fileInfo.IsDir() {
- if hasHardlinks(fileInfo) {
- inode := getIno(fileInfo)
- if _, ok := sf[inode]; !ok {
- size += fileInfo.Size()
- sf[inode] = struct{}{}
- }
- } else {
- size += fileInfo.Size()
- }
- }
- }
- }
- return size
-}
-
-// ExportChanges produces an Archive from the provided changes, relative to dir.
-func ExportChanges(dir string, changes []Change, idMap idtools.IdentityMapping) (io.ReadCloser, error) {
- reader, writer := io.Pipe()
- go func() {
- ta := newTarAppender(idMap, writer, nil)
-
- sort.Sort(changesByPath(changes))
-
- // In general we log errors here but ignore them because
- // during e.g. a diff operation the container can continue
- // mutating the filesystem and we can see transient errors
- // from this
- for _, change := range changes {
- if change.Kind == ChangeDelete {
- whiteOutDir := filepath.Dir(change.Path)
- whiteOutBase := filepath.Base(change.Path)
- whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
- timestamp := time.Now()
- hdr := &tar.Header{
- Name: whiteOut[1:],
- Size: 0,
- ModTime: timestamp,
- AccessTime: timestamp,
- ChangeTime: timestamp,
- }
- if err := ta.TarWriter.WriteHeader(hdr); err != nil {
- log.G(context.TODO()).Debugf("Can't write whiteout header: %s", err)
- }
- } else {
- path := filepath.Join(dir, change.Path)
- if err := ta.addTarFile(path, change.Path[1:]); err != nil {
- log.G(context.TODO()).Debugf("Can't add file %s to tar: %s", path, err)
- }
- }
- }
-
- // Make sure to check the error on Close.
- if err := ta.TarWriter.Close(); err != nil {
- log.G(context.TODO()).Debugf("Can't close layer: %s", err)
- }
- if err := writer.Close(); err != nil {
- log.G(context.TODO()).Debugf("failed close Changes writer: %s", err)
- }
- }()
- return reader, nil
-}
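
The sameFsTime helper above encodes the rule that matters for diffing: two timestamps are treated as equal when they match exactly, or when they share a second and one side has zero nanoseconds (i.e. was truncated somewhere along the way). A standalone copy for experimentation:

package main

import (
	"fmt"
	"time"
)

func sameFsTime(a, b time.Time) bool {
	return a.Equal(b) ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func main() {
	t := time.Date(2024, 1, 2, 3, 4, 5, 123456789, time.UTC)
	fmt.Println(sameFsTime(t, t.Truncate(time.Second))) // true: truncated side has 0 ns
	fmt.Println(sameFsTime(t, t.Add(time.Second)))      // false: different seconds
}
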
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/changes_deprecated.go
new file mode 100644
index 0000000..48c7523
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_deprecated.go
@@ -0,0 +1,56 @@
+package archive
+
+import (
+ "io"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/moby/go-archive"
+)
+
+// ChangeType represents the change type.
+//
+// Deprecated: use [archive.ChangeType] instead.
+type ChangeType = archive.ChangeType
+
+const (
+ ChangeModify = archive.ChangeModify // Deprecated: use [archive.ChangeModify] instead.
+ ChangeAdd = archive.ChangeAdd // Deprecated: use [archive.ChangeAdd] instead.
+ ChangeDelete = archive.ChangeDelete // Deprecated: use [archive.ChangeDelete] instead.
+)
+
+// Change represents a change.
+//
+// Deprecated: use [archive.Change] instead.
+type Change = archive.Change
+
+// Changes walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+//
+// Deprecated: use [archive.Changes] instead.
+func Changes(layers []string, rw string) ([]archive.Change, error) {
+ return archive.Changes(layers, rw)
+}
+
+// FileInfo describes the information of a file.
+//
+// Deprecated: use [archive.FileInfo] instead.
+type FileInfo = archive.FileInfo
+
+// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
+//
+// Deprecated: use [archive.ChangesDirs] instead.
+func ChangesDirs(newDir, oldDir string) ([]archive.Change, error) {
+ return archive.ChangesDirs(newDir, oldDir)
+}
+
+// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
+//
+// Deprecated: use [archive.ChangesSize] instead.
+func ChangesSize(newDir string, changes []archive.Change) int64 {
+ return archive.ChangesSize(newDir, changes)
+}
+
+// ExportChanges produces an Archive from the provided changes, relative to dir.
+func ExportChanges(dir string, changes []archive.Change, idMap idtools.IdentityMapping) (io.ReadCloser, error) {
+ return archive.ExportChanges(dir, changes, idtools.ToUserIdentityMapping(idMap))
+}
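
A hedged sketch of the replacement call path for these shims: compute and export changes through moby/go-archive directly, converting the identity mapping the same way the ExportChanges wrapper above does. The layer path is a placeholder:

package main

import (
	"io"
	"os"

	"github.com/docker/docker/pkg/idtools"
	"github.com/moby/go-archive"
)

func exportLayer(dir string, idMap idtools.IdentityMapping) error {
	// An empty oldDir means every file in dir is reported as an addition.
	changes, err := archive.ChangesDirs(dir, "")
	if err != nil {
		return err
	}
	rc, err := archive.ExportChanges(dir, changes, idtools.ToUserIdentityMapping(idMap))
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(os.Stdout, rc) // stream the layer tar somewhere useful
	return err
}

func main() {
	if err := exportLayer("/some/layer", idtools.IdentityMapping{}); err != nil {
		panic(err)
	}
}
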
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
deleted file mode 100644
index 9a041b0..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
+++ /dev/null
@@ -1,281 +0,0 @@
-package archive
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "syscall"
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-// walker is used to implement collectFileInfoForChanges on linux. Where this
-// method in general returns the entire contents of two directory trees, we
-// optimize some FS calls out on linux. In particular, we take advantage of the
-// fact that getdents(2) returns the inode of each file in the directory being
-// walked, which, when walking two trees in parallel to generate a list of
-// changes, can be used to prune subtrees without ever having to lstat(2) them
-// directly. Eliminating stat calls in this way can save up to seconds on large
-// images.
-type walker struct {
- dir1 string
- dir2 string
- root1 *FileInfo
- root2 *FileInfo
-}
-
-// collectFileInfoForChanges returns a complete representation of the trees
-// rooted at dir1 and dir2, with one important exception: any subtree or
-// leaf where the inode and device numbers are an exact match between dir1
-// and dir2 will be pruned from the results. This method is *only* to be used
-// to generating a list of changes between the two directories, as it does not
-// reflect the full contents.
-func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
- w := &walker{
- dir1: dir1,
- dir2: dir2,
- root1: newRootFileInfo(),
- root2: newRootFileInfo(),
- }
-
- i1, err := os.Lstat(w.dir1)
- if err != nil {
- return nil, nil, err
- }
- i2, err := os.Lstat(w.dir2)
- if err != nil {
- return nil, nil, err
- }
-
- if err := w.walk("/", i1, i2); err != nil {
- return nil, nil, err
- }
-
- return w.root1, w.root2, nil
-}
-
-// Given a FileInfo, its path info, and a reference to the root of the tree
-// being constructed, register this file with the tree.
-func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
- if fi == nil {
- return nil
- }
- parent := root.LookUp(filepath.Dir(path))
- if parent == nil {
- return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
- }
- info := &FileInfo{
- name: filepath.Base(path),
- children: make(map[string]*FileInfo),
- parent: parent,
- }
- cpath := filepath.Join(dir, path)
- info.stat = fi
- info.capability, _ = lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
- parent.children[info.name] = info
- return nil
-}
-
-// Walk a subtree rooted at the same path in both trees being iterated. For
-// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
-func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
- // Register these nodes with the return trees, unless we're still at the
- // (already-created) roots:
- if path != "/" {
- if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
- return err
- }
- if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
- return err
- }
- }
-
- is1Dir := i1 != nil && i1.IsDir()
- is2Dir := i2 != nil && i2.IsDir()
-
- sameDevice := false
- if i1 != nil && i2 != nil {
- si1 := i1.Sys().(*syscall.Stat_t)
- si2 := i2.Sys().(*syscall.Stat_t)
- if si1.Dev == si2.Dev {
- sameDevice = true
- }
- }
-
- // If these files are both non-existent, or leaves (non-dirs), we are done.
- if !is1Dir && !is2Dir {
- return nil
- }
-
- // Fetch the names of all the files contained in both directories being walked:
- var names1, names2 []nameIno
- if is1Dir {
- names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
- if err != nil {
- return err
- }
- }
- if is2Dir {
- names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
- if err != nil {
- return err
- }
- }
-
- // We have lists of the files contained in both parallel directories, sorted
- // in the same order. Walk them in parallel, generating a unique merged list
- // of all items present in either or both directories.
- var names []string
- ix1 := 0
- ix2 := 0
-
- for {
- if ix1 >= len(names1) {
- break
- }
- if ix2 >= len(names2) {
- break
- }
-
- ni1 := names1[ix1]
- ni2 := names2[ix2]
-
- switch strings.Compare(ni1.name, ni2.name) {
- case -1: // ni1 < ni2 -- advance ni1
- // we will not encounter ni1 in names2
- names = append(names, ni1.name)
- ix1++
- case 0: // ni1 == ni2
- if ni1.ino != ni2.ino || !sameDevice {
- names = append(names, ni1.name)
- }
- ix1++
- ix2++
- case 1: // ni1 > ni2 -- advance ni2
- // we will not encounter ni2 in names1
- names = append(names, ni2.name)
- ix2++
- }
- }
- for ix1 < len(names1) {
- names = append(names, names1[ix1].name)
- ix1++
- }
- for ix2 < len(names2) {
- names = append(names, names2[ix2].name)
- ix2++
- }
-
- // For each of the names present in either or both of the directories being
- // iterated, stat the name under each root, and recurse the pair of them:
- for _, name := range names {
- fname := filepath.Join(path, name)
- var cInfo1, cInfo2 os.FileInfo
- if is1Dir {
- cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
- if err != nil && !os.IsNotExist(err) {
- return err
- }
- }
- if is2Dir {
- cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
- if err != nil && !os.IsNotExist(err) {
- return err
- }
- }
- if err = w.walk(fname, cInfo1, cInfo2); err != nil {
- return err
- }
- }
- return nil
-}
-
-// {name,inode} pairs used to support the early-pruning logic of the walker type
-type nameIno struct {
- name string
- ino uint64
-}
-
-type nameInoSlice []nameIno
-
-func (s nameInoSlice) Len() int { return len(s) }
-func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
-
-// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
-// numbers further up the stack when reading directory contents. Unlike
-// os.Readdirnames, which returns a list of filenames, this function returns a
-// list of {filename,inode} pairs.
-func readdirnames(dirname string) (names []nameIno, err error) {
- var (
- size = 100
- buf = make([]byte, 4096)
- nbuf int
- bufp int
- nb int
- )
-
- f, err := os.Open(dirname)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- names = make([]nameIno, 0, size) // Empty with room to grow.
- for {
- // Refill the buffer if necessary
- if bufp >= nbuf {
- bufp = 0
- nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
- if nbuf < 0 {
- nbuf = 0
- }
- if err != nil {
- return nil, os.NewSyscallError("readdirent", err)
- }
- if nbuf <= 0 {
- break // EOF
- }
- }
-
- // Drain the buffer
- nb, names = parseDirent(buf[bufp:nbuf], names)
- bufp += nb
- }
-
- sl := nameInoSlice(names)
- sort.Sort(sl)
- return sl, nil
-}
-
-// parseDirent is a minor modification of unix.ParseDirent (linux version)
-// which returns {name,inode} pairs instead of just names.
-func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
- origlen := len(buf)
- for len(buf) > 0 {
- dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) // #nosec G103 -- Ignore "G103: Use of unsafe calls should be audited"
- buf = buf[dirent.Reclen:]
- if dirent.Ino == 0 { // File absent in directory.
- continue
- }
- b := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) // #nosec G103 -- Ignore "G103: Use of unsafe calls should be audited"
- name := string(b[0:clen(b[:])])
- if name == "." || name == ".." { // Useless names
- continue
- }
- names = append(names, nameIno{name, dirent.Ino})
- }
- return origlen - len(buf), names
-}
-
-func clen(n []byte) int {
- for i := 0; i < len(n); i++ {
- if n[i] == 0 {
- return i
- }
- }
- return len(n)
-}
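
The two-pointer merge inside walk above is the heart of the pruning: both name lists arrive sorted from readdirnames, and an entry present in both trees with the same inode on the same device is dropped without ever being lstat-ed. The merge in isolation:

package main

import "fmt"

type nameIno struct {
	name string
	ino  uint64
}

// mergeNames walks two sorted {name,inode} lists in lockstep, keeping every
// name that appears on only one side, and pruning names whose inodes match
// on both sides of the same device.
func mergeNames(names1, names2 []nameIno, sameDevice bool) []string {
	var names []string
	ix1, ix2 := 0, 0
	for ix1 < len(names1) && ix2 < len(names2) {
		ni1, ni2 := names1[ix1], names2[ix2]
		switch {
		case ni1.name < ni2.name: // only in names1
			names = append(names, ni1.name)
			ix1++
		case ni1.name > ni2.name: // only in names2
			names = append(names, ni2.name)
			ix2++
		default: // present in both; prune identical inodes
			if ni1.ino != ni2.ino || !sameDevice {
				names = append(names, ni1.name)
			}
			ix1++
			ix2++
		}
	}
	for ; ix1 < len(names1); ix1++ {
		names = append(names, names1[ix1].name)
	}
	for ; ix2 < len(names2); ix2++ {
		names = append(names, names2[ix2].name)
	}
	return names
}

func main() {
	a := []nameIno{{"bar", 1}, {"foo", 2}}
	b := []nameIno{{"baz", 3}, {"foo", 2}}
	fmt.Println(mergeNames(a, b, true)) // [bar baz]; foo pruned, inodes match
}
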
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
deleted file mode 100644
index a8a3a5a..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/changes_other.go
+++ /dev/null
@@ -1,95 +0,0 @@
-//go:build !linux
-
-package archive
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "runtime"
- "strings"
-)
-
-func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
- var (
- oldRoot, newRoot *FileInfo
- err1, err2 error
- errs = make(chan error, 2)
- )
- go func() {
- oldRoot, err1 = collectFileInfo(oldDir)
- errs <- err1
- }()
- go func() {
- newRoot, err2 = collectFileInfo(newDir)
- errs <- err2
- }()
-
- // block until both routines have returned
- for i := 0; i < 2; i++ {
- if err := <-errs; err != nil {
- return nil, nil, err
- }
- }
-
- return oldRoot, newRoot, nil
-}
-
-func collectFileInfo(sourceDir string) (*FileInfo, error) {
- root := newRootFileInfo()
-
- err := filepath.WalkDir(sourceDir, func(path string, _ os.DirEntry, err error) error {
- if err != nil {
- return err
- }
-
- // Rebase path
- relPath, err := filepath.Rel(sourceDir, path)
- if err != nil {
- return err
- }
-
- // As this runs on the daemon side, file paths are OS specific.
- relPath = filepath.Join(string(os.PathSeparator), relPath)
-
- // See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
- // Temporary workaround. If the returned path starts with two backslashes,
- // trim it down to a single backslash. Only relevant on Windows.
- if runtime.GOOS == "windows" {
- if strings.HasPrefix(relPath, `\\`) {
- relPath = relPath[1:]
- }
- }
-
- if relPath == string(os.PathSeparator) {
- return nil
- }
-
- parent := root.LookUp(filepath.Dir(relPath))
- if parent == nil {
- return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
- }
-
- s, err := os.Lstat(path)
- if err != nil {
- return err
- }
-
- info := &FileInfo{
- name: filepath.Base(relPath),
- children: make(map[string]*FileInfo),
- parent: parent,
- stat: s,
- }
-
- info.capability, _ = lgetxattr(path, "security.capability")
-
- parent.children[info.name] = info
-
- return nil
- })
- if err != nil {
- return nil, err
- }
- return root, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
deleted file mode 100644
index 4dd98bd..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
+++ /dev/null
@@ -1,43 +0,0 @@
-//go:build !windows
-
-package archive
-
-import (
- "io/fs"
- "os"
- "syscall"
-)
-
-func statDifferent(oldStat fs.FileInfo, newStat fs.FileInfo) bool {
- oldSys := oldStat.Sys().(*syscall.Stat_t)
- newSys := newStat.Sys().(*syscall.Stat_t)
- // Don't look at size for dirs, it's not a good measure of change
- if oldStat.Mode() != newStat.Mode() ||
- oldSys.Uid != newSys.Uid ||
- oldSys.Gid != newSys.Gid ||
- oldSys.Rdev != newSys.Rdev ||
- // Don't look at size or modification time for dirs, it's not a good
- // measure of change. See https://github.com/moby/moby/issues/9874
- // for a description of the issue with modification time, and
- // https://github.com/moby/moby/pull/11422 for the change.
- // (Note that in the Windows implementation of this function,
- // modification time IS taken as a change). See
- // https://github.com/moby/moby/pull/37982 for more information.
- (!oldStat.Mode().IsDir() &&
- (!sameFsTime(oldStat.ModTime(), newStat.ModTime()) || (oldStat.Size() != newStat.Size()))) {
- return true
- }
- return false
-}
-
-func (info *FileInfo) isDir() bool {
- return info.parent == nil || info.stat.Mode().IsDir()
-}
-
-func getIno(fi os.FileInfo) uint64 {
- return fi.Sys().(*syscall.Stat_t).Ino
-}
-
-func hasHardlinks(fi os.FileInfo) bool {
- return fi.Sys().(*syscall.Stat_t).Nlink > 1
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
deleted file mode 100644
index c89605c..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package archive
-
-import (
- "io/fs"
- "os"
-)
-
-func statDifferent(oldStat fs.FileInfo, newStat fs.FileInfo) bool {
- // Note there is slight difference between the Linux and Windows
- // implementations here. Due to https://github.com/moby/moby/issues/9874,
- // and the fix at https://github.com/moby/moby/pull/11422, Linux does not
- // consider a change to the directory time as a change. Windows on NTFS
- // does. See https://github.com/moby/moby/pull/37982 for more information.
-
- if !sameFsTime(oldStat.ModTime(), newStat.ModTime()) ||
- oldStat.Mode() != newStat.Mode() ||
- oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
- return true
- }
- return false
-}
-
-func (info *FileInfo) isDir() bool {
- return info.parent == nil || info.stat.Mode().IsDir()
-}
-
-func getIno(fi os.FileInfo) (inode uint64) {
- return
-}
-
-func hasHardlinks(fi os.FileInfo) bool {
- return false
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go
deleted file mode 100644
index cae0173..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/copy.go
+++ /dev/null
@@ -1,497 +0,0 @@
-package archive
-
-import (
- "archive/tar"
- "context"
- "errors"
- "io"
- "os"
- "path/filepath"
- "strings"
- "sync"
-
- "github.com/containerd/log"
-)
-
-// Errors used or returned by this file.
-var (
- ErrNotDirectory = errors.New("not a directory")
- ErrDirNotExists = errors.New("no such directory")
- ErrCannotCopyDir = errors.New("cannot copy directory")
- ErrInvalidCopySource = errors.New("invalid copy source content")
-)
-
-var copyPool = sync.Pool{
- New: func() interface{} { s := make([]byte, 32*1024); return &s },
-}
-
-func copyWithBuffer(dst io.Writer, src io.Reader) error {
- buf := copyPool.Get().(*[]byte)
- _, err := io.CopyBuffer(dst, src, *buf)
- copyPool.Put(buf)
- return err
-}
-
-// PreserveTrailingDotOrSeparator returns the given cleaned path (after
-// processing using any utility functions from the path or filepath stdlib
-// packages) and appends a trailing `/.` or `/` if its corresponding original
-// path (from before being processed by utility functions from the path or
-// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
-// path already ends in a `.` path segment, then another is not added. If the
-// cleaned path already ends in a path separator, then another is not added.
-func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string) string {
- // Ensure paths are in platform semantics
- cleanedPath = normalizePath(cleanedPath)
- originalPath = normalizePath(originalPath)
-
- if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
- if !hasTrailingPathSeparator(cleanedPath) {
- // Add a separator if it doesn't already end with one (a cleaned
- // path would only end in a separator if it is the root).
- cleanedPath += string(filepath.Separator)
- }
- cleanedPath += "."
- }
-
- if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
- cleanedPath += string(filepath.Separator)
- }
-
- return cleanedPath
-}
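
The restoration is easiest to see with concrete inputs. A small sketch, assuming Unix separators and the PreserveTrailingDotOrSeparator function above; examplePreserve is a hypothetical name:

package archive

import "path/filepath"

// examplePreserve shows how the trailing "/." or "/" stripped by
// filepath.Clean is re-attached, so the caller's intent ("contents of
// the dir" vs. "the dir itself") survives path cleaning.
func examplePreserve() []string {
	out := make([]string, 0, 3)
	for _, orig := range []string{"/foo/bar/.", "/foo/bar/", "/foo/bar"} {
		cleaned := filepath.Clean(orig) // "/foo/bar" in all three cases
		out = append(out, PreserveTrailingDotOrSeparator(cleaned, orig))
	}
	return out // ["/foo/bar/.", "/foo/bar/", "/foo/bar"]
}
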
-
-// assertsDirectory returns whether the given path is
-// asserted to be a directory, i.e., the path ends with
-// a trailing '/' or `/.`, assuming a path separator of `/`.
-func assertsDirectory(path string) bool {
- return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
-}
-
-// hasTrailingPathSeparator returns whether the given
-// path ends with the system's path separator character.
-func hasTrailingPathSeparator(path string) bool {
- return len(path) > 0 && path[len(path)-1] == filepath.Separator
-}
-
-// specifiesCurrentDir returns whether the given path specifies
-// a "current directory", i.e., the last path segment is `.`.
-func specifiesCurrentDir(path string) bool {
- return filepath.Base(path) == "."
-}
-
-// SplitPathDirEntry splits the given path between its directory name and its
-// basename by first cleaning the path but preserves a trailing "." if the
-// original path specified the current directory.
-func SplitPathDirEntry(path string) (dir, base string) {
- cleanedPath := filepath.Clean(filepath.FromSlash(path))
-
- if specifiesCurrentDir(path) {
- cleanedPath += string(os.PathSeparator) + "."
- }
-
- return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
-}
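
For example (a sketch assuming the function above; exampleSplit is a hypothetical name):

package archive

import "fmt"

// exampleSplit shows that, unlike a plain filepath.Split, a trailing
// "." survives cleaning and ends up in the base component.
func exampleSplit() {
	fmt.Println(SplitPathDirEntry("/a/b"))   // "/a" "b"
	fmt.Println(SplitPathDirEntry("/a/b/.")) // "/a/b" "."
}
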
-
-// TarResource archives the resource described by the given CopyInfo to a Tar
-// archive. A non-nil error is returned if sourcePath does not exist or is
-// asserted to be a directory but exists as another type of file.
-//
-// This function acts as a convenient wrapper around TarWithOptions, which
-// requires a directory as the source path. TarResource accepts either a
-// directory or a file path and correctly sets the Tar options.
-func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
- return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
-}
-
-// TarResourceRebase is like TarResource but renames the first path element of
-// items in the resulting tar archive to match the given rebaseName if not "".
-func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, _ error) {
- sourcePath = normalizePath(sourcePath)
- if _, err := os.Lstat(sourcePath); err != nil {
- // Catches the case where the source does not exist or is not a
- // directory if asserted to be a directory, as this also causes an
- // error.
- return nil, err
- }
-
- // Separate the source path between its directory and
- // the entry in that directory which we are archiving.
- sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
- opts := TarResourceRebaseOpts(sourceBase, rebaseName)
-
- log.G(context.TODO()).Debugf("copying %q from %q", sourceBase, sourceDir)
- return TarWithOptions(sourceDir, opts)
-}
-
-// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
-// parameters to be sent to TarWithOptions (the TarOptions struct)
-func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
- filter := []string{sourceBase}
- return &TarOptions{
- Compression: Uncompressed,
- IncludeFiles: filter,
- IncludeSourceDir: true,
- RebaseNames: map[string]string{
- sourceBase: rebaseName,
- },
- }
-}
-
-// CopyInfo holds basic info about the source
-// or destination path of a copy operation.
-type CopyInfo struct {
- Path string
- Exists bool
- IsDir bool
- RebaseName string
-}
-
-// CopyInfoSourcePath stats the given path to create a CopyInfo
-// struct representing that resource for the source of an archive copy
-// operation. The given path should be an absolute local path. A source path
-// has all symlinks evaluated that appear before the last path separator ("/"
-// on Unix). As it is to be a copy source, the path must exist.
-func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
- // normalize the file path and then evaluate the symbol link
- // we will use the target file instead of the symbol link if
- // followLink is set
- path = normalizePath(path)
-
- resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
- if err != nil {
- return CopyInfo{}, err
- }
-
- stat, err := os.Lstat(resolvedPath)
- if err != nil {
- return CopyInfo{}, err
- }
-
- return CopyInfo{
- Path: resolvedPath,
- Exists: true,
- IsDir: stat.IsDir(),
- RebaseName: rebaseName,
- }, nil
-}
-
-// CopyInfoDestinationPath stats the given path to create a CopyInfo
-// struct representing that resource for the destination of an archive copy
-// operation. The given path should be an absolute local path.
-func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
- maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
- path = normalizePath(path)
- originalPath := path
-
- stat, err := os.Lstat(path)
-
- if err == nil && stat.Mode()&os.ModeSymlink == 0 {
- // The path exists and is not a symlink.
- return CopyInfo{
- Path: path,
- Exists: true,
- IsDir: stat.IsDir(),
- }, nil
- }
-
- // While the path is a symlink.
- for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
- if n > maxSymlinkIter {
- // Don't follow symlinks more than this arbitrary number of times.
- return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
- }
-
- // The path is a symbolic link. We need to evaluate it so that the
- // destination of the copy operation is the link target and not the
- // link itself. This is notably different than CopyInfoSourcePath which
- // only evaluates symlinks before the last appearing path separator.
- // Also note that it is okay if the last path element is a broken
- // symlink as the copy operation should create the target.
- var linkTarget string
-
- linkTarget, err = os.Readlink(path)
- if err != nil {
- return CopyInfo{}, err
- }
-
- if !filepath.IsAbs(linkTarget) {
- // Join with the parent directory.
- dstParent, _ := SplitPathDirEntry(path)
- linkTarget = filepath.Join(dstParent, linkTarget)
- }
-
- path = linkTarget
- stat, err = os.Lstat(path)
- }
-
- if err != nil {
- // It's okay if the destination path doesn't exist. We can still
- // continue the copy operation if the parent directory exists.
- if !os.IsNotExist(err) {
- return CopyInfo{}, err
- }
-
- // Ensure destination parent dir exists.
- dstParent, _ := SplitPathDirEntry(path)
-
- parentDirStat, err := os.Stat(dstParent)
- if err != nil {
- return CopyInfo{}, err
- }
- if !parentDirStat.IsDir() {
- return CopyInfo{}, ErrNotDirectory
- }
-
- return CopyInfo{Path: path}, nil
- }
-
- // The path exists after resolving symlinks.
- return CopyInfo{
- Path: path,
- Exists: true,
- IsDir: stat.IsDir(),
- }, nil
-}
-
-// PrepareArchiveCopy prepares the given srcContent archive, which should
-// contain the archived resource described by srcInfo, to the destination
-// described by dstInfo. Returns the possibly modified content archive along
-// with the path to the destination directory which it should be extracted to.
-func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
- // Ensure in platform semantics
- srcInfo.Path = normalizePath(srcInfo.Path)
- dstInfo.Path = normalizePath(dstInfo.Path)
-
- // Separate the destination path between its directory and base
- // components in case the source archive contents need to be rebased.
- dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
- _, srcBase := SplitPathDirEntry(srcInfo.Path)
-
- switch {
- case dstInfo.Exists && dstInfo.IsDir:
- // The destination exists as a directory. No alteration
- // to srcContent is needed as its contents can be
- // simply extracted to the destination directory.
- return dstInfo.Path, io.NopCloser(srcContent), nil
- case dstInfo.Exists && srcInfo.IsDir:
- // The destination exists as some type of file and the source
- // content is a directory. This is an error condition since
- // you cannot copy a directory to an existing file location.
- return "", nil, ErrCannotCopyDir
- case dstInfo.Exists:
- // The destination exists as some type of file and the source content
- // is also a file. The source content entry will have to be renamed to
- // have a basename which matches the destination path's basename.
- if len(srcInfo.RebaseName) != 0 {
- srcBase = srcInfo.RebaseName
- }
- return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
- case srcInfo.IsDir:
- // The destination does not exist and the source content is an archive
- // of a directory. The archive should be extracted to the parent of
- // the destination path instead, and when it is, the directory that is
- // created as a result should take the name of the destination path.
- // The source content entries will have to be renamed to have a
- // basename which matches the destination path's basename.
- if len(srcInfo.RebaseName) != 0 {
- srcBase = srcInfo.RebaseName
- }
- return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
- case assertsDirectory(dstInfo.Path):
- // The destination does not exist and is asserted to be created as a
- // directory, but the source content is not a directory. This is an
- // error condition since you cannot create a directory from a file
- // source.
- return "", nil, ErrDirNotExists
- default:
- // The last remaining case is when the destination does not exist, is
- // not asserted to be a directory, and the source content is not an
- // archive of a directory. In this case, the destination file will need
- // to be created when the archive is extracted and the source content
- // entry will have to be renamed to have a basename which matches the
- // destination path's basename.
- if len(srcInfo.RebaseName) != 0 {
- srcBase = srcInfo.RebaseName
- }
- return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
- }
-}
-
-// RebaseArchiveEntries rewrites the given srcContent archive replacing
-// an occurrence of oldBase with newBase at the beginning of entry names.
-func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
- if oldBase == string(os.PathSeparator) {
- // If oldBase specifies the root directory, use an empty string as
- // oldBase instead so that newBase doesn't replace the path separator
- // that all paths will start with.
- oldBase = ""
- }
-
- rebased, w := io.Pipe()
-
- go func() {
- srcTar := tar.NewReader(srcContent)
- rebasedTar := tar.NewWriter(w)
-
- for {
- hdr, err := srcTar.Next()
- if err == io.EOF {
- // Signals end of archive.
- rebasedTar.Close()
- w.Close()
- return
- }
- if err != nil {
- w.CloseWithError(err)
- return
- }
-
- // srcContent tar stream, as served by TarWithOptions(), is
- // definitely in PAX format, but tar.Next() mistakenly guesses it
- // as USTAR, which creates a problem: if the newBase is >100
- // characters long, WriteHeader() returns an error like
- // "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...".
- //
- // To fix, set the format to PAX here. See docker/for-linux issue #484.
- hdr.Format = tar.FormatPAX
- hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
- if hdr.Typeflag == tar.TypeLink {
- hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
- }
-
- if err = rebasedTar.WriteHeader(hdr); err != nil {
- w.CloseWithError(err)
- return
- }
-
- // Ignoring GoSec G110. See https://github.com/securego/gosec/pull/433
- // and https://cure53.de/pentest-report_opa.pdf, which recommends
- // replacing io.Copy with io.CopyN. The latter allows specifying the
- // maximum number of bytes that should be read. By properly defining
- // the limit, it can be assured that a GZip compression bomb cannot
- // easily cause a Denial-of-Service.
- // After reviewing with @tonistiigi and @cpuguy83, this should not
- // affect us, because here we do not read into memory, hence should
- // not be vulnerable to this code consuming memory.
- //nolint:gosec // G110: Potential DoS vulnerability via decompression bomb (gosec)
- if _, err = io.Copy(rebasedTar, srcTar); err != nil {
- w.CloseWithError(err)
- return
- }
- }
- }()
-
- return rebased
-}
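
A short sketch of the rebase in action, assuming the RebaseArchiveEntries function above; it builds a one-entry tar in memory and renames its prefix (exampleRebase is a hypothetical name):

package archive

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
)

// exampleRebase writes one entry named "old/file.txt", rebases the
// stream, and reads back "new/file.txt".
func exampleRebase() error {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tw.WriteHeader(&tar.Header{Name: "old/file.txt", Mode: 0o644, Size: 2}); err != nil {
		return err
	}
	if _, err := tw.Write([]byte("hi")); err != nil {
		return err
	}
	if err := tw.Close(); err != nil {
		return err
	}

	rebased := RebaseArchiveEntries(&buf, "old", "new")
	defer rebased.Close()

	tr := tar.NewReader(rebased)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(hdr.Name) // -> "new/file.txt"
	}
}
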
-
-// CopyResource performs an archive copy from the given source path to the
-// given destination path. The source path MUST exist and the destination
-// path's parent directory must exist.
-func CopyResource(srcPath, dstPath string, followLink bool) error {
- var (
- srcInfo CopyInfo
- err error
- )
-
- // Ensure in platform semantics
- srcPath = normalizePath(srcPath)
- dstPath = normalizePath(dstPath)
-
- // Clean the source and destination paths.
- srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
- dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
-
- if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
- return err
- }
-
- content, err := TarResource(srcInfo)
- if err != nil {
- return err
- }
- defer content.Close()
-
- return CopyTo(content, srcInfo, dstPath)
-}
-
-// CopyTo handles extracting the given content whose
-// entries should be sourced from srcInfo to dstPath.
-func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
- // The destination path need not exist, but CopyInfoDestinationPath will
- // ensure that at least the parent directory exists.
- dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
- if err != nil {
- return err
- }
-
- dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
- if err != nil {
- return err
- }
- defer copyArchive.Close()
-
- options := &TarOptions{
- NoLchown: true,
- NoOverwriteDirNonDir: true,
- }
-
- return Untar(copyArchive, dstDir, options)
-}
-
-// ResolveHostSourcePath decides which real path needs to be copied, depending
-// on whether symlinks should be followed. If followLink is true, resolvedPath
-// is the link target of any symlink in the path; otherwise only symlinks in
-// the parent directory are resolved, and a symlink as the final path element
-// is returned as-is, without resolving.
-func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, _ error) {
- if followLink {
- var err error
- resolvedPath, err = filepath.EvalSymlinks(path)
- if err != nil {
- return "", "", err
- }
-
- resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
- } else {
- dirPath, basePath := filepath.Split(path)
-
- // if not follow symbol link, then resolve symbol link of parent dir
- resolvedDirPath, err := filepath.EvalSymlinks(dirPath)
- if err != nil {
- return "", "", err
- }
- // resolvedDirPath will have been cleaned (no trailing path separators) so
- // we can manually join it with the base path element.
- resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
- if hasTrailingPathSeparator(path) &&
- filepath.Base(path) != filepath.Base(resolvedPath) {
- rebaseName = filepath.Base(path)
- }
- }
- return resolvedPath, rebaseName, nil
-}
-
-// GetRebaseName normalizes and compares path and resolvedPath,
-// return completed resolved path and rebased file name
-func GetRebaseName(path, resolvedPath string) (string, string) {
- // linkTarget will have been cleaned (no trailing path separators and dot) so
- // we can manually join it with them
- var rebaseName string
- if specifiesCurrentDir(path) &&
- !specifiesCurrentDir(resolvedPath) {
- resolvedPath += string(filepath.Separator) + "."
- }
-
- if hasTrailingPathSeparator(path) &&
- !hasTrailingPathSeparator(resolvedPath) {
- resolvedPath += string(filepath.Separator)
- }
-
- if filepath.Base(path) != filepath.Base(resolvedPath) {
- // In the case where the path had a trailing separator and a symlink
- // evaluation has changed the last path component, we will need to
- // rebase the name in the archive that is being copied to match the
- // originally requested name.
- rebaseName = filepath.Base(path)
- }
- return resolvedPath, rebaseName
-}
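
Putting the pieces of this file together: a hedged end-to-end sketch (paths are placeholders, exampleCopy is a hypothetical name). The one-shot CopyResource call and the spelled-out resolve/tar/extract pipeline below it are equivalent:

package archive

func exampleCopy() error {
	// One-shot form.
	if err := CopyResource("/var/lib/app/config", "/backup/config", false); err != nil {
		return err
	}

	// Spelled-out form: resolve the source, tar it, extract at the destination.
	srcInfo, err := CopyInfoSourcePath("/var/lib/app/config", false)
	if err != nil {
		return err
	}
	content, err := TarResource(srcInfo)
	if err != nil {
		return err
	}
	defer content.Close()
	return CopyTo(content, srcInfo, "/backup/config")
}
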
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/copy_deprecated.go
new file mode 100644
index 0000000..1901e55
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_deprecated.go
@@ -0,0 +1,130 @@
+package archive
+
+import (
+ "io"
+
+ "github.com/moby/go-archive"
+ "github.com/moby/go-archive/compression"
+)
+
+var (
+ ErrNotDirectory = archive.ErrNotDirectory // Deprecated: use [archive.ErrNotDirectory] instead.
+ ErrDirNotExists = archive.ErrDirNotExists // Deprecated: use [archive.ErrDirNotExists] instead.
+ ErrCannotCopyDir = archive.ErrCannotCopyDir // Deprecated: use [archive.ErrCannotCopyDir] instead.
+ ErrInvalidCopySource = archive.ErrInvalidCopySource // Deprecated: use [archive.ErrInvalidCopySource] instead.
+)
+
+// PreserveTrailingDotOrSeparator returns the given cleaned path.
+//
+// Deprecated: use [archive.PreserveTrailingDotOrSeparator] instead.
+func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string) string {
+ return archive.PreserveTrailingDotOrSeparator(cleanedPath, originalPath)
+}
+
+// SplitPathDirEntry splits the given path between its directory name and its
+// basename.
+//
+// Deprecated: use [archive.SplitPathDirEntry] instead.
+func SplitPathDirEntry(path string) (dir, base string) {
+ return archive.SplitPathDirEntry(path)
+}
+
+// TarResource archives the resource described by the given CopyInfo to a Tar
+// archive.
+//
+// Deprecated: use [archive.TarResource] instead.
+func TarResource(sourceInfo archive.CopyInfo) (content io.ReadCloser, err error) {
+ return archive.TarResource(sourceInfo)
+}
+
+// TarResourceRebase is like TarResource but renames the first path element of
+// items in the resulting tar archive to match the given rebaseName if not "".
+//
+// Deprecated: use [archive.TarResourceRebase] instead.
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, _ error) {
+ return archive.TarResourceRebase(sourcePath, rebaseName)
+}
+
+// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
+// parameters to be sent to TarWithOptions.
+//
+// Deprecated: use [archive.TarResourceRebaseOpts] instead.
+func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
+ filter := []string{sourceBase}
+ return &TarOptions{
+ Compression: compression.None,
+ IncludeFiles: filter,
+ IncludeSourceDir: true,
+ RebaseNames: map[string]string{
+ sourceBase: rebaseName,
+ },
+ }
+}
+
+// CopyInfo holds basic info about the source or destination path of a copy operation.
+//
+// Deprecated: use [archive.CopyInfo] instead.
+type CopyInfo = archive.CopyInfo
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation.
+//
+// Deprecated: use [archive.CopyInfoSourcePath] instead.
+func CopyInfoSourcePath(path string, followLink bool) (archive.CopyInfo, error) {
+ return archive.CopyInfoSourcePath(path, followLink)
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation.
+//
+// Deprecated: use [archive.CopyInfoDestinationPath] instead.
+func CopyInfoDestinationPath(path string) (info archive.CopyInfo, err error) {
+ return archive.CopyInfoDestinationPath(path)
+}
+
+// PrepareArchiveCopy prepares the given srcContent archive.
+//
+// Deprecated: use [archive.PrepareArchiveCopy] instead.
+func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo archive.CopyInfo) (dstDir string, content io.ReadCloser, err error) {
+ return archive.PrepareArchiveCopy(srcContent, srcInfo, dstInfo)
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+//
+// Deprecated: use [archive.RebaseArchiveEntries] instead.
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
+ return archive.RebaseArchiveEntries(srcContent, oldBase, newBase)
+}
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path.
+//
+// Deprecated: use [archive.CopyResource] instead.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+ return archive.CopyResource(srcPath, dstPath, followLink)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+//
+// Deprecated: use [archive.CopyTo] instead.
+func CopyTo(content io.Reader, srcInfo archive.CopyInfo, dstPath string) error {
+ return archive.CopyTo(content, srcInfo, dstPath)
+}
+
+// ResolveHostSourcePath decides which real path needs to be copied.
+//
+// Deprecated: use [archive.ResolveHostSourcePath] instead.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, _ error) {
+ return archive.ResolveHostSourcePath(path, followLink)
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath.
+//
+// Deprecated: use [archive.GetRebaseName] instead.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+ return archive.GetRebaseName(path, resolvedPath)
+}
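
For downstream consumers, the net effect of this file is a compile-compatible shim over the new module. A hedged migration sketch (paths are placeholders):

package main

import (
	olda "github.com/docker/docker/pkg/archive" // deprecated shims (this package)
	newa "github.com/moby/go-archive"           // new home of the implementation
)

func main() {
	// Both calls do the same work; the shim simply forwards.
	_ = olda.CopyResource("/src/data", "/dst/data", false)

	// Preferred going forward.
	_ = newa.CopyResource("/src/data", "/dst/data", false)
}
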
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
deleted file mode 100644
index f579282..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
+++ /dev/null
@@ -1,11 +0,0 @@
-//go:build !windows
-
-package archive
-
-import (
- "path/filepath"
-)
-
-func normalizePath(path string) string {
- return filepath.ToSlash(path)
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
deleted file mode 100644
index 2b775b4..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package archive
-
-import (
- "path/filepath"
-)
-
-func normalizePath(path string) string {
- return filepath.FromSlash(path)
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/dev_freebsd.go b/vendor/github.com/docker/docker/pkg/archive/dev_freebsd.go
deleted file mode 100644
index aa8e291..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/dev_freebsd.go
+++ /dev/null
@@ -1,7 +0,0 @@
-//go:build freebsd
-
-package archive
-
-import "golang.org/x/sys/unix"
-
-var mknod = unix.Mknod
diff --git a/vendor/github.com/docker/docker/pkg/archive/dev_unix.go b/vendor/github.com/docker/docker/pkg/archive/dev_unix.go
deleted file mode 100644
index dffc596..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/dev_unix.go
+++ /dev/null
@@ -1,9 +0,0 @@
-//go:build !windows && !freebsd
-
-package archive
-
-import "golang.org/x/sys/unix"
-
-func mknod(path string, mode uint32, dev uint64) error {
- return unix.Mknod(path, mode, int(dev))
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go
deleted file mode 100644
index d5a394c..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/diff.go
+++ /dev/null
@@ -1,258 +0,0 @@
-package archive
-
-import (
- "archive/tar"
- "context"
- "fmt"
- "io"
- "os"
- "path/filepath"
- "runtime"
- "strings"
-
- "github.com/containerd/log"
-)
-
-// UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be
-// compressed or uncompressed.
-// Returns the size in bytes of the contents of the layer.
-func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
- tr := tar.NewReader(layer)
-
- var dirs []*tar.Header
- unpackedPaths := make(map[string]struct{})
-
- if options == nil {
- options = &TarOptions{}
- }
- if options.ExcludePatterns == nil {
- options.ExcludePatterns = []string{}
- }
-
- aufsTempdir := ""
- aufsHardlinks := make(map[string]*tar.Header)
-
- // Iterate through the files in the archive.
- for {
- hdr, err := tr.Next()
- if err == io.EOF {
- // end of tar archive
- break
- }
- if err != nil {
- return 0, err
- }
-
- size += hdr.Size
-
- // Normalize name, for safety and for a simple is-root check
- hdr.Name = filepath.Clean(hdr.Name)
-
- // Windows does not support filenames with colons in them. Ignore
- // these files. This is not a problem though (although it might
- // appear that it is). Let's suppose a client is running docker pull.
- // The daemon it points to is Windows. Would it make sense for the
- // client to be doing a docker pull Ubuntu for example (which has files
- // with colons in the name under /usr/share/man/man3)? No, absolutely
- // not as it would really only make sense that they were pulling a
- // Windows image. However, for development, it is necessary to be able
- // to pull Linux images which are in the repository.
- //
- // TODO Windows. Once the registry is aware of what images are Windows-
- // specific or Linux-specific, this warning should be changed to an error
- // to cater for the situation where someone does manage to upload a Linux
- // image but have it tagged as Windows inadvertently.
- if runtime.GOOS == "windows" {
- if strings.Contains(hdr.Name, ":") {
- log.G(context.TODO()).Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
- continue
- }
- }
-
- // Ensure that the parent directory exists.
- err = createImpliedDirectories(dest, hdr, options)
- if err != nil {
- return 0, err
- }
-
- // Skip AUFS metadata dirs
- if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
- // Regular files inside /.wh..wh.plnk can be used as hardlink targets
- // We don't want this directory, but we need the files in them so that
- // such hardlinks can be resolved.
- if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
- basename := filepath.Base(hdr.Name)
- aufsHardlinks[basename] = hdr
- if aufsTempdir == "" {
- if aufsTempdir, err = os.MkdirTemp(dest, "dockerplnk"); err != nil {
- return 0, err
- }
- defer os.RemoveAll(aufsTempdir)
- }
- if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, options); err != nil {
- return 0, err
- }
- }
-
- if hdr.Name != WhiteoutOpaqueDir {
- continue
- }
- }
- // #nosec G305 -- The joined path is guarded against path traversal.
- path := filepath.Join(dest, hdr.Name)
- rel, err := filepath.Rel(dest, path)
- if err != nil {
- return 0, err
- }
-
- // Note: as these operations are platform-specific, the slash must be too.
- if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
- return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
- }
- base := filepath.Base(path)
-
- if strings.HasPrefix(base, WhiteoutPrefix) {
- dir := filepath.Dir(path)
- if base == WhiteoutOpaqueDir {
- _, err := os.Lstat(dir)
- if err != nil {
- return 0, err
- }
- err = filepath.WalkDir(dir, func(path string, info os.DirEntry, err error) error {
- if err != nil {
- if os.IsNotExist(err) {
- err = nil // parent was deleted
- }
- return err
- }
- if path == dir {
- return nil
- }
- if _, exists := unpackedPaths[path]; !exists {
- return os.RemoveAll(path)
- }
- return nil
- })
- if err != nil {
- return 0, err
- }
- } else {
- originalBase := base[len(WhiteoutPrefix):]
- originalPath := filepath.Join(dir, originalBase)
- if err := os.RemoveAll(originalPath); err != nil {
- return 0, err
- }
- }
- } else {
- // If the path exists we almost always just want to remove and replace it.
- // The only exception is when it is a directory *and* the file from
- // the layer is also a directory. Then we want to merge them (i.e.
- // just apply the metadata from the layer).
- if fi, err := os.Lstat(path); err == nil {
- if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
- if err := os.RemoveAll(path); err != nil {
- return 0, err
- }
- }
- }
-
- srcData := io.Reader(tr)
- srcHdr := hdr
-
- // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
- // we manually retarget these into the temporary files we extracted them into
- if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
- linkBasename := filepath.Base(hdr.Linkname)
- srcHdr = aufsHardlinks[linkBasename]
- if srcHdr == nil {
- return 0, fmt.Errorf("Invalid aufs hardlink")
- }
- tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
- if err != nil {
- return 0, err
- }
- defer tmpFile.Close()
- srcData = tmpFile
- }
-
- if err := remapIDs(options.IDMap, srcHdr); err != nil {
- return 0, err
- }
-
- if err := createTarFile(path, dest, srcHdr, srcData, options); err != nil {
- return 0, err
- }
-
- // Directory mtimes must be handled at the end to avoid further
- // file creation in them to modify the directory mtime
- if hdr.Typeflag == tar.TypeDir {
- dirs = append(dirs, hdr)
- }
- unpackedPaths[path] = struct{}{}
- }
- }
-
- for _, hdr := range dirs {
- // #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice.
- path := filepath.Join(dest, hdr.Name)
- if err := chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
- return 0, err
- }
- }
-
- return size, nil
-}
-
-// ApplyLayer parses a diff in the standard layer format from `layer`,
-// and applies it to the directory `dest`. The stream `layer` can be
-// compressed or uncompressed.
-// Returns the size in bytes of the contents of the layer.
-func ApplyLayer(dest string, layer io.Reader) (int64, error) {
- return applyLayerHandler(dest, layer, &TarOptions{}, true)
-}
-
-// ApplyUncompressedLayer parses a diff in the standard layer format from
-// `layer`, and applies it to the directory `dest`. The stream `layer`
-// can only be uncompressed.
-// Returns the size in bytes of the contents of the layer.
-func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
- return applyLayerHandler(dest, layer, options, false)
-}
-
-// IsEmpty checks if the tar archive is empty (doesn't contain any entries).
-func IsEmpty(rd io.Reader) (bool, error) {
- decompRd, err := DecompressStream(rd)
- if err != nil {
- return true, fmt.Errorf("failed to decompress archive: %v", err)
- }
- defer decompRd.Close()
-
- tarReader := tar.NewReader(decompRd)
- if _, err := tarReader.Next(); err != nil {
- if err == io.EOF {
- return true, nil
- }
- return false, fmt.Errorf("failed to read next archive header: %v", err)
- }
-
- return false, nil
-}
-
-// applyLayerHandler does the bulk of the work for ApplyLayer, but allows skipping DecompressStream.
-func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
- dest = filepath.Clean(dest)
-
- // We need to be able to set any perms
- restore := overrideUmask(0)
- defer restore()
-
- if decompress {
- decompLayer, err := DecompressStream(layer)
- if err != nil {
- return 0, err
- }
- defer decompLayer.Close()
- layer = decompLayer
- }
- return UnpackLayer(dest, layer, options)
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/diff_deprecated.go
new file mode 100644
index 0000000..dd5e0d5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/diff_deprecated.go
@@ -0,0 +1,37 @@
+package archive
+
+import (
+ "io"
+
+ "github.com/moby/go-archive"
+)
+
+// UnpackLayer unpacks `layer` to `dest`.
+//
+// Deprecated: use [archive.UnpackLayer] instead.
+func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
+ return archive.UnpackLayer(dest, layer, toArchiveOpt(options))
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`.
+//
+// Deprecated: use [archive.ApplyLayer] instead.
+func ApplyLayer(dest string, layer io.Reader) (int64, error) {
+ return archive.ApplyLayer(dest, layer)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`.
+//
+// Deprecated: use [archive.ApplyUncompressedLayer] instead.
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
+ return archive.ApplyUncompressedLayer(dest, layer, toArchiveOpt(options))
+}
+
+// IsEmpty checks if the tar archive is empty (doesn't contain any entries).
+//
+// Deprecated: use [archive.IsEmpty] instead.
+func IsEmpty(rd io.Reader) (bool, error) {
+ return archive.IsEmpty(rd)
+}
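
As with the copy shims, callers can move to the module directly. A small sketch of the IsEmpty helper, assuming a local layer tarball (placeholder path):

package main

import (
	"fmt"
	"os"

	"github.com/moby/go-archive"
)

func main() {
	f, err := os.Open("layer.tar.gz") // placeholder path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// IsEmpty transparently decompresses, then checks for a first entry.
	empty, err := archive.IsEmpty(f)
	if err != nil {
		panic(err)
	}
	fmt.Println("empty layer:", empty)
}
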
diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go
deleted file mode 100644
index 7216f2f..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go
+++ /dev/null
@@ -1,21 +0,0 @@
-//go:build !windows
-
-package archive
-
-import "golang.org/x/sys/unix"
-
-// overrideUmask sets current process's file mode creation mask to newmask
-// and returns a function to restore it.
-//
-// WARNING for readers stumbling upon this code. Changing umask in a multi-
-// threaded environment isn't safe. Don't use this without understanding the
-// risks, and don't export this function for others to use (we shouldn't even
-// be using this ourself).
-//
-// FIXME(thaJeztah): we should get rid of these hacks if possible.
-func overrideUmask(newMask int) func() {
- oldMask := unix.Umask(newMask)
- return func() {
- unix.Umask(oldMask)
- }
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_windows.go b/vendor/github.com/docker/docker/pkg/archive/diff_windows.go
deleted file mode 100644
index d28f5b2..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/diff_windows.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package archive
-
-// overrideUmask is a no-op on windows.
-func overrideUmask(newmask int) func() {
- return func() {}
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/path.go b/vendor/github.com/docker/docker/pkg/archive/path.go
deleted file mode 100644
index 888a697..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/path.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package archive
-
-// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a
-// drive letter, is on the system drive.
-// On Linux: this is a no-op.
-// On Windows: this verifies and manipulates the path. It is used, for example,
-// when validating a user-provided path in docker cp. If a drive letter is
-// supplied, it must be the system drive, and it is always removed. The path is
-// also translated to OS semantics (IOW / to \). We need the path in this
-// syntax so that it can ultimately be concatenated with a Windows long-path,
-// which doesn't support drive letters. Examples:
-// C: --> Fail
-// C:\ --> \
-// a --> a
-// /a --> \a
-// d:\ --> Fail
-func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
- return checkSystemDriveAndRemoveDriveLetter(path)
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/path_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/path_deprecated.go
new file mode 100644
index 0000000..0fa74de
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/path_deprecated.go
@@ -0,0 +1,10 @@
+package archive
+
+import "github.com/moby/go-archive"
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, is on the system drive, and removes the drive letter.
+//
+// Deprecated: use [archive.CheckSystemDriveAndRemoveDriveLetter] instead.
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+ return archive.CheckSystemDriveAndRemoveDriveLetter(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/path_unix.go b/vendor/github.com/docker/docker/pkg/archive/path_unix.go
deleted file mode 100644
index 390264b..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/path_unix.go
+++ /dev/null
@@ -1,9 +0,0 @@
-//go:build !windows
-
-package archive
-
-// checkSystemDriveAndRemoveDriveLetter is the non-Windows implementation
-// of CheckSystemDriveAndRemoveDriveLetter
-func checkSystemDriveAndRemoveDriveLetter(path string) (string, error) {
- return path, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/path_windows.go b/vendor/github.com/docker/docker/pkg/archive/path_windows.go
deleted file mode 100644
index 7e18c8e..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/path_windows.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package archive
-
-import (
- "fmt"
- "path/filepath"
- "strings"
-)
-
-// checkSystemDriveAndRemoveDriveLetter is the Windows implementation
-// of CheckSystemDriveAndRemoveDriveLetter
-func checkSystemDriveAndRemoveDriveLetter(path string) (string, error) {
- if len(path) == 2 && string(path[1]) == ":" {
- return "", fmt.Errorf("no relative path specified in %q", path)
- }
- if !filepath.IsAbs(path) || len(path) < 2 {
- return filepath.FromSlash(path), nil
- }
- if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
- return "", fmt.Errorf("the specified path is not on the system drive (C:)")
- }
- return filepath.FromSlash(path[2:]), nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time.go b/vendor/github.com/docker/docker/pkg/archive/time.go
deleted file mode 100644
index 4e9ae95..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/time.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package archive
-
-import (
- "syscall"
- "time"
- "unsafe"
-)
-
-var (
- minTime = time.Unix(0, 0)
- maxTime time.Time
-)
-
-func init() {
- if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
- // This is a 64 bit timespec
- // os.Chtimes limits time to the following
- maxTime = time.Unix(0, 1<<63-1)
- } else {
- // This is a 32 bit timespec
- maxTime = time.Unix(1<<31-1, 0)
- }
-}
-
-func boundTime(t time.Time) time.Time {
- if t.Before(minTime) || t.After(maxTime) {
- return minTime
- }
-
- return t
-}
-
-func latestTime(t1, t2 time.Time) time.Time {
- if t1.Before(t2) {
- return t2
- }
- return t1
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_nonwindows.go b/vendor/github.com/docker/docker/pkg/archive/time_nonwindows.go
deleted file mode 100644
index 5bfdfa2..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/time_nonwindows.go
+++ /dev/null
@@ -1,41 +0,0 @@
-//go:build !windows
-
-package archive
-
-import (
- "os"
- "time"
-
- "golang.org/x/sys/unix"
-)
-
-// chtimes changes the access time and modified time of a file at the given path.
-// If the modified time is prior to the Unix Epoch, or after the end of Unix
-// Time, os.Chtimes has undefined behavior; callers are expected to clamp
-// times into that range first (see boundTime in time.go).
-func chtimes(name string, atime time.Time, mtime time.Time) error {
- return os.Chtimes(name, atime, mtime)
-}
-
-func timeToTimespec(time time.Time) unix.Timespec {
- if time.IsZero() {
- // Return UTIME_OMIT special value
- return unix.Timespec{
- Sec: 0,
- Nsec: (1 << 30) - 2,
- }
- }
- return unix.NsecToTimespec(time.UnixNano())
-}
-
-func lchtimes(name string, atime time.Time, mtime time.Time) error {
- utimes := [2]unix.Timespec{
- timeToTimespec(atime),
- timeToTimespec(mtime),
- }
- err := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes[0:], unix.AT_SYMLINK_NOFOLLOW)
- if err != nil && err != unix.ENOSYS {
- return err
- }
- return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_windows.go b/vendor/github.com/docker/docker/pkg/archive/time_windows.go
deleted file mode 100644
index af1f7c8..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/time_windows.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package archive
-
-import (
- "os"
- "time"
-
- "golang.org/x/sys/windows"
-)
-
-func chtimes(name string, atime time.Time, mtime time.Time) error {
- if err := os.Chtimes(name, atime, mtime); err != nil {
- return err
- }
-
- pathp, err := windows.UTF16PtrFromString(name)
- if err != nil {
- return err
- }
- h, err := windows.CreateFile(pathp,
- windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil,
- windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
- if err != nil {
- return err
- }
- defer windows.Close(h)
- c := windows.NsecToFiletime(mtime.UnixNano())
- return windows.SetFileTime(h, &c, nil, nil)
-}
-
-func lchtimes(name string, atime time.Time, mtime time.Time) error {
- return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/utils.go b/vendor/github.com/docker/docker/pkg/archive/utils.go
new file mode 100644
index 0000000..692cf16
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/utils.go
@@ -0,0 +1,42 @@
+package archive
+
+import (
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/moby/go-archive"
+)
+
+// ToArchiveOpt converts a [TarOptions] to an [archive.TarOptions].
+//
+// Deprecated: use [archive.TarOptions] instead; this utility exists only to ease the internal transition to the [github.com/moby/go-archive] module.
+func ToArchiveOpt(options *TarOptions) *archive.TarOptions {
+ return toArchiveOpt(options)
+}
+
+func toArchiveOpt(options *TarOptions) *archive.TarOptions {
+ if options == nil {
+ return nil
+ }
+
+ var chownOpts *archive.ChownOpts
+ if options.ChownOpts != nil {
+ chownOpts = &archive.ChownOpts{
+ UID: options.ChownOpts.UID,
+ GID: options.ChownOpts.GID,
+ }
+ }
+
+ return &archive.TarOptions{
+ IncludeFiles: options.IncludeFiles,
+ ExcludePatterns: options.ExcludePatterns,
+ Compression: options.Compression,
+ NoLchown: options.NoLchown,
+ IDMap: idtools.ToUserIdentityMapping(options.IDMap),
+ ChownOpts: chownOpts,
+ IncludeSourceDir: options.IncludeSourceDir,
+ WhiteoutFormat: options.WhiteoutFormat,
+ NoOverwriteDirNonDir: options.NoOverwriteDirNonDir,
+ RebaseNames: options.RebaseNames,
+ InUserNS: options.InUserNS,
+ BestEffortXattrs: options.BestEffortXattrs,
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
deleted file mode 100644
index d20478a..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package archive
-
-// Whiteouts are files with a special meaning for the layered filesystem.
-// Docker uses AUFS whiteout files inside exported archives. In other
-// filesystems these files are generated/handled on tar creation/extraction.
-
-// WhiteoutPrefix means the file is a whiteout. When the prefix is followed
-// by a filename, that file has been removed from the base layer.
-const WhiteoutPrefix = ".wh."
-
-// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
-// for removing an actual file. Normally these files are excluded from exported
-// archives.
-const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
-
-// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
-// layers. Normally these should not go into exported archives and all changed
-// hardlinks should be copied to the top layer.
-const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
-
-// WhiteoutOpaqueDir file means the directory has been made opaque: readdir
-// calls to this directory do not fall through to lower layers.
-const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
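
The naming scheme composes mechanically. A small sketch using the constants above (whiteoutFor is a hypothetical helper):

package archive

import "path/filepath"

// whiteoutFor shows how a deletion is encoded in a layer: removing
// "etc/passwd" in an upper layer is represented by an empty whiteout
// file next to where the original lived.
func whiteoutFor(deleted string) string {
	dir, base := filepath.Dir(deleted), filepath.Base(deleted)
	return filepath.Join(dir, WhiteoutPrefix+base) // "etc/.wh.passwd"
}
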
diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts_deprecated.go
new file mode 100644
index 0000000..0ab8590
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts_deprecated.go
@@ -0,0 +1,10 @@
+package archive
+
+import "github.com/moby/go-archive"
+
+const (
+ WhiteoutPrefix = archive.WhiteoutPrefix // Deprecated: use [archive.WhiteoutPrefix] instead.
+ WhiteoutMetaPrefix = archive.WhiteoutMetaPrefix // Deprecated: use [archive.WhiteoutMetaPrefix] instead.
+ WhiteoutLinkDir = archive.WhiteoutLinkDir // Deprecated: use [archive.WhiteoutLinkDir] instead.
+ WhiteoutOpaqueDir = archive.WhiteoutOpaqueDir // Deprecated: use [archive.WhiteoutOpaqueDir] instead.
+)
diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go
deleted file mode 100644
index f8a9725..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/wrap.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package archive
-
-import (
- "archive/tar"
- "bytes"
- "io"
-)
-
-// Generate generates a new archive from the content provided
-// as input.
-//
-// `files` is a sequence of path/content pairs. A new file is
-// added to the archive for each pair.
-// If the last pair is incomplete, the file is created with
-// empty content. For example:
-//
-// Generate("foo.txt", "hello world", "emptyfile")
-//
-// The above call will return an archive with 2 files:
-// - ./foo.txt with content "hello world"
-// - ./emptyfile with empty content
-//
-// FIXME: stream content instead of buffering
-// FIXME: specify permissions and other archive metadata
-func Generate(input ...string) (io.Reader, error) {
- files := parseStringPairs(input...)
- buf := new(bytes.Buffer)
- tw := tar.NewWriter(buf)
- for _, file := range files {
- name, content := file[0], file[1]
- hdr := &tar.Header{
- Name: name,
- Size: int64(len(content)),
- }
- if err := tw.WriteHeader(hdr); err != nil {
- return nil, err
- }
- if _, err := tw.Write([]byte(content)); err != nil {
- return nil, err
- }
- }
- if err := tw.Close(); err != nil {
- return nil, err
- }
- return buf, nil
-}
-
-func parseStringPairs(input ...string) [][2]string {
- output := make([][2]string, 0, len(input)/2+1)
- for i := 0; i < len(input); i += 2 {
- var pair [2]string
- pair[0] = input[i]
- if i+1 < len(input) {
- pair[1] = input[i+1]
- }
- output = append(output, pair)
- }
- return output
-}
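
Reading back what Generate produced makes the pairing rule concrete. A sketch assuming the function above (exampleGenerate is a hypothetical name):

package archive

import (
	"archive/tar"
	"fmt"
	"io"
)

// exampleGenerate round-trips the doc comment's example.
func exampleGenerate() error {
	rd, err := Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		return err
	}
	tr := tar.NewReader(rd)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil // printed: foo.txt (11 bytes), emptyfile (0 bytes)
		}
		if err != nil {
			return err
		}
		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
	}
}
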
diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/wrap_deprecated.go
new file mode 100644
index 0000000..e5d3fa9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/wrap_deprecated.go
@@ -0,0 +1,14 @@
+package archive
+
+import (
+ "io"
+
+ "github.com/moby/go-archive"
+)
+
+// Generate generates a new archive from the content provided as input.
+//
+// Deprecated: use [archive.Generate] instead.
+func Generate(input ...string) (io.Reader, error) {
+ return archive.Generate(input...)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/xattr_supported.go b/vendor/github.com/docker/docker/pkg/archive/xattr_supported.go
deleted file mode 100644
index 652a1f0..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/xattr_supported.go
+++ /dev/null
@@ -1,52 +0,0 @@
-//go:build linux || darwin || freebsd || netbsd
-
-package archive
-
-import (
- "errors"
- "fmt"
- "io/fs"
-
- "golang.org/x/sys/unix"
-)
-
-// lgetxattr retrieves the value of the extended attribute identified by attr
-// and associated with the given path in the file system.
-// It returns a nil slice and nil error if the xattr is not set.
-func lgetxattr(path string, attr string) ([]byte, error) {
- // Start with a 128 length byte array
- dest := make([]byte, 128)
- sz, err := unix.Lgetxattr(path, attr, dest)
-
- for errors.Is(err, unix.ERANGE) {
- // Buffer too small, use zero-sized buffer to get the actual size
- sz, err = unix.Lgetxattr(path, attr, []byte{})
- if err != nil {
- return nil, wrapPathError("lgetxattr", path, attr, err)
- }
- dest = make([]byte, sz)
- sz, err = unix.Lgetxattr(path, attr, dest)
- }
-
- if err != nil {
- if errors.Is(err, noattr) {
- return nil, nil
- }
- return nil, wrapPathError("lgetxattr", path, attr, err)
- }
-
- return dest[:sz], nil
-}
-
-// lsetxattr sets the value of the extended attribute identified by attr
-// and associated with the given path in the file system.
-func lsetxattr(path string, attr string, data []byte, flags int) error {
- return wrapPathError("lsetxattr", path, attr, unix.Lsetxattr(path, attr, data, flags))
-}
-
-func wrapPathError(op, path, attr string, err error) error {
- if err == nil {
- return nil
- }
- return &fs.PathError{Op: op, Path: path, Err: fmt.Errorf("xattr %q: %w", attr, err)}
-}
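
The contract above separates "attribute unset" from real errors. A sketch of a typical caller, assuming the unexported lgetxattr helper is in scope (unix builds; readCapability is a hypothetical name):

package archive

import "fmt"

// readCapability treats a nil slice with a nil error as "not set",
// which is the documented non-failure case of lgetxattr.
func readCapability(path string) error {
	caps, err := lgetxattr(path, "security.capability")
	if err != nil {
		return err // a real filesystem error
	}
	if caps == nil {
		fmt.Println("no security.capability xattr on", path)
		return nil
	}
	fmt.Printf("raw capability blob: %d bytes\n", len(caps))
	return nil
}
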
diff --git a/vendor/github.com/docker/docker/pkg/archive/xattr_supported_linux.go b/vendor/github.com/docker/docker/pkg/archive/xattr_supported_linux.go
deleted file mode 100644
index f2e7646..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/xattr_supported_linux.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package archive
-
-import "golang.org/x/sys/unix"
-
-var noattr = unix.ENODATA
diff --git a/vendor/github.com/docker/docker/pkg/archive/xattr_supported_unix.go b/vendor/github.com/docker/docker/pkg/archive/xattr_supported_unix.go
deleted file mode 100644
index 4d88241..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/xattr_supported_unix.go
+++ /dev/null
@@ -1,7 +0,0 @@
-//go:build !linux && !windows
-
-package archive
-
-import "golang.org/x/sys/unix"
-
-var noattr = unix.ENOATTR
diff --git a/vendor/github.com/docker/docker/pkg/archive/xattr_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/xattr_unsupported.go
deleted file mode 100644
index b0d9165..0000000
--- a/vendor/github.com/docker/docker/pkg/archive/xattr_unsupported.go
+++ /dev/null
@@ -1,11 +0,0 @@
-//go:build !linux && !darwin && !freebsd && !netbsd
-
-package archive
-
-func lgetxattr(path string, attr string) ([]byte, error) {
- return nil, nil
-}
-
-func lsetxattr(path string, attr string, data []byte, flags int) error {
- return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
index d2fbd94..23e90c2 100644
--- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
@@ -3,11 +3,15 @@ package idtools
import (
"fmt"
"os"
+
+ "github.com/moby/sys/user"
)
// IDMap contains a single entry for user namespace range remapping. An array
// of IDMap entries represents the structure that will be provided to the Linux
// kernel for creating a user namespace.
+//
+// Deprecated: use [user.IDMap] instead.
type IDMap struct {
ContainerID int `json:"container_id"`
HostID int `json:"host_id"`
@@ -17,28 +21,42 @@ type IDMap struct {
// MkdirAllAndChown creates a directory (including any along the path) and then modifies
// ownership to the requested uid/gid. If the directory already exists, this
// function will still change ownership and permissions.
+//
+// Deprecated: use [user.MkdirAllAndChown] instead.
func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error {
- return mkdirAs(path, mode, owner, true, true)
+ return user.MkdirAllAndChown(path, mode, owner.UID, owner.GID)
}
// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
// If the directory already exists, this function still changes ownership and permissions.
// Note that unlike os.Mkdir(), this function does not return IsExist error
// in case path already exists.
+//
+// Deprecated: use [user.MkdirAndChown] instead.
func MkdirAndChown(path string, mode os.FileMode, owner Identity) error {
- return mkdirAs(path, mode, owner, false, true)
+ return user.MkdirAndChown(path, mode, owner.UID, owner.GID)
}
// MkdirAllAndChownNew creates a directory (including any missing directories
// along the path) and then changes ownership ONLY of newly created directories
// to the requested uid/gid. If the directories along the path already exist,
// no change of ownership or permissions is performed.
+//
+// Deprecated: use [user.MkdirAllAndChown] with the [user.WithOnlyNew] option instead.
func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error {
- return mkdirAs(path, mode, owner, true, false)
+ return user.MkdirAllAndChown(path, mode, owner.UID, owner.GID, user.WithOnlyNew)
}
// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
// If the maps are empty, then the root uid/gid will default to the "real" 0/0.
+//
+// Deprecated: use [(user.IdentityMapping).RootPair] instead.
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
+ return getRootUIDGID(uidMap, gidMap)
+}
+
+// getRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
+// If the maps are empty, then the root uid/gid will default to the "real" 0/0.
+func getRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
uid, err := toHost(0, uidMap)
if err != nil {
return -1, -1, err
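
For downstream callers the migration is mechanical: each deprecated wrapper now forwards to github.com/moby/sys/user with the Identity split into plain uid/gid ints, and the chown-only-new-directories variant becomes an option. A before/after sketch (the path and ids are illustrative):

package main

import "github.com/moby/sys/user"

func main() {
	const dir = "/var/lib/demo"

	// Before (deprecated):
	//   idtools.MkdirAllAndChown(dir, 0o755, idtools.Identity{UID: 1000, GID: 1000})
	// After: chown dir itself plus any parents the call creates.
	if err := user.MkdirAllAndChown(dir, 0o755, 1000, 1000); err != nil {
		panic(err)
	}

	// Before (deprecated):
	//   idtools.MkdirAllAndChownNew(dir, 0o755, idtools.Identity{UID: 1000, GID: 1000})
	// After: chown only directories this call actually creates.
	if err := user.MkdirAllAndChown(dir, 0o755, 1000, 1000, user.WithOnlyNew); err != nil {
		panic(err)
	}
}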
@@ -101,11 +119,61 @@ type IdentityMapping struct {
GIDMaps []IDMap `json:"GIDMaps"`
}
+// FromUserIdentityMapping converts a [user.IdentityMapping] to an [idtools.IdentityMapping].
+//
+// Deprecated: use [user.IdentityMapping] directly; this type is transitioning to the user package.
+func FromUserIdentityMapping(u user.IdentityMapping) IdentityMapping {
+ return IdentityMapping{
+ UIDMaps: fromUserIDMap(u.UIDMaps),
+ GIDMaps: fromUserIDMap(u.GIDMaps),
+ }
+}
+
+func fromUserIDMap(u []user.IDMap) []IDMap {
+ if u == nil {
+ return nil
+ }
+ m := make([]IDMap, len(u))
+ for i := range u {
+ m[i] = IDMap{
+ ContainerID: int(u[i].ID),
+ HostID: int(u[i].ParentID),
+ Size: int(u[i].Count),
+ }
+ }
+ return m
+}
+
+// ToUserIdentityMapping converts an [idtools.IdentityMapping] to a [user.IdentityMapping].
+//
+// Deprecated: use [user.IdentityMapping] directly; this type is transitioning to the user package.
+func ToUserIdentityMapping(u IdentityMapping) user.IdentityMapping {
+ return user.IdentityMapping{
+ UIDMaps: toUserIDMap(u.UIDMaps),
+ GIDMaps: toUserIDMap(u.GIDMaps),
+ }
+}
+
+func toUserIDMap(u []IDMap) []user.IDMap {
+ if u == nil {
+ return nil
+ }
+ m := make([]user.IDMap, len(u))
+ for i := range u {
+ m[i] = user.IDMap{
+ ID: int64(u[i].ContainerID),
+ ParentID: int64(u[i].HostID),
+ Count: int64(u[i].Size),
+ }
+ }
+ return m
+}
+
// RootPair returns a uid and gid pair for the root user. The error is ignored
// because a root user always exists, and the defaults are correct when the uid
// and gid maps are empty.
func (i IdentityMapping) RootPair() Identity {
- uid, gid, _ := GetRootUIDGID(i.UIDMaps, i.GIDMaps)
+ uid, gid, _ := getRootUIDGID(i.UIDMaps, i.GIDMaps)
return Identity{UID: uid, GID: gid}
}
@@ -144,6 +212,8 @@ func (i IdentityMapping) Empty() bool {
}
// CurrentIdentity returns the identity of the current process.
+//
+// Deprecated: use [os.Getuid] and [os.Getegid] instead.
func CurrentIdentity() Identity {
return Identity{UID: os.Getuid(), GID: os.Getegid()}
}
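
The new conversion shims are pure field mapping: ContainerID/HostID/Size on the idtools side correspond to ID/ParentID/Count (widened to int64) on the user side, and nil slices stay nil. A small bridging sketch for the transition period (values are illustrative):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/idtools"
	"github.com/moby/sys/user"
)

func main() {
	// One remap entry: container id 0 maps to host id 100000, spanning 65536 ids.
	um := user.IdentityMapping{
		UIDMaps: []user.IDMap{{ID: 0, ParentID: 100000, Count: 65536}},
		GIDMaps: []user.IDMap{{ID: 0, ParentID: 100000, Count: 65536}},
	}

	// Bridge into code still typed against idtools...
	im := idtools.FromUserIdentityMapping(um)
	rp := im.RootPair()
	fmt.Println(rp.UID, rp.GID) // 100000 100000

	// ...and back once the call site is migrated.
	um2 := idtools.ToUserIdentityMapping(im)
	fmt.Println(um2.UIDMaps[0].ParentID) // 100000
}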
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
deleted file mode 100644
index 1f11fe4..0000000
--- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
+++ /dev/null
@@ -1,166 +0,0 @@
-//go:build !windows
-
-package idtools
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "strconv"
- "syscall"
-
- "github.com/moby/sys/user"
-)
-
-func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
- path, err := filepath.Abs(path)
- if err != nil {
- return err
- }
-
- stat, err := os.Stat(path)
- if err == nil {
- if !stat.IsDir() {
- return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
- }
- if !chownExisting {
- return nil
- }
-
- // short-circuit -- we were called with an existing directory and chown was requested
- return setPermissions(path, mode, owner, stat)
- }
-
- // make an array containing the original path asked for, plus (for mkAll == true)
- // all path components leading up to the complete path that don't exist before we MkdirAll
- // so that we can chown all of them properly at the end. If chownExisting is false, we won't
- // chown the full directory path if it exists
- var paths []string
- if os.IsNotExist(err) {
- paths = []string{path}
- }
-
- if mkAll {
- // walk back to "/" looking for directories which do not exist
- // and add them to the paths array for chown after creation
- dirPath := path
- for {
- dirPath = filepath.Dir(dirPath)
- if dirPath == "/" {
- break
- }
- if _, err = os.Stat(dirPath); err != nil && os.IsNotExist(err) {
- paths = append(paths, dirPath)
- }
- }
- if err = os.MkdirAll(path, mode); err != nil {
- return err
- }
- } else if err = os.Mkdir(path, mode); err != nil {
- return err
- }
- // even if it existed, we will chown the requested path + any subpaths that
- // didn't exist when we called MkdirAll
- for _, pathComponent := range paths {
- if err = setPermissions(pathComponent, mode, owner, nil); err != nil {
- return err
- }
- }
- return nil
-}
-
-// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username
-//
-// Deprecated: use [user.LookupUser] instead
-func LookupUser(name string) (user.User, error) {
- return user.LookupUser(name)
-}
-
-// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid
-//
-// Deprecated: use [user.LookupUid] instead
-func LookupUID(uid int) (user.User, error) {
- return user.LookupUid(uid)
-}
-
-// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
-//
-// Deprecated: use [user.LookupGroup] instead
-func LookupGroup(name string) (user.Group, error) {
- return user.LookupGroup(name)
-}
-
-// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested
-// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the
-// dir is on an NFS share, so don't call chown unless we absolutely must.
-// Likewise for setting permissions.
-func setPermissions(p string, mode os.FileMode, owner Identity, stat os.FileInfo) error {
- if stat == nil {
- var err error
- stat, err = os.Stat(p)
- if err != nil {
- return err
- }
- }
- if stat.Mode().Perm() != mode.Perm() {
- if err := os.Chmod(p, mode.Perm()); err != nil {
- return err
- }
- }
- ssi := stat.Sys().(*syscall.Stat_t)
- if ssi.Uid == uint32(owner.UID) && ssi.Gid == uint32(owner.GID) {
- return nil
- }
- return os.Chown(p, owner.UID, owner.GID)
-}
-
-// LoadIdentityMapping takes a requested username and
-// using the data from /etc/sub{uid,gid} ranges, creates the
-// proper uid and gid remapping ranges for that user/group pair
-func LoadIdentityMapping(name string) (IdentityMapping, error) {
- // TODO: Consider adding support for calling out to "getent"
- usr, err := user.LookupUser(name)
- if err != nil {
- return IdentityMapping{}, fmt.Errorf("could not get user for username %s: %v", name, err)
- }
-
- subuidRanges, err := lookupSubRangesFile("/etc/subuid", usr)
- if err != nil {
- return IdentityMapping{}, err
- }
- subgidRanges, err := lookupSubRangesFile("/etc/subgid", usr)
- if err != nil {
- return IdentityMapping{}, err
- }
-
- return IdentityMapping{
- UIDMaps: subuidRanges,
- GIDMaps: subgidRanges,
- }, nil
-}
-
-func lookupSubRangesFile(path string, usr user.User) ([]IDMap, error) {
- uidstr := strconv.Itoa(usr.Uid)
- rangeList, err := user.ParseSubIDFileFilter(path, func(sid user.SubID) bool {
- return sid.Name == usr.Name || sid.Name == uidstr
- })
- if err != nil {
- return nil, err
- }
- if len(rangeList) == 0 {
- return nil, fmt.Errorf("no subuid ranges found for user %q", usr.Name)
- }
-
- idMap := []IDMap{}
-
- containerID := 0
- for _, idrange := range rangeList {
- idMap = append(idMap, IDMap{
- ContainerID: containerID,
- HostID: int(idrange.SubID),
- Size: int(idrange.Count),
- })
- containerID = containerID + int(idrange.Count)
- }
- return idMap, nil
-}
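
The deleted LoadIdentityMapping path (its replacement lives in github.com/moby/sys/user) stacked each /etc/subuid range at the next free container id, so multiple host ranges present as one contiguous container id space. A standalone worked example of that stacking, using hypothetical subuid entries:

package main

import "fmt"

// IDMap mirrors idtools.IDMap for this self-contained sketch.
type IDMap struct{ ContainerID, HostID, Size int }

func main() {
	// Hypothetical /etc/subuid entries for one user:
	//   alice:100000:65536
	//   alice:300000:10000
	ranges := [][2]int{{100000, 65536}, {300000, 10000}}

	var m []IDMap
	containerID := 0
	for _, r := range ranges {
		m = append(m, IDMap{ContainerID: containerID, HostID: r[0], Size: r[1]})
		containerID += r[1] // next range starts where this one ends
	}
	fmt.Println(m) // [{0 100000 65536} {65536 300000 10000}]
}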
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
index a12b140..f83f59f 100644
--- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
@@ -1,9 +1,5 @@
package idtools
-import (
- "os"
-)
-
const (
SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege"
)
@@ -14,11 +10,3 @@ const (
ContainerUserSidString = "S-1-5-93-2-2"
)
-
-// This is currently a wrapper around [os.MkdirAll] since currently
-// permissions aren't set through this path, the identity isn't utilized.
-// Ownership is handled elsewhere, but in the future could be support here
-// too.
-func mkdirAs(path string, _ os.FileMode, _ Identity, _, _ bool) error {
- return os.MkdirAll(path, 0)
-}