author     mo khan <mo@mokhan.ca>  2025-05-11 21:12:57 -0600
committer  mo khan <mo@mokhan.ca>  2025-05-11 21:12:57 -0600
commit     60440f90dca28e99a31dd328c5f6d5dc0f9b6a2e (patch)
tree       2f54adf55086516f162f0a55a5347e6b25f7f176 /vendor/github.com/docker
parent     05ca9b8d3a9c7203a3a3b590beaa400900bd9007 (diff)
chore: vendor go dependencies
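The vendor tree below is the kind of output produced by Go's module tooling; a minimal sketch of the workflow, assuming a standard go.mod at the repository root (not shown in this diff):

    go mod tidy     # reconcile go.mod/go.sum with the imports in the source tree
    go mod vendor   # copy every dependency, including these docker packages, into ./vendor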
Diffstat (limited to 'vendor/github.com/docker')
-rw-r--r--  vendor/github.com/docker/docker/AUTHORS | 2486
-rw-r--r--  vendor/github.com/docker/docker/LICENSE | 191
-rw-r--r--  vendor/github.com/docker/docker/NOTICE | 19
-rw-r--r--  vendor/github.com/docker/docker/api/README.md | 42
-rw-r--r--  vendor/github.com/docker/docker/api/common.go | 20
-rw-r--r--  vendor/github.com/docker/docker/api/swagger-gen.yaml | 12
-rw-r--r--  vendor/github.com/docker/docker/api/swagger.yaml | 13528
-rw-r--r--  vendor/github.com/docker/docker/api/types/blkiodev/blkio.go | 23
-rw-r--r--  vendor/github.com/docker/docker/api/types/checkpoint/list.go | 7
-rw-r--r--  vendor/github.com/docker/docker/api/types/checkpoint/options.go | 19
-rw-r--r--  vendor/github.com/docker/docker/api/types/client.go | 256
-rw-r--r--  vendor/github.com/docker/docker/api/types/common/id_response.go | 13
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/change_type.go | 15
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/change_types.go | 23
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/commit.go | 7
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/config.go | 73
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/container.go | 188
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/create_request.go | 13
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/create_response.go | 19
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/errors.go | 9
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/exec.go | 51
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/filesystem_change.go | 19
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/health.go | 26
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/hostconfig.go | 501
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go | 45
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go | 47
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/network_settings.go | 56
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/options.go | 67
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/port.go | 23
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/stats.go | 177
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/top_response.go | 18
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/update_response.go | 14
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/wait_exit_error.go | 12
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/wait_response.go | 18
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/waitcondition.go | 22
-rw-r--r--  vendor/github.com/docker/docker/api/types/error_response.go | 13
-rw-r--r--  vendor/github.com/docker/docker/api/types/error_response_ext.go | 6
-rw-r--r--  vendor/github.com/docker/docker/api/types/events/events.go | 135
-rw-r--r--  vendor/github.com/docker/docker/api/types/filters/errors.go | 24
-rw-r--r--  vendor/github.com/docker/docker/api/types/filters/parse.go | 336
-rw-r--r--  vendor/github.com/docker/docker/api/types/image/delete_response.go | 15
-rw-r--r--  vendor/github.com/docker/docker/api/types/image/image.go | 47
-rw-r--r--  vendor/github.com/docker/docker/api/types/image/image_history.go | 36
-rw-r--r--  vendor/github.com/docker/docker/api/types/image/image_inspect.go | 140
-rw-r--r--  vendor/github.com/docker/docker/api/types/image/manifest.go | 99
-rw-r--r--  vendor/github.com/docker/docker/api/types/image/opts.go | 116
-rw-r--r--  vendor/github.com/docker/docker/api/types/image/summary.go | 101
-rw-r--r--  vendor/github.com/docker/docker/api/types/mount/mount.go | 157
-rw-r--r--  vendor/github.com/docker/docker/api/types/network/create_response.go | 19
-rw-r--r--  vendor/github.com/docker/docker/api/types/network/endpoint.go | 153
-rw-r--r--  vendor/github.com/docker/docker/api/types/network/ipam.go | 134
-rw-r--r--  vendor/github.com/docker/docker/api/types/network/network.go | 168
-rw-r--r--  vendor/github.com/docker/docker/api/types/plugin.go | 203
-rw-r--r--  vendor/github.com/docker/docker/api/types/plugin_device.go | 25
-rw-r--r--  vendor/github.com/docker/docker/api/types/plugin_env.go | 25
-rw-r--r--  vendor/github.com/docker/docker/api/types/plugin_interface_type.go | 21
-rw-r--r--  vendor/github.com/docker/docker/api/types/plugin_mount.go | 37
-rw-r--r--  vendor/github.com/docker/docker/api/types/plugin_responses.go | 71
-rw-r--r--  vendor/github.com/docker/docker/api/types/registry/authconfig.go | 109
-rw-r--r--  vendor/github.com/docker/docker/api/types/registry/authenticate.go | 21
-rw-r--r--  vendor/github.com/docker/docker/api/types/registry/registry.go | 116
-rw-r--r--  vendor/github.com/docker/docker/api/types/registry/search.go | 48
-rw-r--r--  vendor/github.com/docker/docker/api/types/storage/driver_data.go | 23
-rw-r--r--  vendor/github.com/docker/docker/api/types/strslice/strslice.go | 30
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/common.go | 48
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/config.go | 46
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/container.go | 119
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/network.go | 121
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/node.go | 139
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/runtime.go | 27
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go | 3
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go | 808
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto | 19
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/secret.go | 50
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/service.go | 202
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/service_create_response.go | 20
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/service_update_response.go | 12
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/swarm.go | 237
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/task.go | 225
-rw-r--r--  vendor/github.com/docker/docker/api/types/system/info.go | 153
-rw-r--r--  vendor/github.com/docker/docker/api/types/system/runtime.go | 20
-rw-r--r--  vendor/github.com/docker/docker/api/types/system/security_opts.go | 48
-rw-r--r--  vendor/github.com/docker/docker/api/types/time/timestamp.go | 131
-rw-r--r--  vendor/github.com/docker/docker/api/types/types.go | 179
-rw-r--r--  vendor/github.com/docker/docker/api/types/types_deprecated.go | 115
-rw-r--r--  vendor/github.com/docker/docker/api/types/versions/compare.go | 65
-rw-r--r--  vendor/github.com/docker/docker/api/types/volume/cluster_volume.go | 420
-rw-r--r--  vendor/github.com/docker/docker/api/types/volume/create_options.go | 29
-rw-r--r--  vendor/github.com/docker/docker/api/types/volume/list_response.go | 18
-rw-r--r--  vendor/github.com/docker/docker/api/types/volume/options.go | 15
-rw-r--r--  vendor/github.com/docker/docker/api/types/volume/volume.go | 75
-rw-r--r--  vendor/github.com/docker/docker/api/types/volume/volume_update.go | 7
-rw-r--r--  vendor/github.com/docker/docker/client/README.md | 38
-rw-r--r--  vendor/github.com/docker/docker/client/build_cancel.go | 16
-rw-r--r--  vendor/github.com/docker/docker/client/build_prune.go | 56
-rw-r--r--  vendor/github.com/docker/docker/client/checkpoint.go | 18
-rw-r--r--  vendor/github.com/docker/docker/client/checkpoint_create.go | 19
-rw-r--r--  vendor/github.com/docker/docker/client/checkpoint_delete.go | 25
-rw-r--r--  vendor/github.com/docker/docker/client/checkpoint_list.go | 28
-rw-r--r--  vendor/github.com/docker/docker/client/client.go | 474
-rw-r--r--  vendor/github.com/docker/docker/client/client_deprecated.go | 27
-rw-r--r--  vendor/github.com/docker/docker/client/client_interfaces.go | 236
-rw-r--r--  vendor/github.com/docker/docker/client/client_unix.go | 7
-rw-r--r--  vendor/github.com/docker/docker/client/client_windows.go | 5
-rw-r--r--  vendor/github.com/docker/docker/client/config_create.go | 25
-rw-r--r--  vendor/github.com/docker/docker/client/config_inspect.go | 37
-rw-r--r--  vendor/github.com/docker/docker/client/config_list.go | 38
-rw-r--r--  vendor/github.com/docker/docker/client/config_remove.go | 17
-rw-r--r--  vendor/github.com/docker/docker/client/config_update.go | 24
-rw-r--r--  vendor/github.com/docker/docker/client/container_attach.go | 65
-rw-r--r--  vendor/github.com/docker/docker/client/container_commit.go | 60
-rw-r--r--  vendor/github.com/docker/docker/client/container_copy.go | 104
-rw-r--r--  vendor/github.com/docker/docker/client/container_create.go | 168
-rw-r--r--  vendor/github.com/docker/docker/client/container_diff.go | 30
-rw-r--r--  vendor/github.com/docker/docker/client/container_exec.go | 81
-rw-r--r--  vendor/github.com/docker/docker/client/container_export.go | 24
-rw-r--r--  vendor/github.com/docker/docker/client/container_inspect.go | 57
-rw-r--r--  vendor/github.com/docker/docker/client/container_kill.go | 23
-rw-r--r--  vendor/github.com/docker/docker/client/container_list.go | 56
-rw-r--r--  vendor/github.com/docker/docker/client/container_logs.go | 85
-rw-r--r--  vendor/github.com/docker/docker/client/container_pause.go | 15
-rw-r--r--  vendor/github.com/docker/docker/client/container_prune.go | 35
-rw-r--r--  vendor/github.com/docker/docker/client/container_remove.go | 32
-rw-r--r--  vendor/github.com/docker/docker/client/container_rename.go | 20
-rw-r--r--  vendor/github.com/docker/docker/client/container_resize.go | 38
-rw-r--r--  vendor/github.com/docker/docker/client/container_restart.go | 41
-rw-r--r--  vendor/github.com/docker/docker/client/container_start.go | 28
-rw-r--r--  vendor/github.com/docker/docker/client/container_stats.go | 56
-rw-r--r--  vendor/github.com/docker/docker/client/container_stop.go | 45
-rw-r--r--  vendor/github.com/docker/docker/client/container_top.go | 33
-rw-r--r--  vendor/github.com/docker/docker/client/container_unpause.go | 15
-rw-r--r--  vendor/github.com/docker/docker/client/container_update.go | 26
-rw-r--r--  vendor/github.com/docker/docker/client/container_wait.go | 122
-rw-r--r--  vendor/github.com/docker/docker/client/disk_usage.go | 33
-rw-r--r--  vendor/github.com/docker/docker/client/distribution_inspect.go | 39
-rw-r--r--  vendor/github.com/docker/docker/client/envvars.go | 90
-rw-r--r--  vendor/github.com/docker/docker/client/errors.go | 85
-rw-r--r--  vendor/github.com/docker/docker/client/events.go | 100
-rw-r--r--  vendor/github.com/docker/docker/client/hijack.go | 139
-rw-r--r--  vendor/github.com/docker/docker/client/image_build.go | 182
-rw-r--r--  vendor/github.com/docker/docker/client/image_create.go | 40
-rw-r--r--  vendor/github.com/docker/docker/client/image_history.go | 56
-rw-r--r--  vendor/github.com/docker/docker/client/image_history_opts.go | 19
-rw-r--r--  vendor/github.com/docker/docker/client/image_import.go | 48
-rw-r--r--  vendor/github.com/docker/docker/client/image_inspect.go | 65
-rw-r--r--  vendor/github.com/docker/docker/client/image_inspect_opts.go | 50
-rw-r--r--  vendor/github.com/docker/docker/client/image_list.go | 67
-rw-r--r--  vendor/github.com/docker/docker/client/image_load.go | 54
-rw-r--r--  vendor/github.com/docker/docker/client/image_load_opts.go | 41
-rw-r--r--  vendor/github.com/docker/docker/client/image_prune.go | 35
-rw-r--r--  vendor/github.com/docker/docker/client/image_pull.go | 64
-rw-r--r--  vendor/github.com/docker/docker/client/image_push.go | 73
-rw-r--r--  vendor/github.com/docker/docker/client/image_remove.go | 31
-rw-r--r--  vendor/github.com/docker/docker/client/image_save.go | 41
-rw-r--r--  vendor/github.com/docker/docker/client/image_save_opts.go | 33
-rw-r--r--  vendor/github.com/docker/docker/client/image_search.go | 54
-rw-r--r--  vendor/github.com/docker/docker/client/image_tag.go | 37
-rw-r--r--  vendor/github.com/docker/docker/client/info.go | 26
-rw-r--r--  vendor/github.com/docker/docker/client/login.go | 24
-rw-r--r--  vendor/github.com/docker/docker/client/network_connect.go | 28
-rw-r--r--  vendor/github.com/docker/docker/client/network_create.go | 40
-rw-r--r--  vendor/github.com/docker/docker/client/network_disconnect.go | 28
-rw-r--r--  vendor/github.com/docker/docker/client/network_inspect.go | 47
-rw-r--r--  vendor/github.com/docker/docker/client/network_list.go | 32
-rw-r--r--  vendor/github.com/docker/docker/client/network_prune.go | 35
-rw-r--r--  vendor/github.com/docker/docker/client/network_remove.go | 14
-rw-r--r--  vendor/github.com/docker/docker/client/node_inspect.go | 33
-rw-r--r--  vendor/github.com/docker/docker/client/node_list.go | 35
-rw-r--r--  vendor/github.com/docker/docker/client/node_remove.go | 25
-rw-r--r--  vendor/github.com/docker/docker/client/node_update.go | 22
-rw-r--r--  vendor/github.com/docker/docker/client/options.go | 240
-rw-r--r--  vendor/github.com/docker/docker/client/ping.go | 80
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_create.go | 23
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_disable.go | 23
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_enable.go | 23
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_inspect.go | 32
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_install.go | 117
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_list.go | 33
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_push.go | 24
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_remove.go | 25
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_set.go | 17
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_upgrade.go | 47
-rw-r--r--  vendor/github.com/docker/docker/client/request.go | 325
-rw-r--r--  vendor/github.com/docker/docker/client/secret_create.go | 25
-rw-r--r--  vendor/github.com/docker/docker/client/secret_inspect.go | 37
-rw-r--r--  vendor/github.com/docker/docker/client/secret_list.go | 38
-rw-r--r--  vendor/github.com/docker/docker/client/secret_remove.go | 17
-rw-r--r--  vendor/github.com/docker/docker/client/secret_update.go | 24
-rw-r--r--  vendor/github.com/docker/docker/client/service_create.go | 213
-rw-r--r--  vendor/github.com/docker/docker/client/service_inspect.go | 39
-rw-r--r--  vendor/github.com/docker/docker/client/service_list.go | 39
-rw-r--r--  vendor/github.com/docker/docker/client/service_logs.go | 57
-rw-r--r--  vendor/github.com/docker/docker/client/service_remove.go | 15
-rw-r--r--  vendor/github.com/docker/docker/client/service_update.go | 90
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_get_unlock_key.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_init.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_inspect.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_join.go | 14
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_leave.go | 17
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_unlock.go | 14
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_update.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/task_inspect.go | 34
-rw-r--r--  vendor/github.com/docker/docker/client/task_list.go | 35
-rw-r--r--  vendor/github.com/docker/docker/client/task_logs.go | 51
-rw-r--r--  vendor/github.com/docker/docker/client/utils.go | 96
-rw-r--r--  vendor/github.com/docker/docker/client/version.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/volume_create.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/volume_inspect.go | 40
-rw-r--r--  vendor/github.com/docker/docker/client/volume_list.go | 33
-rw-r--r--  vendor/github.com/docker/docker/client/volume_prune.go | 35
-rw-r--r--  vendor/github.com/docker/docker/client/volume_remove.go | 34
-rw-r--r--  vendor/github.com/docker/docker/client/volume_update.go | 28
-rw-r--r--  vendor/github.com/docker/docker/errdefs/defs.go | 69
-rw-r--r--  vendor/github.com/docker/docker/errdefs/doc.go | 8
-rw-r--r--  vendor/github.com/docker/docker/errdefs/helpers.go | 305
-rw-r--r--  vendor/github.com/docker/docker/errdefs/http_helpers.go | 47
-rw-r--r--  vendor/github.com/docker/docker/errdefs/is.go | 123
-rw-r--r--  vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go | 90
-rw-r--r--  vendor/github.com/docker/docker/internal/multierror/multierror.go | 46
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive.go | 1507
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_linux.go | 107
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_other.go | 7
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_unix.go | 126
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_windows.go | 69
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes.go | 430
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_linux.go | 281
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_other.go | 95
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_unix.go | 43
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_windows.go | 33
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/copy.go | 497
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/copy_unix.go | 11
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/copy_windows.go | 9
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/dev_freebsd.go | 7
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/dev_unix.go | 9
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/diff.go | 258
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/diff_unix.go | 21
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/diff_windows.go | 6
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/path.go | 20
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/path_unix.go | 9
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/path_windows.go | 22
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/time.go | 38
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/time_nonwindows.go | 41
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/time_windows.go | 32
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/whiteouts.go | 23
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/wrap.go | 59
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/xattr_supported.go | 52
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/xattr_supported_linux.go | 5
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/xattr_supported_unix.go | 7
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/xattr_unsupported.go | 11
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/idtools.go | 149
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go | 166
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go | 24
-rw-r--r--  vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go | 314
-rw-r--r--  vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go | 190
-rw-r--r--  vendor/github.com/docker/go-connections/LICENSE | 191
-rw-r--r--  vendor/github.com/docker/go-connections/nat/nat.go | 240
-rw-r--r--  vendor/github.com/docker/go-connections/nat/parse.go | 33
-rw-r--r--  vendor/github.com/docker/go-connections/nat/sort.go | 96
-rw-r--r--  vendor/github.com/docker/go-connections/sockets/README.md | 0
-rw-r--r--  vendor/github.com/docker/go-connections/sockets/inmem_socket.go | 81
-rw-r--r--  vendor/github.com/docker/go-connections/sockets/proxy.go | 28
-rw-r--r--  vendor/github.com/docker/go-connections/sockets/sockets.go | 37
-rw-r--r--  vendor/github.com/docker/go-connections/sockets/sockets_unix.go | 39
-rw-r--r--  vendor/github.com/docker/go-connections/sockets/sockets_windows.go | 28
-rw-r--r--  vendor/github.com/docker/go-connections/sockets/tcp_socket.go | 22
-rw-r--r--  vendor/github.com/docker/go-connections/sockets/unix_socket.go | 126
-rw-r--r--  vendor/github.com/docker/go-connections/tlsconfig/certpool.go | 16
-rw-r--r--  vendor/github.com/docker/go-connections/tlsconfig/config.go | 261
-rw-r--r--  vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go | 14
-rw-r--r--  vendor/github.com/docker/go-units/CONTRIBUTING.md | 67
-rw-r--r--  vendor/github.com/docker/go-units/LICENSE | 191
-rw-r--r--  vendor/github.com/docker/go-units/MAINTAINERS | 46
-rw-r--r--  vendor/github.com/docker/go-units/README.md | 16
-rw-r--r--  vendor/github.com/docker/go-units/circle.yml | 11
-rw-r--r--  vendor/github.com/docker/go-units/duration.go | 35
-rw-r--r--  vendor/github.com/docker/go-units/size.go | 154
-rw-r--r--  vendor/github.com/docker/go-units/ulimit.go | 123
277 files changed, 37433 insertions(+), 0 deletions(-)
diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS
new file mode 100644
index 0000000..88032de
--- /dev/null
+++ b/vendor/github.com/docker/docker/AUTHORS
@@ -0,0 +1,2486 @@
+# File @generated by hack/generate-authors.sh. DO NOT EDIT.
+# This file lists all contributors to the repository.
+# See hack/generate-authors.sh to make modifications.
+
+7sunarni <710720732@qq.com>
+Aanand Prasad <aanand.prasad@gmail.com>
+Aarni Koskela <akx@iki.fi>
+Aaron Davidson <aaron@databricks.com>
+Aaron Feng <aaron.feng@gmail.com>
+Aaron Hnatiw <aaron@griddio.com>
+Aaron Huslage <huslage@gmail.com>
+Aaron L. Xu <liker.xu@foxmail.com>
+Aaron Lehmann <alehmann@netflix.com>
+Aaron Welch <welch@packet.net>
+Aaron Yoshitake <airandfingers@gmail.com>
+Abdur Rehman <abdur_rehman@mentor.com>
+Abel Muiño <amuino@gmail.com>
+Abhijeet Kasurde <akasurde@redhat.com>
+Abhinandan Prativadi <aprativadi@gmail.com>
+Abhinav Ajgaonkar <abhinav316@gmail.com>
+Abhishek Chanda <abhishek.becs@gmail.com>
+Abhishek Sharma <abhishek@asharma.me>
+Abin Shahab <ashahab@altiscale.com>
+Abirdcfly <fp544037857@gmail.com>
+Ada Mancini <ada@docker.com>
+Adam Avilla <aavilla@yp.com>
+Adam Dobrawy <naczelnik@jawnosc.tk>
+Adam Eijdenberg <adam.eijdenberg@gmail.com>
+Adam Kunk <adam.kunk@tiaa-cref.org>
+Adam Lamers <adam.lamers@wmsdev.pl>
+Adam Miller <admiller@redhat.com>
+Adam Mills <adam@armills.info>
+Adam Pointer <adam.pointer@skybettingandgaming.com>
+Adam Simon <adamsimon85100@gmail.com>
+Adam Singer <financeCoding@gmail.com>
+Adam Thornton <adam.thornton@maryville.com>
+Adam Walz <adam@adamwalz.net>
+Adam Williams <awilliams@mirantis.com>
+AdamKorcz <adam@adalogics.com>
+Addam Hardy <addam.hardy@gmail.com>
+Aditi Rajagopal <arajagopal@us.ibm.com>
+Aditya <aditya@netroy.in>
+Adnan Khan <adnkha@amazon.com>
+Adolfo Ochagavía <aochagavia92@gmail.com>
+Adria Casas <adriacasas88@gmail.com>
+Adrian Moisey <adrian@changeover.za.net>
+Adrian Mouat <adrian.mouat@gmail.com>
+Adrian Oprea <adrian@codesi.nz>
+Adrien Folie <folie.adrien@gmail.com>
+Adrien Gallouët <adrien@gallouet.fr>
+Ahmed Kamal <email.ahmedkamal@googlemail.com>
+Ahmet Alp Balkan <ahmetb@microsoft.com>
+Aidan Feldman <aidan.feldman@gmail.com>
+Aidan Hobson Sayers <aidanhs@cantab.net>
+AJ Bowen <aj@soulshake.net>
+Ajey Charantimath <ajey.charantimath@gmail.com>
+ajneu <ajneu@users.noreply.github.com>
+Akash Gupta <akagup@microsoft.com>
+Akhil Mohan <akhil.mohan@mayadata.io>
+Akihiro Matsushima <amatsusbit@gmail.com>
+Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
+Akim Demaille <akim.demaille@docker.com>
+Akira Koyasu <mail@akirakoyasu.net>
+Akshay Karle <akshay.a.karle@gmail.com>
+Akshay Moghe <akshay.moghe@gmail.com>
+Al Tobey <al@ooyala.com>
+alambike <alambike@gmail.com>
+Alan Hoyle <alan@alanhoyle.com>
+Alan Scherger <flyinprogrammer@gmail.com>
+Alan Thompson <cloojure@gmail.com>
+Alano Terblanche <alano.terblanche@docker.com>
+Albert Callarisa <shark234@gmail.com>
+Albert Zhang <zhgwenming@gmail.com>
+Albin Kerouanton <albinker@gmail.com>
+Alec Benson <albenson@redhat.com>
+Alejandro González Hevia <alejandrgh11@gmail.com>
+Aleksa Sarai <asarai@suse.de>
+Aleksandr Chebotov <v-aleche@microsoft.com>
+Aleksandrs Fadins <aleks@s-ko.net>
+Alena Prokharchyk <alena@rancher.com>
+Alessandro Boch <aboch@tetrationanalytics.com>
+Alessio Biancalana <dottorblaster@gmail.com>
+Alex Chan <alex@alexwlchan.net>
+Alex Chen <alexchenunix@gmail.com>
+Alex Coventry <alx@empirical.com>
+Alex Crawford <alex.crawford@coreos.com>
+Alex Ellis <alexellis2@gmail.com>
+Alex Gaynor <alex.gaynor@gmail.com>
+Alex Goodman <wagoodman@gmail.com>
+Alex Nordlund <alexander.nordlund@nasdaq.com>
+Alex Olshansky <i@creagenics.com>
+Alex Samorukov <samm@os2.kiev.ua>
+Alex Stockinger <alex@atomicjar.com>
+Alex Warhawk <ax.warhawk@gmail.com>
+Alexander Artemenko <svetlyak.40wt@gmail.com>
+Alexander Boyd <alex@opengroove.org>
+Alexander Larsson <alexl@redhat.com>
+Alexander Midlash <amidlash@docker.com>
+Alexander Morozov <lk4d4math@gmail.com>
+Alexander Polakov <plhk@sdf.org>
+Alexander Shopov <ash@kambanaria.org>
+Alexandre Beslic <alexandre.beslic@gmail.com>
+Alexandre Garnier <zigarn@gmail.com>
+Alexandre González <agonzalezro@gmail.com>
+Alexandre Jomin <alexandrejomin@gmail.com>
+Alexandru Sfirlogea <alexandru.sfirlogea@gmail.com>
+Alexei Margasov <alexei38@yandex.ru>
+Alexey Guskov <lexag@mail.ru>
+Alexey Kotlyarov <alexey@infoxchange.net.au>
+Alexey Shamrin <shamrin@gmail.com>
+Alexis Ries <ries.alexis@gmail.com>
+Alexis Thomas <fr.alexisthomas@gmail.com>
+Alfred Landrum <alfred.landrum@docker.com>
+Ali Dehghani <ali.dehghani.g@gmail.com>
+Alicia Lauerman <alicia@eta.im>
+Alihan Demir <alihan_6153@hotmail.com>
+Allen Madsen <blatyo@gmail.com>
+Allen Sun <allensun.shl@alibaba-inc.com>
+almoehi <almoehi@users.noreply.github.com>
+Alvaro Saurin <alvaro.saurin@gmail.com>
+Alvin Deng <alvin.q.deng@utexas.edu>
+Alvin Richards <alvin.richards@docker.com>
+amangoel <amangoel@gmail.com>
+Amen Belayneh <amenbelayneh@gmail.com>
+Ameya Gawde <agawde@mirantis.com>
+Amir Goldstein <amir73il@aquasec.com>
+AmirBuddy <badinlu.amirhossein@gmail.com>
+Amit Bakshi <ambakshi@gmail.com>
+Amit Krishnan <amit.krishnan@oracle.com>
+Amit Shukla <amit.shukla@docker.com>
+Amr Gawish <amr.gawish@gmail.com>
+Amy Lindburg <amy.lindburg@docker.com>
+Anand Patil <anand.prabhakar.patil@gmail.com>
+AnandkumarPatel <anandkumarpatel@gmail.com>
+Anatoly Borodin <anatoly.borodin@gmail.com>
+Anca Iordache <anca.iordache@docker.com>
+Anchal Agrawal <aagrawa4@illinois.edu>
+Anda Xu <anda.xu@docker.com>
+Anders Janmyr <anders@janmyr.com>
+Andre Dublin <81dublin@gmail.com>
+Andre Granovsky <robotciti@live.com>
+Andrea Denisse Gómez <crypto.andrea@protonmail.ch>
+Andrea Luzzardi <aluzzardi@gmail.com>
+Andrea Turli <andrea.turli@gmail.com>
+Andreas Elvers <andreas@work.de>
+Andreas Köhler <andi5.py@gmx.net>
+Andreas Savvides <andreas@editd.com>
+Andreas Tiefenthaler <at@an-ti.eu>
+Andrei Gherzan <andrei@resin.io>
+Andrei Ushakov <aushakov@netflix.com>
+Andrei Vagin <avagin@gmail.com>
+Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me>
+Andrew C. Bodine <acbodine@us.ibm.com>
+Andrew Clay Shafer <andrewcshafer@gmail.com>
+Andrew Duckworth <grillopress@gmail.com>
+Andrew France <andrew@avito.co.uk>
+Andrew Gerrand <adg@golang.org>
+Andrew Guenther <guenther.andrew.j@gmail.com>
+Andrew He <he.andrew.mail@gmail.com>
+Andrew Hsu <andrewhsu@docker.com>
+Andrew Kim <taeyeonkim90@gmail.com>
+Andrew Kuklewicz <kookster@gmail.com>
+Andrew Macgregor <andrew.macgregor@agworld.com.au>
+Andrew Macpherson <hopscotch23@gmail.com>
+Andrew Martin <sublimino@gmail.com>
+Andrew McDonnell <bugs@andrewmcdonnell.net>
+Andrew Munsell <andrew@wizardapps.net>
+Andrew Pennebaker <andrew.pennebaker@gmail.com>
+Andrew Po <absourd.noise@gmail.com>
+Andrew Weiss <andrew.weiss@docker.com>
+Andrew Williams <williams.andrew@gmail.com>
+Andrews Medina <andrewsmedina@gmail.com>
+Andrey Kolomentsev <andrey.kolomentsev@docker.com>
+Andrey Petrov <andrey.petrov@shazow.net>
+Andrey Stolbovsky <andrey.stolbovsky@gmail.com>
+André Martins <aanm90@gmail.com>
+Andrés Maldonado <maldonado@codelutin.com>
+Andy Chambers <anchambers@paypal.com>
+andy diller <dillera@gmail.com>
+Andy Goldstein <agoldste@redhat.com>
+Andy Kipp <andy@rstudio.com>
+Andy Lindeman <alindeman@salesforce.com>
+Andy Rothfusz <github@developersupport.net>
+Andy Smith <github@anarkystic.com>
+Andy Wilson <wilson.andrew.j+github@gmail.com>
+Andy Zhang <andy.zhangtao@hotmail.com>
+Aneesh Kulkarni <askthefactorcamera@gmail.com>
+Anes Hasicic <anes.hasicic@gmail.com>
+Angel Velazquez <angelcar@amazon.com>
+Anil Belur <askb23@gmail.com>
+Anil Madhavapeddy <anil@recoil.org>
+Ankit Jain <ajatkj@yahoo.co.in>
+Ankush Agarwal <ankushagarwal11@gmail.com>
+Anonmily <michelle@michelleliu.io>
+Anran Qiao <anran.qiao@daocloud.io>
+Anshul Pundir <anshul.pundir@docker.com>
+Anthon van der Neut <anthon@mnt.org>
+Anthony Baire <Anthony.Baire@irisa.fr>
+Anthony Bishopric <git@anthonybishopric.com>
+Anthony Dahanne <anthony.dahanne@gmail.com>
+Anthony Sottile <asottile@umich.edu>
+Anton Löfgren <anton.lofgren@gmail.com>
+Anton Nikitin <anton.k.nikitin@gmail.com>
+Anton Polonskiy <anton.polonskiy@gmail.com>
+Anton Tiurin <noxiouz@yandex.ru>
+Antonio Aguilar <antonio@zoftko.com>
+Antonio Murdaca <antonio.murdaca@gmail.com>
+Antonis Kalipetis <akalipetis@gmail.com>
+Antony Messerli <amesserl@rackspace.com>
+Anuj Bahuguna <anujbahuguna.dev@gmail.com>
+Anuj Varma <anujvarma@thumbtack.com>
+Anusha Ragunathan <anusha.ragunathan@docker.com>
+Anyu Wang <wanganyu@outlook.com>
+apocas <petermdias@gmail.com>
+Arash Deshmeh <adeshmeh@ca.ibm.com>
+arcosx <arcosx@outlook.com>
+ArikaChen <eaglesora@gmail.com>
+Arko Dasgupta <arko@tetrate.io>
+Arnaud Lefebvre <a.lefebvre@outlook.fr>
+Arnaud Porterie <icecrime@gmail.com>
+Arnaud Rebillout <arnaud.rebillout@collabora.com>
+Artem Khramov <akhramov@pm.me>
+Arthur Barr <arthur.barr@uk.ibm.com>
+Arthur Gautier <baloo@gandi.net>
+Artur Meyster <arthurfbi@yahoo.com>
+Arun Gupta <arun.gupta@gmail.com>
+Asad Saeeduddin <masaeedu@gmail.com>
+Asbjørn Enge <asbjorn@hanafjedle.net>
+Ashly Mathew <ashly.mathew@sap.com>
+Austin Vazquez <macedonv@amazon.com>
+averagehuman <averagehuman@users.noreply.github.com>
+Avi Das <andas222@gmail.com>
+Avi Kivity <avi@scylladb.com>
+Avi Miller <avi.miller@oracle.com>
+Avi Vaid <avaid1996@gmail.com>
+Azat Khuyiyakhmetov <shadow_uz@mail.ru>
+Bao Yonglei <baoyonglei@huawei.com>
+Bardia Keyoumarsi <bkeyouma@ucsc.edu>
+Barnaby Gray <barnaby@pickle.me.uk>
+Barry Allard <barry.allard@gmail.com>
+Bartłomiej Piotrowski <b@bpiotrowski.pl>
+Bastiaan Bakker <bbakker@xebia.com>
+Bastien Pascard <bpascard@hotmail.com>
+bdevloed <boris.de.vloed@gmail.com>
+Bearice Ren <bearice@gmail.com>
+Ben Bonnefoy <frenchben@docker.com>
+Ben Firshman <ben@firshman.co.uk>
+Ben Golub <ben.golub@dotcloud.com>
+Ben Gould <ben@bengould.co.uk>
+Ben Hall <ben@benhall.me.uk>
+Ben Langfeld <ben@langfeld.me>
+Ben Lovy <ben@deciduously.com>
+Ben Sargent <ben@brokendigits.com>
+Ben Severson <BenSeverson@users.noreply.github.com>
+Ben Toews <mastahyeti@gmail.com>
+Ben Wiklund <ben@daisyowl.com>
+Benjamin Atkin <ben@benatkin.com>
+Benjamin Baker <Benjamin.baker@utexas.edu>
+Benjamin Boudreau <boudreau.benjamin@gmail.com>
+Benjamin Böhmke <benjamin@boehmke.net>
+Benjamin Wang <wachao@vmware.com>
+Benjamin Yolken <yolken@stripe.com>
+Benny Ng <benny.tpng@gmail.com>
+Benoit Chesneau <bchesneau@gmail.com>
+Bernerd Schaefer <bj.schaefer@gmail.com>
+Bernhard M. Wiedemann <bwiedemann@suse.de>
+Bert Goethals <bert@bertg.be>
+Bertrand Roussel <broussel@sierrawireless.com>
+Bevisy Zhang <binbin36520@gmail.com>
+Bharath Thiruveedula <bharath_ves@hotmail.com>
+Bhiraj Butala <abhiraj.butala@gmail.com>
+Bhumika Bayani <bhumikabayani@gmail.com>
+Bilal Amarni <bilal.amarni@gmail.com>
+Bill Wang <ozbillwang@gmail.com>
+Billy Ridgway <wrridgwa@us.ibm.com>
+Bily Zhang <xcoder@tenxcloud.com>
+Bin Liu <liubin0329@gmail.com>
+Bingshen Wang <bingshen.wbs@alibaba-inc.com>
+Bjorn Neergaard <bjorn@neersighted.com>
+Blake Geno <blakegeno@gmail.com>
+Boaz Shuster <ripcurld.github@gmail.com>
+bobby abbott <ttobbaybbob@gmail.com>
+Bojun Zhu <bojun.zhu@foxmail.com>
+Boqin Qin <bobbqqin@gmail.com>
+Boris Pruessmann <boris@pruessmann.org>
+Boshi Lian <farmer1992@gmail.com>
+Bouke Haarsma <bouke@webatoom.nl>
+Boyd Hemphill <boyd@feedmagnet.com>
+boynux <boynux@gmail.com>
+Bradley Cicenas <bradley.cicenas@gmail.com>
+Bradley Wright <brad@intranation.com>
+Brandon Liu <bdon@bdon.org>
+Brandon Philips <brandon.philips@coreos.com>
+Brandon Rhodes <brandon@rhodesmill.org>
+Brendan Dixon <brendand@microsoft.com>
+Brennan Kinney <5098581+polarathene@users.noreply.github.com>
+Brent Salisbury <brent.salisbury@docker.com>
+Brett Higgins <brhiggins@arbor.net>
+Brett Kochendorfer <brett.kochendorfer@gmail.com>
+Brett Milford <brettmilford@gmail.com>
+Brett Randall <javabrett@gmail.com>
+Brian (bex) Exelbierd <bexelbie@redhat.com>
+Brian Bland <brian.bland@docker.com>
+Brian DeHamer <brian@dehamer.com>
+Brian Dorsey <brian@dorseys.org>
+Brian Flad <bflad417@gmail.com>
+Brian Goff <cpuguy83@gmail.com>
+Brian McCallister <brianm@skife.org>
+Brian Olsen <brian@maven-group.org>
+Brian Schwind <brianmschwind@gmail.com>
+Brian Shumate <brian@couchbase.com>
+Brian Torres-Gil <brian@dralth.com>
+Brian Trump <btrump@yelp.com>
+Brice Jaglin <bjaglin@teads.tv>
+Briehan Lombaard <briehan.lombaard@gmail.com>
+Brielle Broder <bbroder@google.com>
+Bruno Bigras <bigras.bruno@gmail.com>
+Bruno Binet <bruno.binet@gmail.com>
+Bruno Gazzera <bgazzera@paginar.com>
+Bruno Renié <brutasse@gmail.com>
+Bruno Tavares <btavare@thoughtworks.com>
+Bryan Bess <squarejaw@bsbess.com>
+Bryan Boreham <bjboreham@gmail.com>
+Bryan Matsuo <bryan.matsuo@gmail.com>
+Bryan Murphy <bmurphy1976@gmail.com>
+Burke Libbey <burke@libbey.me>
+Byung Kang <byung.kang.ctr@amrdec.army.mil>
+Caleb Spare <cespare@gmail.com>
+Calen Pennington <cale@edx.org>
+Calvin Liu <flycalvin@qq.com>
+Cameron Boehmer <cameron.boehmer@gmail.com>
+Cameron Sparr <gh@sparr.email>
+Cameron Spear <cameronspear@gmail.com>
+Campbell Allen <campbell.allen@gmail.com>
+Candid Dauth <cdauth@cdauth.eu>
+Cao Weiwei <cao.weiwei30@zte.com.cn>
+Carl Henrik Lunde <chlunde@ping.uio.no>
+Carl Loa Odin <carlodin@gmail.com>
+Carl X. Su <bcbcarl@gmail.com>
+Carlo Mion <mion00@gmail.com>
+Carlos Alexandro Becker <caarlos0@gmail.com>
+Carlos de Paula <me@carlosedp.com>
+Carlos Sanchez <carlos@apache.org>
+Carol Fager-Higgins <carol.fager-higgins@docker.com>
+Cary <caryhartline@users.noreply.github.com>
+Casey Bisson <casey.bisson@joyent.com>
+Catalin Pirvu <pirvu.catalin94@gmail.com>
+Ce Gao <ce.gao@outlook.com>
+Cedric Davies <cedricda@microsoft.com>
+Cezar Sa Espinola <cezarsa@gmail.com>
+Chad Swenson <chadswen@gmail.com>
+Chance Zibolski <chance.zibolski@gmail.com>
+Chander Govindarajan <chandergovind@gmail.com>
+Chanhun Jeong <keyolk@gmail.com>
+Chao Wang <wangchao.fnst@cn.fujitsu.com>
+Charity Kathure <ckathure@microsoft.com>
+Charles Chan <charleswhchan@users.noreply.github.com>
+Charles Hooper <charles.hooper@dotcloud.com>
+Charles Law <claw@conduce.com>
+Charles Lindsay <chaz@chazomatic.us>
+Charles Merriam <charles.merriam@gmail.com>
+Charles Sarrazin <charles@sarraz.in>
+Charles Smith <charles.smith@docker.com>
+Charlie Drage <charlie@charliedrage.com>
+Charlie Lewis <charliel@lab41.org>
+Chase Bolt <chase.bolt@gmail.com>
+ChaYoung You <yousbe@gmail.com>
+Chee Hau Lim <ch33hau@gmail.com>
+Chen Chao <cc272309126@gmail.com>
+Chen Chuanliang <chen.chuanliang@zte.com.cn>
+Chen Hanxiao <chenhanxiao@cn.fujitsu.com>
+Chen Min <chenmin46@huawei.com>
+Chen Mingjie <chenmingjie0828@163.com>
+Chen Qiu <cheney-90@hotmail.com>
+Cheng-mean Liu <soccerl@microsoft.com>
+Chengfei Shang <cfshang@alauda.io>
+Chengguang Xu <cgxu519@gmx.com>
+Chentianze <cmoman@126.com>
+Chenyang Yan <memory.yancy@gmail.com>
+chenyuzhu <chenyuzhi@oschina.cn>
+Chetan Birajdar <birajdar.chetan@gmail.com>
+Chewey <prosto-chewey@users.noreply.github.com>
+Chia-liang Kao <clkao@clkao.org>
+Chiranjeevi Tirunagari <vchiranjeeviak.tirunagari@gmail.com>
+chli <chli@freewheel.tv>
+Cholerae Hu <choleraehyq@gmail.com>
+Chris Alfonso <calfonso@redhat.com>
+Chris Armstrong <chris@opdemand.com>
+Chris Dias <cdias@microsoft.com>
+Chris Dituri <csdituri@gmail.com>
+Chris Fordham <chris@fordham-nagy.id.au>
+Chris Gavin <chris@chrisgavin.me>
+Chris Gibson <chris@chrisg.io>
+Chris Khoo <chris.khoo@gmail.com>
+Chris Kreussling (Flatbush Gardener) <xrisfg@gmail.com>
+Chris McKinnel <chris.mckinnel@tangentlabs.co.uk>
+Chris McKinnel <chrismckinnel@gmail.com>
+Chris Price <cprice@mirantis.com>
+Chris Seto <chriskseto@gmail.com>
+Chris Snow <chsnow123@gmail.com>
+Chris St. Pierre <chris.a.st.pierre@gmail.com>
+Chris Stivers <chris@stivers.us>
+Chris Swan <chris.swan@iee.org>
+Chris Telfer <ctelfer@docker.com>
+Chris Wahl <github@wahlnetwork.com>
+Chris Weyl <cweyl@alumni.drew.edu>
+Chris White <me@cwprogram.com>
+Christian Becker <christian.becker@sixt.com>
+Christian Berendt <berendt@b1-systems.de>
+Christian Brauner <christian.brauner@ubuntu.com>
+Christian Böhme <developement@boehme3d.de>
+Christian Muehlhaeuser <muesli@gmail.com>
+Christian Persson <saser@live.se>
+Christian Rotzoll <ch.rotzoll@gmail.com>
+Christian Simon <simon@swine.de>
+Christian Stefanescu <st.chris@gmail.com>
+Christoph Ziebuhr <chris@codefrickler.de>
+Christophe Mehay <cmehay@online.net>
+Christophe Troestler <christophe.Troestler@umons.ac.be>
+Christophe Vidal <kriss@krizalys.com>
+Christopher Biscardi <biscarch@sketcht.com>
+Christopher Crone <christopher.crone@docker.com>
+Christopher Currie <codemonkey+github@gmail.com>
+Christopher Jones <tophj@linux.vnet.ibm.com>
+Christopher Latham <sudosurootdev@gmail.com>
+Christopher Petito <chrisjpetito@gmail.com>
+Christopher Rigor <crigor@gmail.com>
+Christy Norman <christy@linux.vnet.ibm.com>
+Chun Chen <ramichen@tencent.com>
+Ciro S. Costa <ciro.costa@usp.br>
+Clayton Coleman <ccoleman@redhat.com>
+Clint Armstrong <clint@clintarmstrong.net>
+Clinton Kitson <clintonskitson@gmail.com>
+clubby789 <jamie@hill-daniel.co.uk>
+Cody Roseborough <crrosebo@amazon.com>
+Coenraad Loubser <coenraad@wish.org.za>
+Colin Dunklau <colin.dunklau@gmail.com>
+Colin Hebert <hebert.colin@gmail.com>
+Colin Panisset <github@clabber.com>
+Colin Rice <colin@daedrum.net>
+Colin Walters <walters@verbum.org>
+Collin Guarino <collin.guarino@gmail.com>
+Colm Hally <colmhally@gmail.com>
+companycy <companycy@gmail.com>
+Conor Evans <coevans@tcd.ie>
+Corbin Coleman <corbin.coleman@docker.com>
+Corey Farrell <git@cfware.com>
+Cory Forsyth <cory.forsyth@gmail.com>
+Cory Snider <csnider@mirantis.com>
+cressie176 <github@stephen-cresswell.net>
+Cristian Ariza <dev@cristianrz.com>
+Cristian Staretu <cristian.staretu@gmail.com>
+cristiano balducci <cristiano.balducci@gmail.com>
+Cristina Yenyxe Gonzalez Garcia <cristina.yenyxe@gmail.com>
+Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
+cui fliter <imcusg@gmail.com>
+CUI Wei <ghostplant@qq.com>
+Cuong Manh Le <cuong.manhle.vn@gmail.com>
+Cyprian Gracz <cyprian.gracz@micro-jumbo.eu>
+Cyril F <cyrilf7x@gmail.com>
+Da McGrady <dabkb@aol.com>
+Daan van Berkel <daan.v.berkel.1980@gmail.com>
+Daehyeok Mun <daehyeok@gmail.com>
+Dafydd Crosby <dtcrsby@gmail.com>
+dalanlan <dalanlan925@gmail.com>
+Damian Smyth <damian@dsau.co>
+Damien Nadé <github@livna.org>
+Damien Nozay <damien.nozay@gmail.com>
+Damjan Georgievski <gdamjan@gmail.com>
+Dan Anolik <dan@anolik.net>
+Dan Buch <d.buch@modcloth.com>
+Dan Cotora <dan@bluevision.ro>
+Dan Feldman <danf@jfrog.com>
+Dan Griffin <dgriffin@peer1.com>
+Dan Hirsch <thequux@upstandinghackers.com>
+Dan Keder <dan.keder@gmail.com>
+Dan Levy <dan@danlevy.net>
+Dan McPherson <dmcphers@redhat.com>
+Dan Plamadeala <cornul11@gmail.com>
+Dan Stine <sw@stinemail.com>
+Dan Williams <me@deedubs.com>
+Dani Hodovic <dani.hodovic@gmail.com>
+Dani Louca <dani.louca@docker.com>
+Daniel Antlinger <d.antlinger@gmx.at>
+Daniel Black <daniel@linux.ibm.com>
+Daniel Dao <dqminh@cloudflare.com>
+Daniel Exner <dex@dragonslave.de>
+Daniel Farrell <dfarrell@redhat.com>
+Daniel Garcia <daniel@danielgarcia.info>
+Daniel Gasienica <daniel@gasienica.ch>
+Daniel Grunwell <mwgrunny@gmail.com>
+Daniel Guns <danbguns@gmail.com>
+Daniel Helfand <helfand.4@gmail.com>
+Daniel Hiltgen <daniel.hiltgen@docker.com>
+Daniel J Walsh <dwalsh@redhat.com>
+Daniel Menet <membership@sontags.ch>
+Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com>
+Daniel Nephin <dnephin@docker.com>
+Daniel Norberg <dano@spotify.com>
+Daniel Nordberg <dnordberg@gmail.com>
+Daniel P. Berrangé <berrange@redhat.com>
+Daniel Robinson <gottagetmac@gmail.com>
+Daniel S <dan.streby@gmail.com>
+Daniel Sweet <danieljsweet@icloud.com>
+Daniel Von Fange <daniel@leancoder.com>
+Daniel Watkins <daniel@daniel-watkins.co.uk>
+Daniel X Moore <yahivin@gmail.com>
+Daniel YC Lin <dlin.tw@gmail.com>
+Daniel Zhang <jmzwcn@gmail.com>
+Daniele Rondina <geaaru@sabayonlinux.org>
+Danny Berger <dpb587@gmail.com>
+Danny Milosavljevic <dannym@scratchpost.org>
+Danny Yates <danny@codeaholics.org>
+Danyal Khaliq <danyal.khaliq@tenpearls.com>
+Darren Coxall <darren@darrencoxall.com>
+Darren Shepherd <darren.s.shepherd@gmail.com>
+Darren Stahl <darst@microsoft.com>
+Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
+Davanum Srinivas <davanum@gmail.com>
+Dave Barboza <dbarboza@datto.com>
+Dave Goodchild <buddhamagnet@gmail.com>
+Dave Henderson <dhenderson@gmail.com>
+Dave MacDonald <mindlapse@gmail.com>
+Dave Tucker <dt@docker.com>
+David Anderson <dave@natulte.net>
+David Bellotti <dbellotti@pivotal.io>
+David Calavera <david.calavera@gmail.com>
+David Chung <david.chung@docker.com>
+David Corking <dmc-source@dcorking.com>
+David Cramer <davcrame@cisco.com>
+David Currie <david_currie@uk.ibm.com>
+David Davis <daviddavis@redhat.com>
+David Dooling <dooling@gmail.com>
+David Gageot <david@gageot.net>
+David Gebler <davidgebler@gmail.com>
+David Glasser <glasser@davidglasser.net>
+David Karlsson <35727626+dvdksn@users.noreply.github.com>
+David Lawrence <david.lawrence@docker.com>
+David Lechner <david@lechnology.com>
+David M. Karr <davidmichaelkarr@gmail.com>
+David Mackey <tdmackey@booleanhaiku.com>
+David Manouchehri <manouchehri@riseup.net>
+David Mat <david@davidmat.com>
+David Mcanulty <github@hellspark.com>
+David McKay <david@rawkode.com>
+David O'Rourke <david@scalefactory.com>
+David P Hilton <david.hilton.p@gmail.com>
+David Pelaez <pelaez89@gmail.com>
+David R. Jenni <david.r.jenni@gmail.com>
+David Röthlisberger <david@rothlis.net>
+David Sheets <dsheets@docker.com>
+David Sissitka <me@dsissitka.com>
+David Trott <github@davidtrott.com>
+David Wang <00107082@163.com>
+David Williamson <david.williamson@docker.com>
+David Xia <dxia@spotify.com>
+David Young <yangboh@cn.ibm.com>
+Davide Ceretti <davide.ceretti@hogarthww.com>
+Dawn Chen <dawnchen@google.com>
+dbdd <wangtong2712@gmail.com>
+dcylabs <dcylabs@gmail.com>
+Debayan De <debayande@users.noreply.github.com>
+Deborah Gertrude Digges <deborah.gertrude.digges@gmail.com>
+deed02392 <georgehafiz@gmail.com>
+Deep Debroy <ddebroy@docker.com>
+Deng Guangxing <dengguangxing@huawei.com>
+Deni Bertovic <deni@kset.org>
+Denis Defreyne <denis@soundcloud.com>
+Denis Gladkikh <denis@gladkikh.email>
+Denis Ollier <larchunix@users.noreply.github.com>
+Dennis Chen <barracks510@gmail.com>
+Dennis Chen <dennis.chen@arm.com>
+Dennis Docter <dennis@d23.nl>
+Derek <crq@kernel.org>
+Derek <crquan@gmail.com>
+Derek Ch <denc716@gmail.com>
+Derek McGowan <derek@mcg.dev>
+Deric Crago <deric.crago@gmail.com>
+Deshi Xiao <dxiao@redhat.com>
+Devon Estes <devon.estes@klarna.com>
+Devvyn Murphy <devvyn@devvyn.com>
+Dharmit Shah <shahdharmit@gmail.com>
+Dhawal Yogesh Bhanushali <dbhanushali@vmware.com>
+Dhilip Kumars <dhilip.kumar.s@huawei.com>
+Diego Romero <idiegoromero@gmail.com>
+Diego Siqueira <dieg0@live.com>
+Dieter Reuter <dieter.reuter@me.com>
+Dillon Dixon <dillondixon@gmail.com>
+Dima Stopel <dima@twistlock.com>
+Dimitri John Ledkov <dimitri.j.ledkov@intel.com>
+Dimitris Mandalidis <dimitris.mandalidis@gmail.com>
+Dimitris Rozakis <dimrozakis@gmail.com>
+Dimitry Andric <d.andric@activevideo.com>
+Dinesh Subhraveti <dineshs@altiscale.com>
+Ding Fei <dingfei@stars.org.cn>
+dingwei <dingwei@cmss.chinamobile.com>
+Diogo Monica <diogo@docker.com>
+DiuDiugirl <sophia.wang@pku.edu.cn>
+Djibril Koné <kone.djibril@gmail.com>
+Djordje Lukic <djordje.lukic@docker.com>
+dkumor <daniel@dkumor.com>
+Dmitri Logvinenko <dmitri.logvinenko@gmail.com>
+Dmitri Shuralyov <shurcooL@gmail.com>
+Dmitry Demeshchuk <demeshchuk@gmail.com>
+Dmitry Gusev <dmitry.gusev@gmail.com>
+Dmitry Kononenko <d@dm42.ru>
+Dmitry Sharshakov <d3dx12.xx@gmail.com>
+Dmitry Shyshkin <dmitry@shyshkin.org.ua>
+Dmitry Smirnov <onlyjob@member.fsf.org>
+Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
+Dmitry Vorobev <dimahabr@gmail.com>
+Dmytro Iakovliev <dmytro.iakovliev@zodiacsystems.com>
+docker-unir[bot] <docker-unir[bot]@users.noreply.github.com>
+Dolph Mathews <dolph.mathews@gmail.com>
+Dominic Tubach <dominic.tubach@to.com>
+Dominic Yin <yindongchao@inspur.com>
+Dominik Dingel <dingel@linux.vnet.ibm.com>
+Dominik Finkbeiner <finkes93@gmail.com>
+Dominik Honnef <dominik@honnef.co>
+Don Kirkby <donkirkby@users.noreply.github.com>
+Don Kjer <don.kjer@gmail.com>
+Don Spaulding <donspauldingii@gmail.com>
+Donald Huang <don.hcd@gmail.com>
+Dong Chen <dongluo.chen@docker.com>
+Donghwa Kim <shanytt@gmail.com>
+Donovan Jones <git@gamma.net.nz>
+Dorin Geman <dorin.geman@docker.com>
+Doron Podoleanu <doronp@il.ibm.com>
+Doug Davis <dug@us.ibm.com>
+Doug MacEachern <dougm@vmware.com>
+Doug Tangren <d.tangren@gmail.com>
+Douglas Curtis <dougcurtis1@gmail.com>
+Dr Nic Williams <drnicwilliams@gmail.com>
+dragon788 <dragon788@users.noreply.github.com>
+Dražen Lučanin <kermit666@gmail.com>
+Drew Erny <derny@mirantis.com>
+Drew Hubl <drew.hubl@gmail.com>
+Dustin Sallings <dustin@spy.net>
+Ed Costello <epc@epcostello.com>
+Edmund Wagner <edmund-wagner@web.de>
+Eiichi Tsukata <devel@etsukata.com>
+Eike Herzbach <eike@herzbach.net>
+Eivin Giske Skaaren <eivinsn@axis.com>
+Eivind Uggedal <eivind@uggedal.com>
+Elan Ruusamäe <glen@pld-linux.org>
+Elango Sivanandam <elango.siva@docker.com>
+Elena Morozova <lelenanam@gmail.com>
+Eli Uriegas <seemethere101@gmail.com>
+Elias Faxö <elias.faxo@tre.se>
+Elias Koromilas <elias.koromilas@gmail.com>
+Elias Probst <mail@eliasprobst.eu>
+Elijah Zupancic <elijah@zupancic.name>
+eluck <mail@eluck.me>
+Elvir Kuric <elvirkuric@gmail.com>
+Emil Davtyan <emil2k@gmail.com>
+Emil Hernvall <emil@quench.at>
+Emily Maier <emily@emilymaier.net>
+Emily Rose <emily@contactvibe.com>
+Emir Ozer <emirozer@yandex.com>
+Eng Zer Jun <engzerjun@gmail.com>
+Enguerran <engcolson@gmail.com>
+Enrico Weigelt, metux IT consult <info@metux.net>
+Eohyung Lee <liquidnuker@gmail.com>
+epeterso <epeterson@breakpoint-labs.com>
+er0k <er0k@er0k.net>
+Eric Barch <barch@tomesoftware.com>
+Eric Curtin <ericcurtin17@gmail.com>
+Eric G. Noriega <enoriega@vizuri.com>
+Eric Hanchrow <ehanchrow@ine.com>
+Eric Lee <thenorthsecedes@gmail.com>
+Eric Mountain <eric.mountain@datadoghq.com>
+Eric Myhre <hash@exultant.us>
+Eric Paris <eparis@redhat.com>
+Eric Rafaloff <erafaloff@gmail.com>
+Eric Rosenberg <ehaydenr@gmail.com>
+Eric Sage <eric.david.sage@gmail.com>
+Eric Soderstrom <ericsoderstrom@gmail.com>
+Eric Yang <windfarer@gmail.com>
+Eric-Olivier Lamey <eo@lamey.me>
+Erica Windisch <erica@windisch.us>
+Erich Cordoba <erich.cm@yandex.com>
+Erik Bray <erik.m.bray@gmail.com>
+Erik Dubbelboer <erik@dubbelboer.com>
+Erik Hollensbe <github@hollensbe.org>
+Erik Inge Bolsø <knan@redpill-linpro.com>
+Erik Kristensen <erik@erikkristensen.com>
+Erik Sipsma <erik@sipsma.dev>
+Erik Sjölund <erik.sjolund@gmail.com>
+Erik St. Martin <alakriti@gmail.com>
+Erik Weathers <erikdw@gmail.com>
+Erno Hopearuoho <erno.hopearuoho@gmail.com>
+Erwin van der Koogh <info@erronis.nl>
+Espen Suenson <mail@espensuenson.dk>
+Ethan Bell <ebgamer29@gmail.com>
+Ethan Mosbaugh <ethan@replicated.com>
+Euan Harris <euan.harris@docker.com>
+Euan Kemp <euan.kemp@coreos.com>
+Eugen Krizo <eugen.krizo@gmail.com>
+Eugene Yakubovich <eugene.yakubovich@coreos.com>
+Evan Allrich <evan@unguku.com>
+Evan Carmi <carmi@users.noreply.github.com>
+Evan Hazlett <ejhazlett@gmail.com>
+Evan Krall <krall@yelp.com>
+Evan Lezar <elezar@nvidia.com>
+Evan Phoenix <evan@fallingsnow.net>
+Evan Wies <evan@neomantra.net>
+Evelyn Xu <evelynhsu21@gmail.com>
+Everett Toews <everett.toews@rackspace.com>
+Evgeniy Makhrov <e.makhrov@corp.badoo.com>
+Evgeny Shmarnev <shmarnev@gmail.com>
+Evgeny Vereshchagin <evvers@ya.ru>
+Ewa Czechowska <ewa@ai-traders.com>
+Eystein Måløy Stenberg <eystein.maloy.stenberg@cfengine.com>
+ezbercih <cem.ezberci@gmail.com>
+Ezra Silvera <ezra@il.ibm.com>
+Fabian Kramm <kramm@covexo.com>
+Fabian Lauer <kontakt@softwareschmiede-saar.de>
+Fabian Raetz <fabian.raetz@gmail.com>
+Fabiano Rosas <farosas@br.ibm.com>
+Fabio Falci <fabiofalci@gmail.com>
+Fabio Kung <fabio.kung@gmail.com>
+Fabio Rapposelli <fabio@vmware.com>
+Fabio Rehm <fgrehm@gmail.com>
+Fabrizio Regini <freegenie@gmail.com>
+Fabrizio Soppelsa <fsoppelsa@mirantis.com>
+Faiz Khan <faizkhan00@gmail.com>
+falmp <chico.lopes@gmail.com>
+Fangming Fang <fangming.fang@arm.com>
+Fangyuan Gao <21551127@zju.edu.cn>
+fanjiyun <fan.jiyun@zte.com.cn>
+Fareed Dudhia <fareeddudhia@googlemail.com>
+Fathi Boudra <fathi.boudra@linaro.org>
+Federico Gimenez <fgimenez@coit.es>
+Felipe Oliveira <felipeweb.programador@gmail.com>
+Felipe Ruhland <felipe.ruhland@gmail.com>
+Felix Abecassis <fabecassis@nvidia.com>
+Felix Geisendörfer <felix@debuggable.com>
+Felix Hupfeld <felix@quobyte.com>
+Felix Rabe <felix@rabe.io>
+Felix Ruess <felix.ruess@gmail.com>
+Felix Schindler <fschindler@weluse.de>
+Feng Yan <fy2462@gmail.com>
+Fengtu Wang <wangfengtu@huawei.com>
+Ferenc Szabo <pragmaticfrank@gmail.com>
+Fernando <fermayo@gmail.com>
+Fero Volar <alian@alian.info>
+Feroz Salam <feroz.salam@sourcegraph.com>
+Ferran Rodenas <frodenas@gmail.com>
+Filipe Brandenburger <filbranden@google.com>
+Filipe Oliveira <contato@fmoliveira.com.br>
+Filipe Pina <hzlu1ot0@duck.com>
+Flavio Castelli <fcastelli@suse.com>
+Flavio Crisciani <flavio.crisciani@docker.com>
+Florian <FWirtz@users.noreply.github.com>
+Florian Klein <florian.klein@free.fr>
+Florian Maier <marsmensch@users.noreply.github.com>
+Florian Noeding <noeding@adobe.com>
+Florian Schmaus <flo@geekplace.eu>
+Florian Weingarten <flo@hackvalue.de>
+Florin Asavoaie <florin.asavoaie@gmail.com>
+Florin Patan <florinpatan@gmail.com>
+fonglh <fonglh@gmail.com>
+Foysal Iqbal <foysal.iqbal.fb@gmail.com>
+Francesc Campoy <campoy@google.com>
+Francesco Degrassi <francesco.degrassi@optionfactory.net>
+Francesco Mari <mari.francesco@gmail.com>
+Francis Chuang <francis.chuang@boostport.com>
+Francisco Carriedo <fcarriedo@gmail.com>
+Francisco Souza <f@souza.cc>
+Frank Groeneveld <frank@ivaldi.nl>
+Frank Herrmann <fgh@4gh.tv>
+Frank Macreery <frank@macreery.com>
+Frank Rosquin <frank.rosquin+github@gmail.com>
+Frank Villaro-Dixon <frank.villarodixon@merkle.com>
+Frank Yang <yyb196@gmail.com>
+François Scala <github@arcenik.net>
+Fred Lifton <fred.lifton@docker.com>
+Frederick F. Kautz IV <fkautz@redhat.com>
+Frederico F. de Oliveira <FreddieOliveira@users.noreply.github.com>
+Frederik Loeffert <frederik@zitrusmedia.de>
+Frederik Nordahl Jul Sabroe <frederikns@gmail.com>
+Freek Kalter <freek@kalteronline.org>
+Frieder Bluemle <frieder.bluemle@gmail.com>
+frobnicaty <92033765+frobnicaty@users.noreply.github.com>
+Frédéric Dalleau <frederic.dalleau@docker.com>
+Fu JinLin <withlin@yeah.net>
+Félix Baylac-Jacqué <baylac.felix@gmail.com>
+Félix Cantournet <felix.cantournet@cloudwatt.com>
+Gabe Rosenhouse <gabe@missionst.com>
+Gabor Nagy <mail@aigeruth.hu>
+Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com>
+Gabriel Goller <gabrielgoller123@gmail.com>
+Gabriel L. Somlo <gsomlo@gmail.com>
+Gabriel Linder <linder.gabriel@gmail.com>
+Gabriel Monroy <gabriel@opdemand.com>
+Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
+Gabriel Tomitsuka <gabriel@tomitsuka.com>
+Gaetan de Villele <gdevillele@gmail.com>
+Galen Sampson <galen.sampson@gmail.com>
+Gang Qiao <qiaohai8866@gmail.com>
+Gareth Rushgrove <gareth@morethanseven.net>
+Garrett Barboza <garrett@garrettbarboza.com>
+Gary Schaetz <gary@schaetzkc.com>
+Gaurav <gaurav.gosec@gmail.com>
+Gaurav Singh <gaurav1086@gmail.com>
+Gaël PORTAY <gael.portay@savoirfairelinux.com>
+Genki Takiuchi <genki@s21g.com>
+GennadySpb <lipenkov@gmail.com>
+Geoff Levand <geoff@infradead.org>
+Geoffrey Bachelet <grosfrais@gmail.com>
+Geon Kim <geon0250@gmail.com>
+George Adams <georgeadams1995@gmail.com>
+George Kontridze <george@bugsnag.com>
+George Ma <mayangang@outlook.com>
+George MacRorie <gmacr31@gmail.com>
+George Xie <georgexsh@gmail.com>
+Georgi Hristozov <georgi@forkbomb.nl>
+Georgy Yakovlev <gyakovlev@gentoo.org>
+Gereon Frey <gereon.frey@dynport.de>
+German DZ <germ@ndz.com.ar>
+Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl>
+Gerwim Feiken <g.feiken@tfe.nl>
+Ghislain Bourgeois <ghislain.bourgeois@gmail.com>
+Giampaolo Mancini <giampaolo@trampolineup.com>
+Gianluca Borello <g.borello@gmail.com>
+Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
+Giovan Isa Musthofa <giovanism@outlook.co.id>
+gissehel <public-devgit-dantus@gissehel.org>
+Giuseppe Mazzotta <gdm85@users.noreply.github.com>
+Giuseppe Scrivano <gscrivan@redhat.com>
+Gleb Fotengauer-Malinovskiy <glebfm@altlinux.org>
+Gleb M Borisov <borisov.gleb@gmail.com>
+Glyn Normington <gnormington@gopivotal.com>
+GoBella <caili_welcome@163.com>
+Goffert van Gool <goffert@phusion.nl>
+Goldwyn Rodrigues <rgoldwyn@suse.com>
+Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com>
+Gosuke Miyashita <gosukenator@gmail.com>
+Gou Rao <gou@portworx.com>
+Govinda Fichtner <govinda.fichtner@googlemail.com>
+Grace Choi <grace.54109@gmail.com>
+Grant Millar <rid@cylo.io>
+Grant Reaber <grant.reaber@gmail.com>
+Graydon Hoare <graydon@pobox.com>
+Greg Fausak <greg@tacodata.com>
+Greg Pflaum <gpflaum@users.noreply.github.com>
+Greg Stephens <greg@udon.org>
+Greg Thornton <xdissent@me.com>
+Grzegorz Jaśkiewicz <gj.jaskiewicz@gmail.com>
+Guilhem Lettron <guilhem+github@lettron.fr>
+Guilherme Salgado <gsalgado@gmail.com>
+Guillaume Dufour <gdufour.prestataire@voyages-sncf.com>
+Guillaume J. Charmes <guillaume.charmes@docker.com>
+Gunadhya S. <6939749+gunadhya@users.noreply.github.com>
+Guoqiang QI <guoqiang.qi1@gmail.com>
+guoxiuyan <guoxiuyan@huawei.com>
+Guri <odg0318@gmail.com>
+Gurjeet Singh <gurjeet@singh.im>
+Guruprasad <lgp171188@gmail.com>
+Gustav Sinder <gustav.sinder@gmail.com>
+gwx296173 <gaojing3@huawei.com>
+Günter Zöchbauer <guenter@gzoechbauer.com>
+Haichao Yang <yang.haichao@zte.com.cn>
+haikuoliu <haikuo@amazon.com>
+haining.cao <haining.cao@daocloud.io>
+Hakan Özler <hakan.ozler@kodcu.com>
+Hamish Hutchings <moredhel@aoeu.me>
+Hannes Ljungberg <hannes@5monkeys.se>
+Hans Kristian Flaatten <hans@starefossen.com>
+Hans Rødtang <hansrodtang@gmail.com>
+Hao Shu Wei <haoshuwei24@gmail.com>
+Hao Zhang <21521210@zju.edu.cn>
+Harald Albers <github@albersweb.de>
+Harald Niesche <harald@niesche.de>
+Harley Laue <losinggeneration@gmail.com>
+Harold Cooper <hrldcpr@gmail.com>
+Harrison Turton <harrisonturton@gmail.com>
+Harry Zhang <harryz@hyper.sh>
+Harshal Patil <harshal.patil@in.ibm.com>
+Harshal Patil <harshalp@linux.vnet.ibm.com>
+He Simei <hesimei@zju.edu.cn>
+He Xiaoxi <tossmilestone@gmail.com>
+He Xin <he_xinworld@126.com>
+heartlock <21521209@zju.edu.cn>
+Hector Castro <hectcastro@gmail.com>
+Helen Xie <chenjg@harmonycloud.cn>
+Henning Sprang <henning.sprang@gmail.com>
+Hiroshi Hatake <hatake@clear-code.com>
+Hiroyuki Sasagawa <hs19870702@gmail.com>
+Hobofan <goisser94@gmail.com>
+Hollie Teal <hollie@docker.com>
+Hong Xu <hong@topbug.net>
+Hongbin Lu <hongbin034@gmail.com>
+Hongxu Jia <hongxu.jia@windriver.com>
+Honza Pokorny <me@honza.ca>
+Hsing-Hui Hsu <hsinghui@amazon.com>
+Hsing-Yu (David) Chen <davidhsingyuchen@gmail.com>
+hsinko <21551195@zju.edu.cn>
+Hu Keping <hukeping@huawei.com>
+Hu Tao <hutao@cn.fujitsu.com>
+Huajin Tong <fliterdashen@gmail.com>
+huang-jl <1046678590@qq.com>
+HuanHuan Ye <logindaveye@gmail.com>
+Huanzhong Zhang <zhanghuanzhong90@gmail.com>
+Huayi Zhang <irachex@gmail.com>
+Hugo Barrera <hugo@barrera.io>
+Hugo Duncan <hugo@hugoduncan.org>
+Hugo Marisco <0x6875676f@gmail.com>
+Hui Kang <hkang.sunysb@gmail.com>
+Hunter Blanks <hunter@twilio.com>
+huqun <huqun@zju.edu.cn>
+Huu Nguyen <huu@prismskylabs.com>
+Hyeongkyu Lee <hyeongkyu.lee@navercorp.com>
+Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
+Iago López Galeiras <iago@kinvolk.io>
+Ian Bishop <ianbishop@pace7.com>
+Ian Bull <irbull@gmail.com>
+Ian Calvert <ianjcalvert@gmail.com>
+Ian Campbell <ian.campbell@docker.com>
+Ian Chen <ianre657@gmail.com>
+Ian Lee <IanLee1521@gmail.com>
+Ian Main <imain@redhat.com>
+Ian Philpot <ian.philpot@microsoft.com>
+Ian Truslove <ian.truslove@gmail.com>
+Iavael <iavaelooeyt@gmail.com>
+Icaro Seara <icaro.seara@gmail.com>
+Ignacio Capurro <icapurrofagian@gmail.com>
+Igor Dolzhikov <bluesriverz@gmail.com>
+Igor Karpovich <i.karpovich@currencysolutions.com>
+Iliana Weller <iweller@amazon.com>
+Ilkka Laukkanen <ilkka@ilkka.io>
+Illia Antypenko <ilya@antipenko.pp.ua>
+Illo Abdulrahim <abdulrahim.illo@nokia.com>
+Ilya Dmitrichenko <errordeveloper@gmail.com>
+Ilya Gusev <mail@igusev.ru>
+Ilya Khlopotov <ilya.khlopotov@gmail.com>
+imalasong <2879499479@qq.com>
+imre Fitos <imre.fitos+github@gmail.com>
+inglesp <peter.inglesby@gmail.com>
+Ingo Gottwald <in.gottwald@gmail.com>
+Innovimax <innovimax@gmail.com>
+Isaac Dupree <antispam@idupree.com>
+Isabel Jimenez <contact.isabeljimenez@gmail.com>
+Isaiah Grace <irgkenya4@gmail.com>
+Isao Jonas <isao.jonas@gmail.com>
+Iskander Sharipov <quasilyte@gmail.com>
+Ivan Babrou <ibobrik@gmail.com>
+Ivan Fraixedes <ifcdev@gmail.com>
+Ivan Grcic <igrcic@gmail.com>
+Ivan Markin <sw@nogoegst.net>
+J Bruni <joaohbruni@yahoo.com.br>
+J. Nunn <jbnunn@gmail.com>
+Jack Danger Canty <jackdanger@squareup.com>
+Jack Laxson <jackjrabbit@gmail.com>
+Jack Walker <90711509+j2walker@users.noreply.github.com>
+Jacob Atzen <jacob@jacobatzen.dk>
+Jacob Edelman <edelman.jd@gmail.com>
+Jacob Tomlinson <jacob@tom.linson.uk>
+Jacob Vallejo <jakeev@amazon.com>
+Jacob Wen <jian.w.wen@oracle.com>
+Jaime Cepeda <jcepedavillamayor@gmail.com>
+Jaivish Kothari <janonymous.codevulture@gmail.com>
+Jake Champlin <jake.champlin.27@gmail.com>
+Jake Moshenko <jake@devtable.com>
+Jake Sanders <jsand@google.com>
+Jakub Drahos <jdrahos@pulsepoint.com>
+Jakub Guzik <jakubmguzik@gmail.com>
+James Allen <jamesallen0108@gmail.com>
+James Carey <jecarey@us.ibm.com>
+James Carr <james.r.carr@gmail.com>
+James DeFelice <james.defelice@ishisystems.com>
+James Harrison Fisher <jameshfisher@gmail.com>
+James Kyburz <james.kyburz@gmail.com>
+James Kyle <james@jameskyle.org>
+James Lal <james@lightsofapollo.com>
+James Mills <prologic@shortcircuit.net.au>
+James Nesbitt <jnesbitt@mirantis.com>
+James Nugent <james@jen20.com>
+James Sanders <james3sanders@gmail.com>
+James Turnbull <james@lovedthanlost.net>
+James Watkins-Harvey <jwatkins@progi-media.com>
+Jameson Hyde <jameson.hyde@docker.com>
+Jamie Hannaford <jamie@limetree.org>
+Jamshid Afshar <jafshar@yahoo.com>
+Jan Breig <git@pygos.space>
+Jan Chren <dev.rindeal@gmail.com>
+Jan Garcia <github-public@n-garcia.com>
+Jan Götte <jaseg@jaseg.net>
+Jan Keromnes <janx@linux.com>
+Jan Koprowski <jan.koprowski@gmail.com>
+Jan Pazdziora <jpazdziora@redhat.com>
+Jan Toebes <jan@toebes.info>
+Jan-Gerd Tenberge <janten@gmail.com>
+Jan-Jaap Driessen <janjaapdriessen@gmail.com>
+Jana Radhakrishnan <mrjana@docker.com>
+Jannick Fahlbusch <git@jf-projects.de>
+Januar Wayong <januar@gmail.com>
+Jared Biel <jared.biel@bolderthinking.com>
+Jared Hocutt <jaredh@netapp.com>
+Jaroslav Jindrak <dzejrou@gmail.com>
+Jaroslaw Zabiello <hipertracker@gmail.com>
+Jasmine Hegman <jasmine@jhegman.com>
+Jason A. Donenfeld <Jason@zx2c4.com>
+Jason Divock <jdivock@gmail.com>
+Jason Giedymin <jasong@apache.org>
+Jason Green <Jason.Green@AverInformatics.Com>
+Jason Hall <imjasonh@gmail.com>
+Jason Heiss <jheiss@aput.net>
+Jason Livesay <ithkuil@gmail.com>
+Jason McVetta <jason.mcvetta@gmail.com>
+Jason Plum <jplum@devonit.com>
+Jason Shepherd <jason@jasonshepherd.net>
+Jason Smith <jasonrichardsmith@gmail.com>
+Jason Sommer <jsdirv@gmail.com>
+Jason Stangroome <jason@codeassassin.com>
+Jasper Siepkes <siepkes@serviceplanet.nl>
+Javier Bassi <javierbassi@gmail.com>
+jaxgeller <jacksongeller@gmail.com>
+Jay <teguhwpurwanto@gmail.com>
+Jay Kamat <github@jgkamat.33mail.com>
+Jay Lim <jay@imjching.com>
+Jean Rouge <rougej+github@gmail.com>
+Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
+Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
+Jean-Christophe Berthon <huygens@berthon.eu>
+Jean-Michel Rouet <jm.rouet@gmail.com>
+Jean-Paul Calderone <exarkun@twistedmatrix.com>
+Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr>
+Jean-Tiare Le Bigot <jt@yadutaf.fr>
+Jeeva S. Chelladhurai <sjeeva@gmail.com>
+Jeff Anderson <jeff@docker.com>
+Jeff Hajewski <jeff.hajewski@gmail.com>
+Jeff Johnston <jeff.johnston.mn@gmail.com>
+Jeff Lindsay <progrium@gmail.com>
+Jeff Mickey <j@codemac.net>
+Jeff Minard <jeff@creditkarma.com>
+Jeff Nickoloff <jeff.nickoloff@gmail.com>
+Jeff Silberman <jsilberm@gmail.com>
+Jeff Welch <whatthejeff@gmail.com>
+Jeff Zvier <zvier20@gmail.com>
+Jeffrey Bolle <jeffreybolle@gmail.com>
+Jeffrey Morgan <jmorganca@gmail.com>
+Jeffrey van Gogh <jvg@google.com>
+Jenny Gebske <jennifer@gebske.de>
+Jeongseok Kang <piono623@naver.com>
+Jeremy Chambers <jeremy@thehipbot.com>
+Jeremy Grosser <jeremy@synack.me>
+Jeremy Huntwork <jhuntwork@lightcubesolutions.com>
+Jeremy Price <jprice.rhit@gmail.com>
+Jeremy Qian <vanpire110@163.com>
+Jeremy Unruh <jeremybunruh@gmail.com>
+Jeremy Yallop <yallop@docker.com>
+Jeroen Franse <jeroenfranse@gmail.com>
+Jeroen Jacobs <github@jeroenj.be>
+Jesse Dearing <jesse.dearing@gmail.com>
+Jesse Dubay <jesse@thefortytwo.net>
+Jessica Frazelle <jess@oxide.computer>
+Jeyanthinath Muthuram <jeyanthinath10@gmail.com>
+Jezeniel Zapanta <jpzapanta22@gmail.com>
+Jhon Honce <jhonce@redhat.com>
+Ji.Zhilong <zhilongji@gmail.com>
+Jian Liao <jliao@alauda.io>
+Jian Zeng <anonymousknight96@gmail.com>
+Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
+Jiang Jinyang <jjyruby@gmail.com>
+Jianyong Wu <jianyong.wu@arm.com>
+Jie Luo <luo612@zju.edu.cn>
+Jie Ma <jienius@outlook.com>
+Jihyun Hwang <jhhwang@telcoware.com>
+Jilles Oldenbeuving <ojilles@gmail.com>
+Jim Alateras <jima@comware.com.au>
+Jim Carroll <jim.carroll@docker.com>
+Jim Ehrismann <jim.ehrismann@docker.com>
+Jim Galasyn <jim.galasyn@docker.com>
+Jim Lin <b04705003@ntu.edu.tw>
+Jim Minter <jminter@redhat.com>
+Jim Perrin <jperrin@centos.org>
+Jimmy Cuadra <jimmy@jimmycuadra.com>
+Jimmy Puckett <jimmy.puckett@spinen.com>
+Jimmy Song <rootsongjc@gmail.com>
+jinjiadu <jinjiadu@aliyun.com>
+Jinsoo Park <cellpjs@gmail.com>
+Jintao Zhang <zhangjintao9020@gmail.com>
+Jiri Appl <jiria@microsoft.com>
+Jiri Popelka <jpopelka@redhat.com>
+Jiuyue Ma <majiuyue@huawei.com>
+Jiří Župka <jzupka@redhat.com>
+jjimbo137 <115816493+jjimbo137@users.noreply.github.com>
+Joakim Roubert <joakim.roubert@axis.com>
+Joan Grau <grautxo.dev@proton.me>
+Joao Fernandes <joao.fernandes@docker.com>
+Joao Trindade <trindade.joao@gmail.com>
+Joe Beda <joe.github@bedafamily.com>
+Joe Doliner <jdoliner@pachyderm.io>
+Joe Ferguson <joe@infosiftr.com>
+Joe Gordon <joe.gordon0@gmail.com>
+Joe Shaw <joe@joeshaw.org>
+Joe Van Dyk <joe@tanga.com>
+Joel Friedly <joelfriedly@gmail.com>
+Joel Handwell <joelhandwell@gmail.com>
+Joel Hansson <joel.hansson@ecraft.com>
+Joel Wurtz <jwurtz@jolicode.com>
+Joey Geiger <jgeiger@gmail.com>
+Joey Geiger <jgeiger@users.noreply.github.com>
+Joey Gibson <joey@joeygibson.com>
+Joffrey F <joffrey@docker.com>
+Johan Euphrosine <proppy@google.com>
+Johan Rydberg <johan.rydberg@gmail.com>
+Johanan Lieberman <johanan.lieberman@gmail.com>
+Johannes 'fish' Ziemke <github@freigeist.org>
+John Costa <john.costa@gmail.com>
+John Feminella <jxf@jxf.me>
+John Gardiner Myers <jgmyers@proofpoint.com>
+John Gossman <johngos@microsoft.com>
+John Harris <john@johnharris.io>
+John Howard <github@lowenna.com>
+John Laswell <john.n.laswell@gmail.com>
+John Maguire <jmaguire@duosecurity.com>
+John Mulhausen <john@docker.com>
+John OBrien III <jobrieniii@yahoo.com>
+John Starks <jostarks@microsoft.com>
+John Stephens <johnstep@docker.com>
+John Tims <john.k.tims@gmail.com>
+John V. Martinez <jvmatl@gmail.com>
+John Warwick <jwarwick@gmail.com>
+John Willis <john.willis@docker.com>
+Jon Johnson <jonjohnson@google.com>
+Jon Surrell <jon.surrell@gmail.com>
+Jon Wedaman <jweede@gmail.com>
+Jonas Dohse <jonas@dohse.ch>
+Jonas Geiler <git@jonasgeiler.com>
+Jonas Heinrich <Jonas@JonasHeinrich.com>
+Jonas Pfenniger <jonas@pfenniger.name>
+Jonathan A. Schweder <jonathanschweder@gmail.com>
+Jonathan A. Sternberg <jonathansternberg@gmail.com>
+Jonathan Boulle <jonathanboulle@gmail.com>
+Jonathan Camp <jonathan@irondojo.com>
+Jonathan Choy <jonathan.j.choy@gmail.com>
+Jonathan Dowland <jon+github@alcopop.org>
+Jonathan Lebon <jlebon@redhat.com>
+Jonathan Lomas <jonathan@floatinglomas.ca>
+Jonathan McCrohan <jmccrohan@gmail.com>
+Jonathan Mueller <j.mueller@apoveda.ch>
+Jonathan Pares <jonathanpa@users.noreply.github.com>
+Jonathan Rudenberg <jonathan@titanous.com>
+Jonathan Stoppani <jonathan.stoppani@divio.com>
+Jonh Wendell <jonh.wendell@redhat.com>
+Joni Sar <yoni@cocycles.com>
+Joost Cassee <joost@cassee.net>
+Jordan Arentsen <blissdev@gmail.com>
+Jordan Jennings <jjn2009@gmail.com>
+Jordan Sissel <jls@semicomplete.com>
+Jordi Massaguer Pla <jmassaguerpla@suse.de>
+Jorge Marin <chipironcin@users.noreply.github.com>
+Jorit Kleine-Möllhoff <joppich@bricknet.de>
+Jose Diaz-Gonzalez <email@josediazgonzalez.com>
+Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
+Joseph Hager <ajhager@gmail.com>
+Joseph Kern <jkern@semafour.net>
+Joseph Rothrock <rothrock@rothrock.org>
+Josh <jokajak@gmail.com>
+Josh Bodah <jb3689@yahoo.com>
+Josh Bonczkowski <josh.bonczkowski@gmail.com>
+Josh Chorlton <jchorlton@gmail.com>
+Josh Eveleth <joshe@opendns.com>
+Josh Hawn <josh.hawn@docker.com>
+Josh Horwitz <horwitz@addthis.com>
+Josh Poimboeuf <jpoimboe@redhat.com>
+Josh Soref <jsoref@gmail.com>
+Josh Wilson <josh.wilson@fivestars.com>
+Josiah Kiehl <jkiehl@riotgames.com>
+José Tomás Albornoz <jojo@eljojo.net>
+Joyce Jang <mail@joycejang.com>
+JP <jpellerin@leapfrogonline.com>
+JSchltggr <jschltggr@gmail.com>
+Julian Taylor <jtaylor.debian@googlemail.com>
+Julien Barbier <write0@gmail.com>
+Julien Bisconti <veggiemonk@users.noreply.github.com>
+Julien Bordellier <julienbordellier@gmail.com>
+Julien Dubois <julien.dubois@gmail.com>
+Julien Kassar <github@kassisol.com>
+Julien Maitrehenry <julien.maitrehenry@me.com>
+Julien Pervillé <julien.perville@perfect-memory.com>
+Julien Pivotto <roidelapluie@inuits.eu>
+Julio Guerra <julio@sqreen.com>
+Julio Montes <imc.coder@gmail.com>
+Jun Du <dujun5@huawei.com>
+Jun-Ru Chang <jrjang@gmail.com>
+junxu <xujun@cmss.chinamobile.com>
+Jussi Nummelin <jussi.nummelin@gmail.com>
+Justas Brazauskas <brazauskasjustas@gmail.com>
+Justen Martin <jmart@the-coder.com>
+Justin Chadwell <me@jedevc.com>
+Justin Cormack <justin.cormack@docker.com>
+Justin Force <justin.force@gmail.com>
+Justin Keller <85903732+jk-vb@users.noreply.github.com>
+Justin Menga <justin.menga@gmail.com>
+Justin Plock <jplock@users.noreply.github.com>
+Justin Simonelis <justin.p.simonelis@gmail.com>
+Justin Terry <juterry@microsoft.com>
+Justyn Temme <justyntemme@gmail.com>
+Jyrki Puttonen <jyrkiput@gmail.com>
+Jérémy Leherpeur <amenophis@leherpeur.net>
+Jérôme Petazzoni <jerome.petazzoni@docker.com>
+Jörg Thalheim <joerg@higgsboson.tk>
+K. Heller <pestophagous@gmail.com>
+Kai Blin <kai@samba.org>
+Kai Qiang Wu (Kennan) <wkq5325@gmail.com>
+Kaijie Chen <chen@kaijie.org>
+Kamil Domański <kamil@domanski.co>
+Kamjar Gerami <kami.gerami@gmail.com>
+Kanstantsin Shautsou <kanstantsin.sha@gmail.com>
+Kara Alexandra <kalexandra@us.ibm.com>
+Karan Lyons <karan@karanlyons.com>
+Kareem Khazem <karkhaz@karkhaz.com>
+kargakis <kargakis@users.noreply.github.com>
+Karl Grzeszczak <karlgrz@gmail.com>
+Karol Duleba <mr.fuxi@gmail.com>
+Karthik Karanth <karanth.karthik@gmail.com>
+Karthik Nayak <karthik.188@gmail.com>
+Kasper Fabæch Brandt <poizan@poizan.dk>
+Kate Heddleston <kate.heddleston@gmail.com>
+Katie McLaughlin <katie@glasnt.com>
+Kato Kazuyoshi <kato.kazuyoshi@gmail.com>
+Katrina Owen <katrina.owen@gmail.com>
+Kawsar Saiyeed <kawsar.saiyeed@projiris.com>
+Kay Yan <kay.yan@daocloud.io>
+kayrus <kay.diam@gmail.com>
+Kazuhiro Sera <seratch@gmail.com>
+Kazuyoshi Kato <katokazu@amazon.com>
+Ke Li <kel@splunk.com>
+Ke Xu <leonhartx.k@gmail.com>
+Kei Ohmura <ohmura.kei@gmail.com>
+Keith Hudgins <greenman@greenman.org>
+Keli Hu <dev@keli.hu>
+Ken Bannister <kb2ma@runbox.com>
+Ken Cochrane <kencochrane@gmail.com>
+Ken Herner <kherner@progress.com>
+Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
+Ken Reese <krrgithub@gmail.com>
+Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
+Kenjiro Nakayama <nakayamakenjiro@gmail.com>
+Kent Johnson <kentoj@gmail.com>
+Kenta Tada <Kenta.Tada@sony.com>
+Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
+Kevin Alvarez <github@crazymax.dev>
+Kevin Burke <kev@inburke.com>
+Kevin Clark <kevin.clark@gmail.com>
+Kevin Feyrer <kevin.feyrer@btinternet.com>
+Kevin J. Lynagh <kevin@keminglabs.com>
+Kevin Jing Qiu <kevin@idempotent.ca>
+Kevin Kern <kaiwentan@harmonycloud.cn>
+Kevin Menard <kevin@nirvdrum.com>
+Kevin Meredith <kevin.m.meredith@gmail.com>
+Kevin P. Kucharczyk <kevinkucharczyk@gmail.com>
+Kevin Parsons <kevpar@microsoft.com>
+Kevin Richardson <kevin@kevinrichardson.co>
+Kevin Shi <kshi@andrew.cmu.edu>
+Kevin Wallace <kevin@pentabarf.net>
+Kevin Yap <me@kevinyap.ca>
+Keyvan Fatehi <keyvanfatehi@gmail.com>
+kies <lleelm@gmail.com>
+Kim BKC Carlbacker <kim.carlbacker@gmail.com>
+Kim Eik <kim@heldig.org>
+Kimbro Staken <kstaken@kstaken.com>
+Kir Kolyshkin <kolyshkin@gmail.com>
+Kiran Gangadharan <kiran.daredevil@gmail.com>
+Kirill SIbirev <l0kix2@gmail.com>
+Kirk Easterson <kirk.easterson@gmail.com>
+knappe <tyler.knappe@gmail.com>
+Kohei Tsuruta <coheyxyz@gmail.com>
+Koichi Shiraishi <k@zchee.io>
+Konrad Kleine <konrad.wilhelm.kleine@gmail.com>
+Konrad Ponichtera <konpon96@gmail.com>
+Konstantin Gribov <grossws@gmail.com>
+Konstantin L <sw.double@gmail.com>
+Konstantin Pelykh <kpelykh@zettaset.com>
+Kostadin Plachkov <k.n.plachkov@gmail.com>
+kpcyrd <git@rxv.cc>
+Krasi Georgiev <krasi@vip-consult.solutions>
+Krasimir Georgiev <support@vip-consult.co.uk>
+Kris-Mikael Krister <krismikael@protonmail.com>
+Kristian Haugene <kristian.haugene@capgemini.com>
+Kristina Zabunova <triara.xiii@gmail.com>
+Krystian Wojcicki <kwojcicki@sympatico.ca>
+Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
+Kunal Tyagi <tyagi.kunal@live.com>
+Kyle Conroy <kyle.j.conroy@gmail.com>
+Kyle Linden <linden.kyle@gmail.com>
+Kyle Squizzato <ksquizz@gmail.com>
+Kyle Wuolle <kyle.wuolle@gmail.com>
+kyu <leehk1227@gmail.com>
+Lachlan Coote <lcoote@vmware.com>
+Lai Jiangshan <jiangshanlai@gmail.com>
+Lajos Papp <lajos.papp@sequenceiq.com>
+Lakshan Perera <lakshan@laktek.com>
+Lalatendu Mohanty <lmohanty@redhat.com>
+Lance Chen <cyen0312@gmail.com>
+Lance Kinley <lkinley@loyaltymethods.com>
+Lars Andringa <l.s.andringa@rug.nl>
+Lars Butler <Lars.Butler@gmail.com>
+Lars Kellogg-Stedman <lars@redhat.com>
+Lars R. Damerow <lars@pixar.com>
+Lars-Magnus Skog <ralphtheninja@riseup.net>
+Laszlo Meszaros <lacienator@gmail.com>
+Laura Brehm <laurabrehm@hey.com>
+Laura Frank <ljfrank@gmail.com>
+Laurent Bernaille <laurent.bernaille@datadoghq.com>
+Laurent Erignoux <lerignoux@gmail.com>
+Laurent Goderre <laurent.goderre@docker.com>
+Laurie Voss <github@seldo.com>
+Leandro Motta Barros <lmb@stackedboxes.org>
+Leandro Siqueira <leandro.siqueira@gmail.com>
+Lee Calcote <leecalcote@gmail.com>
+Lee Chao <932819864@qq.com>
+Lee, Meng-Han <sunrisedm4@gmail.com>
+Lei Gong <lgong@alauda.io>
+Lei Jitang <leijitang@huawei.com>
+Leiiwang <u2takey@gmail.com>
+Len Weincier <len@cloudafrica.net>
+Lennie <github@consolejunkie.net>
+Leo Gallucci <elgalu3@gmail.com>
+Leonardo Nodari <me@leonardonodari.it>
+Leonardo Taccari <leot@NetBSD.org>
+Leszek Kowalski <github@leszekkowalski.pl>
+Levi Blackstone <levi.blackstone@rackspace.com>
+Levi Gross <levi@levigross.com>
+Levi Harrison <levisamuelharrison@gmail.com>
+Lewis Daly <lewisdaly@me.com>
+Lewis Marshall <lewis@lmars.net>
+Lewis Peckover <lew+github@lew.io>
+Li Yi <denverdino@gmail.com>
+Liam Macgillavry <liam@kumina.nl>
+Liana Lo <liana.lixia@gmail.com>
+Liang Mingqiang <mqliang.zju@gmail.com>
+Liang-Chi Hsieh <viirya@gmail.com>
+liangwei <liangwei14@huawei.com>
+Liao Qingwei <liaoqingwei@huawei.com>
+Lifubang <lifubang@acmcoder.com>
+Lihua Tang <lhtang@alauda.io>
+Lily Guo <lily.guo@docker.com>
+limeidan <limeidan@loongson.cn>
+Lin Lu <doraalin@163.com>
+LingFaKe <lingfake@huawei.com>
+Linus Heckemann <lheckemann@twig-world.com>
+Liran Tal <liran.tal@gmail.com>
+Liron Levin <liron@twistlock.com>
+Liu Bo <bo.li.liu@oracle.com>
+Liu Hua <sdu.liu@huawei.com>
+liwenqi <vikilwq@zju.edu.cn>
+lixiaobing10051267 <li.xiaobing1@zte.com.cn>
+Liz Zhang <lizzha@microsoft.com>
+LIZAO LI <lzlarryli@gmail.com>
+Lizzie Dixon <_@lizzie.io>
+Lloyd Dewolf <foolswisdom@gmail.com>
+Lokesh Mandvekar <lsm5@fedoraproject.org>
+longliqiang88 <394564827@qq.com>
+Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
+Lorenzo Fontana <fontanalorenz@gmail.com>
+Lotus Fenn <fenn.lotus@gmail.com>
+Louis Delossantos <ldelossa.ld@gmail.com>
+Louis Opter <kalessin@kalessin.fr>
+Luboslav Pivarc <lpivarc@redhat.com>
+Luca Favatella <luca.favatella@erlang-solutions.com>
+Luca Marturana <lucamarturana@gmail.com>
+Luca Orlandi <luca.orlandi@gmail.com>
+Luca-Bogdan Grigorescu <Luca-Bogdan Grigorescu>
+Lucas Chan <lucas-github@lucaschan.com>
+Lucas Chi <lucas@teacherspayteachers.com>
+Lucas Molas <lmolas@fundacionsadosky.org.ar>
+Lucas Silvestre <lukas.silvestre@gmail.com>
+Luciano Mores <leslau@gmail.com>
+Luis Henrique Mulinari <luis.mulinari@gmail.com>
+Luis Martínez de Bartolomé Izquierdo <lmartinez@biicode.com>
+Luiz Svoboda <luizek@gmail.com>
+Lukas Heeren <lukas-heeren@hotmail.com>
+Lukas Waslowski <cr7pt0gr4ph7@gmail.com>
+lukaspustina <lukas.pustina@centerdevice.com>
+Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
+Luke Marsden <me@lukemarsden.net>
+Lyn <energylyn@zju.edu.cn>
+Lynda O'Leary <lyndaoleary29@gmail.com>
+Lénaïc Huard <lhuard@amadeus.com>
+Ma Müller <mueller-ma@users.noreply.github.com>
+Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
+Mabin <bin.ma@huawei.com>
+Madhan Raj Mookkandy <MadhanRaj.Mookkandy@microsoft.com>
+Madhav Puri <madhav.puri@gmail.com>
+Madhu Venugopal <mavenugo@gmail.com>
+Mageee <fangpuyi@foxmail.com>
+maggie44 <64841595+maggie44@users.noreply.github.com>
+Mahesh Tiyyagura <tmahesh@gmail.com>
+malnick <malnick@gmail..com>
+Malte Janduda <mail@janduda.net>
+Manfred Touron <m@42.am>
+Manfred Zabarauskas <manfredas@zabarauskas.com>
+Manjunath A Kumatagi <mkumatag@in.ibm.com>
+Mansi Nahar <mmn4185@rit.edu>
+Manuel Meurer <manuel@krautcomputing.com>
+Manuel Rüger <manuel@rueg.eu>
+Manuel Woelker <github@manuel.woelker.org>
+mapk0y <mapk0y@gmail.com>
+Marat Radchenko <marat@slonopotamus.org>
+Marc Abramowitz <marc@marc-abramowitz.com>
+Marc Kuo <kuomarc2@gmail.com>
+Marc Tamsky <mtamsky@gmail.com>
+Marcel Edmund Franke <marcel.edmund.franke@gmail.com>
+Marcelo Horacio Fortino <info@fortinux.com>
+Marcelo Salazar <chelosalazar@gmail.com>
+Marco Hennings <marco.hennings@freiheit.com>
+Marcus Cobden <mcobden@cisco.com>
+Marcus Farkas <toothlessgear@finitebox.com>
+Marcus Linke <marcus.linke@gmx.de>
+Marcus Martins <marcus@docker.com>
+Marcus Ramberg <marcus@nordaaker.com>
+Marek Goldmann <marek.goldmann@gmail.com>
+Marian Marinov <mm@yuhu.biz>
+Marianna Tessel <mtesselh@gmail.com>
+Mario Loriedo <mario.loriedo@gmail.com>
+Marius Gundersen <me@mariusgundersen.net>
+Marius Sturm <marius@graylog.com>
+Marius Voila <marius.voila@gmail.com>
+Mark Allen <mrallen1@yahoo.com>
+Mark Feit <mfeit@internet2.edu>
+Mark Jeromin <mark.jeromin@sysfrog.net>
+Mark McGranaghan <mmcgrana@gmail.com>
+Mark McKinstry <mmckinst@umich.edu>
+Mark Milstein <mark@epiloque.com>
+Mark Oates <fl0yd@me.com>
+Mark Parker <godefroi@users.noreply.github.com>
+Mark Vainomaa <mikroskeem@mikroskeem.eu>
+Mark West <markewest@gmail.com>
+Markan Patel <mpatel678@gmail.com>
+Marko Mikulicic <mmikulicic@gmail.com>
+Marko Tibold <marko@tibold.nl>
+Markus Fix <lispmeister@gmail.com>
+Markus Kortlang <hyp3rdino@googlemail.com>
+Martijn Dwars <ikben@martijndwars.nl>
+Martijn van Oosterhout <kleptog@svana.org>
+Martin Braun <braun@neuroforge.de>
+Martin Dojcak <martin.dojcak@lablabs.io>
+Martin Honermeyer <maze@strahlungsfrei.de>
+Martin Jirku <martin@jirku.sk>
+Martin Kelly <martin@surround.io>
+Martin Mosegaard Amdisen <martin.amdisen@praqma.com>
+Martin Muzatko <martin@happy-css.com>
+Martin Redmond <redmond.martin@gmail.com>
+Maru Newby <mnewby@thesprawl.net>
+Mary Anthony <mary.anthony@docker.com>
+Masahito Zembutsu <zembutsu@users.noreply.github.com>
+Masato Ohba <over.rye@gmail.com>
+Masayuki Morita <minamijoyo@gmail.com>
+Mason Malone <mason.malone@gmail.com>
+Mateusz Sulima <sulima.mateusz@gmail.com>
+Mathias Monnerville <mathias@monnerville.com>
+Mathieu Champlon <mathieu.champlon@docker.com>
+Mathieu Le Marec - Pasquet <kiorky@cryptelium.net>
+Mathieu Parent <math.parent@gmail.com>
+Mathieu Paturel <mathieu.paturel@gmail.com>
+Matt Apperson <me@mattapperson.com>
+Matt Bachmann <bachmann.matt@gmail.com>
+Matt Bajor <matt@notevenremotelydorky.com>
+Matt Bentley <matt.bentley@docker.com>
+Matt Haggard <haggardii@gmail.com>
+Matt Hoyle <matt@deployable.co>
+Matt McCormick <matt.mccormick@kitware.com>
+Matt Moore <mattmoor@google.com>
+Matt Morrison <3maven@gmail.com>
+Matt Richardson <matt@redgumtech.com.au>
+Matt Rickard <mrick@google.com>
+Matt Robenolt <matt@ydekproductions.com>
+Matt Schurenko <matt.schurenko@gmail.com>
+Matt Williams <mattyw@me.com>
+Matthew Heon <mheon@redhat.com>
+Matthew Lapworth <matthewl@bit-shift.net>
+Matthew Mayer <matthewkmayer@gmail.com>
+Matthew Mosesohn <raytrac3r@gmail.com>
+Matthew Mueller <mattmuelle@gmail.com>
+Matthew Riley <mattdr@google.com>
+Matthias Klumpp <matthias@tenstral.net>
+Matthias Kühnle <git.nivoc@neverbox.com>
+Matthias Rampke <mr@soundcloud.com>
+Matthieu Fronton <m@tthieu.fr>
+Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
+Mattias Jernberg <nostrad@gmail.com>
+Mauricio Garavaglia <mauricio@medallia.com>
+mauriyouth <mauriyouth@gmail.com>
+Max Harmathy <max.harmathy@web.de>
+Max Shytikov <mshytikov@gmail.com>
+Max Timchenko <maxvt@pagerduty.com>
+Maxim Fedchyshyn <sevmax@gmail.com>
+Maxim Ivanov <ivanov.maxim@gmail.com>
+Maxim Kulkin <mkulkin@mirantis.com>
+Maxim Treskin <zerthurd@gmail.com>
+Maxime Petazzoni <max@signalfuse.com>
+Maximiliano Maccanti <maccanti@amazon.com>
+Maxwell <csuhp007@gmail.com>
+Meaglith Ma <genedna@gmail.com>
+meejah <meejah@meejah.ca>
+Megan Kostick <mkostick@us.ibm.com>
+Mehul Kar <mehul.kar@gmail.com>
+Mei ChunTao <mei.chuntao@zte.com.cn>
+Mengdi Gao <usrgdd@gmail.com>
+Menghui Chen <menghui.chen@alibaba-inc.com>
+Mert Yazıcıoğlu <merty@users.noreply.github.com>
+mgniu <mgniu@dataman-inc.com>
+Micah Zoltu <micah@newrelic.com>
+Michael A. Smith <michael@smith-li.com>
+Michael Beskin <mrbeskin@gmail.com>
+Michael Bridgen <mikeb@squaremobius.net>
+Michael Brown <michael@netdirect.ca>
+Michael Chiang <mchiang@docker.com>
+Michael Crosby <crosbymichael@gmail.com>
+Michael Currie <mcurrie@bruceforceresearch.com>
+Michael Friis <friism@gmail.com>
+Michael Gorsuch <gorsuch@github.com>
+Michael Grauer <michael.grauer@kitware.com>
+Michael Holzheu <holzheu@linux.vnet.ibm.com>
+Michael Hudson-Doyle <michael.hudson@canonical.com>
+Michael Huettermann <michael@huettermann.net>
+Michael Irwin <mikesir87@gmail.com>
+Michael Kebe <michael.kebe@hkm.de>
+Michael Kuehn <micha@kuehn.io>
+Michael Käufl <docker@c.michael-kaeufl.de>
+Michael Neale <michael.neale@gmail.com>
+Michael Nussbaum <michael.nussbaum@getbraintree.com>
+Michael Prokop <github@michael-prokop.at>
+Michael Scharf <github@scharf.gr>
+Michael Spetsiotis <michael_spets@hotmail.com>
+Michael Stapelberg <michael+gh@stapelberg.de>
+Michael Steinert <mike.steinert@gmail.com>
+Michael Thies <michaelthies78@gmail.com>
+Michael Weidmann <michaelweidmann@web.de>
+Michael West <mwest@mdsol.com>
+Michael Zhao <michael.zhao@arm.com>
+Michal Fojtik <mfojtik@redhat.com>
+Michal Gebauer <mishak@mishak.net>
+Michal Jemala <michal.jemala@gmail.com>
+Michal Kostrzewa <michal.kostrzewa@codilime.com>
+Michal Minář <miminar@redhat.com>
+Michal Rostecki <mrostecki@opensuse.org>
+Michal Wieczorek <wieczorek-michal@wp.pl>
+Michaël Pailloncy <mpapo.dev@gmail.com>
+Michał Czeraszkiewicz <czerasz@gmail.com>
+Michał Gryko <github@odkurzacz.org>
+Michał Kosek <mihao@users.noreply.github.com>
+Michiel de Jong <michiel@unhosted.org>
+Mickaël Fortunato <morsi.morsicus@gmail.com>
+Mickaël Remars <mickael@remars.com>
+Miguel Angel Fernández <elmendalerenda@gmail.com>
+Miguel Morales <mimoralea@gmail.com>
+Miguel Perez <miguel@voyat.com>
+Mihai Borobocea <MihaiBorob@gmail.com>
+Mihuleacc Sergiu <mihuleac.sergiu@gmail.com>
+Mikael Davranche <mikael.davranche@corp.ovh.com>
+Mike Brown <brownwm@us.ibm.com>
+Mike Bush <mpbush@gmail.com>
+Mike Casas <mkcsas0@gmail.com>
+Mike Chelen <michael.chelen@gmail.com>
+Mike Danese <mikedanese@google.com>
+Mike Dillon <mike@embody.org>
+Mike Dougherty <mike.dougherty@docker.com>
+Mike Estes <mike.estes@logos.com>
+Mike Gaffney <mike@uberu.com>
+Mike Goelzer <mike.goelzer@docker.com>
+Mike Leone <mleone896@gmail.com>
+Mike Lundy <mike@fluffypenguin.org>
+Mike MacCana <mike.maccana@gmail.com>
+Mike Naberezny <mike@naberezny.com>
+Mike Snitzer <snitzer@redhat.com>
+Mike Sul <mike.sul@foundries.io>
+mikelinjie <294893458@qq.com>
+Mikhail Sobolev <mss@mawhrin.net>
+Miklos Szegedi <miklos.szegedi@cloudera.com>
+Milas Bowman <devnull@milas.dev>
+Milind Chawre <milindchawre@gmail.com>
+Miloslav Trmač <mitr@redhat.com>
+mingqing <limingqing@cyou-inc.com>
+Mingzhen Feng <fmzhen@zju.edu.cn>
+Misty Stanley-Jones <misty@docker.com>
+Mitch Capper <mitch.capper@gmail.com>
+Mizuki Urushida <z11111001011@gmail.com>
+mlarcher <github@ringabell.org>
+Mohammad Banikazemi <MBanikazemi@gmail.com>
+Mohammad Nasirifar <farnasirim@gmail.com>
+Mohammed Aaqib Ansari <maaquib@gmail.com>
+Mohd Sadiq <mohdsadiq058@gmail.com>
+Mohit Soni <mosoni@ebay.com>
+Moorthy RS <rsmoorthy@gmail.com>
+Morgan Bauer <mbauer@us.ibm.com>
+Morgante Pell <morgante.pell@morgante.net>
+Morgy93 <thomas@ulfertsprygoda.de>
+Morten Siebuhr <sbhr@sbhr.dk>
+Morton Fox <github@qslw.com>
+Moysés Borges <moysesb@gmail.com>
+mrfly <mr.wrfly@gmail.com>
+Mrunal Patel <mrunalp@gmail.com>
+Muayyad Alsadi <alsadi@gmail.com>
+Muhammad Zohaib Aslam <zohaibse011@gmail.com>
+Mustafa Akın <mustafa91@gmail.com>
+Muthukumar R <muthur@gmail.com>
+Myeongjoon Kim <kimmj8409@gmail.com>
+Máximo Cuadros <mcuadros@gmail.com>
+Médi-Rémi Hashim <medimatrix@users.noreply.github.com>
+Nace Oroz <orkica@gmail.com>
+Nahum Shalman <nshalman@omniti.com>
+Nakul Pathak <nakulpathak3@hotmail.com>
+Nalin Dahyabhai <nalin@redhat.com>
+Nan Monnand Deng <monnand@gmail.com>
+Naoki Orii <norii@cs.cmu.edu>
+Natalie Parker <nparker@omnifone.com>
+Natanael Copa <natanael.copa@docker.com>
+Natasha Jarus <linuxmercedes@gmail.com>
+Nate Brennand <nate.brennand@clever.com>
+Nate Eagleson <nate@nateeag.com>
+Nate Jones <nate@endot.org>
+Nathan Baulch <nathan.baulch@gmail.com>
+Nathan Carlson <carl4403@umn.edu>
+Nathan Herald <me@nathanherald.com>
+Nathan Hsieh <hsieh.nathan@gmail.com>
+Nathan Kleyn <nathan@nathankleyn.com>
+Nathan LeClaire <nathan.leclaire@docker.com>
+Nathan McCauley <nathan.mccauley@docker.com>
+Nathan Williams <nathan@teamtreehouse.com>
+Naveed Jamil <naveed.jamil@tenpearls.com>
+Neal McBurnett <neal@mcburnett.org>
+Neil Horman <nhorman@tuxdriver.com>
+Neil Peterson <neilpeterson@outlook.com>
+Nelson Chen <crazysim@gmail.com>
+Neyazul Haque <nuhaque@gmail.com>
+Nghia Tran <nghia@google.com>
+Niall O'Higgins <niallo@unworkable.org>
+Nicholas E. Rabenau <nerab@gmx.at>
+Nick Adcock <nick.adcock@docker.com>
+Nick DeCoursin <n.decoursin@foodpanda.com>
+Nick Irvine <nfirvine@nfirvine.com>
+Nick Neisen <nwneisen@gmail.com>
+Nick Parker <nikaios@gmail.com>
+Nick Payne <nick@kurai.co.uk>
+Nick Russo <nicholasjamesrusso@gmail.com>
+Nick Santos <nick.santos@docker.com>
+Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
+Nick Stinemates <nick@stinemates.org>
+Nick Wood <nwood@microsoft.com>
+NickrenREN <yuquan.ren@easystack.cn>
+Nicola Kabar <nicolaka@gmail.com>
+Nicolas Borboën <ponsfrilus@gmail.com>
+Nicolas De Loof <nicolas.deloof@gmail.com>
+Nicolas Dudebout <nicolas.dudebout@gatech.edu>
+Nicolas Goy <kuon@goyman.com>
+Nicolas Kaiser <nikai@nikai.net>
+Nicolas Sterchele <sterchele.nicolas@gmail.com>
+Nicolas V Castet <nvcastet@us.ibm.com>
+Nicolás Hock Isaza <nhocki@gmail.com>
+Niel Drummond <niel@drummond.lu>
+Nigel Poulton <nigelpoulton@hotmail.com>
+Nik Nyby <nikolas@gnu.org>
+Nikhil Chawla <chawlanikhil24@gmail.com>
+NikolaMandic <mn080202@gmail.com>
+Nikolas Garofil <nikolas.garofil@uantwerpen.be>
+Nikolay Edigaryev <edigaryev@gmail.com>
+Nikolay Milovanov <nmil@itransformers.net>
+ningmingxiao <ning.mingxiao@zte.com.cn>
+Nirmal Mehta <nirmalkmehta@gmail.com>
+Nishant Totla <nishanttotla@gmail.com>
+NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
+Noah Meyerhans <nmeyerha@amazon.com>
+Noah Treuhaft <noah.treuhaft@docker.com>
+NobodyOnSE <ich@sektor.selfip.com>
+noducks <onemannoducks@gmail.com>
+Nolan Darilek <nolan@thewordnerd.info>
+Nolan Miles <nolanpmiles@gmail.com>
+Noriki Nakamura <noriki.nakamura@miraclelinux.com>
+nponeccop <andy.melnikov@gmail.com>
+Nurahmadie <nurahmadie@gmail.com>
+Nuutti Kotivuori <naked@iki.fi>
+nzwsch <hi@nzwsch.com>
+O.S. Tezer <ostezer@gmail.com>
+objectified <objectified@gmail.com>
+Octol1ttle <l1ttleofficial@outlook.com>
+Odin Ugedal <odin@ugedal.com>
+Oguz Bilgic <fisyonet@gmail.com>
+Oh Jinkyun <tintypemolly@gmail.com>
+Ohad Schneider <ohadschn@users.noreply.github.com>
+ohmystack <jun.jiang02@ele.me>
+Ole Reifschneider <mail@ole-reifschneider.de>
+Oliver Neal <ItsVeryWindy@users.noreply.github.com>
+Oliver Reason <oli@overrateddev.co>
+Olivier Gambier <dmp42@users.noreply.github.com>
+Olle Jonsson <olle.jonsson@gmail.com>
+Olli Janatuinen <olli.janatuinen@gmail.com>
+Olly Pomeroy <oppomeroy@gmail.com>
+Omri Shiv <Omri.Shiv@teradata.com>
+Onur Filiz <onur.filiz@microsoft.com>
+Oriol Francès <oriolfa@gmail.com>
+Oscar Bonilla <6f6231@gmail.com>
+oscar.chen <2972789494@qq.com>
+Oskar Niburski <oskarniburski@gmail.com>
+Otto Kekäläinen <otto@seravo.fi>
+Ouyang Liduo <oyld0210@163.com>
+Ovidio Mallo <ovidio.mallo@gmail.com>
+Panagiotis Moustafellos <pmoust@elastic.co>
+Paolo G. Giarrusso <p.giarrusso@gmail.com>
+Pascal <pascalgn@users.noreply.github.com>
+Pascal Bach <pascal.bach@siemens.com>
+Pascal Borreli <pascal@borreli.com>
+Pascal Hartig <phartig@rdrei.net>
+Patrick Böänziger <patrick.baenziger@bsi-software.com>
+Patrick Devine <patrick.devine@docker.com>
+Patrick Haas <patrickhaas@google.com>
+Patrick Hemmer <patrick.hemmer@gmail.com>
+Patrick St. laurent <patrick@saint-laurent.us>
+Patrick Stapleton <github@gdi2290.com>
+Patrik Cyvoct <patrik@ptrk.io>
+pattichen <craftsbear@gmail.com>
+Paul "TBBle" Hampson <Paul.Hampson@Pobox.com>
+Paul <paul9869@gmail.com>
+paul <paul@inkling.com>
+Paul Annesley <paul@annesley.cc>
+Paul Bellamy <paul.a.bellamy@gmail.com>
+Paul Bowsher <pbowsher@globalpersonals.co.uk>
+Paul Furtado <pfurtado@hubspot.com>
+Paul Hammond <paul@paulhammond.org>
+Paul Jimenez <pj@place.org>
+Paul Kehrer <paul.l.kehrer@gmail.com>
+Paul Lietar <paul@lietar.net>
+Paul Liljenberg <liljenberg.paul@gmail.com>
+Paul Morie <pmorie@gmail.com>
+Paul Nasrat <pnasrat@gmail.com>
+Paul Seiffert <paul.seiffert@jimdo.com>
+Paul Weaver <pauweave@cisco.com>
+Paulo Gomes <pjbgf@linux.com>
+Paulo Ribeiro <paigr.io@gmail.com>
+Pavel Lobashov <ShockwaveNN@gmail.com>
+Pavel Matěja <pavel@verotel.cz>
+Pavel Pletenev <cpp.create@gmail.com>
+Pavel Pospisil <pospispa@gmail.com>
+Pavel Sutyrin <pavel.sutyrin@gmail.com>
+Pavel Tikhomirov <ptikhomirov@virtuozzo.com>
+Pavlos Ratis <dastergon@gentoo.org>
+Pavol Vargovcik <pallly.vargovcik@gmail.com>
+Pawel Konczalski <mail@konczalski.de>
+Paweł Gronowski <pawel.gronowski@docker.com>
+payall4u <payall4u@qq.com>
+Peeyush Gupta <gpeeyush@linux.vnet.ibm.com>
+Peggy Li <peggyli.224@gmail.com>
+Pei Su <sillyousu@gmail.com>
+Peng Tao <bergwolf@gmail.com>
+Penghan Wang <ph.wang@daocloud.io>
+Per Weijnitz <per.weijnitz@gmail.com>
+perhapszzy@sina.com <perhapszzy@sina.com>
+Pete Woods <pete.woods@circleci.com>
+Peter Bourgon <peter@bourgon.org>
+Peter Braden <peterbraden@peterbraden.co.uk>
+Peter Bücker <peter.buecker@pressrelations.de>
+Peter Choi <phkchoi89@gmail.com>
+Peter Dave Hello <hsu@peterdavehello.org>
+Peter Edge <peter.edge@gmail.com>
+Peter Ericson <pdericson@gmail.com>
+Peter Esbensen <pkesbensen@gmail.com>
+Peter Jaffe <pjaffe@nevo.com>
+Peter Kang <peter@spell.run>
+Peter Malmgren <ptmalmgren@gmail.com>
+Peter Salvatore <peter@psftw.com>
+Peter Volpe <petervo@redhat.com>
+Peter Waller <p@pwaller.net>
+Petr Švihlík <svihlik.petr@gmail.com>
+Petros Angelatos <petrosagg@gmail.com>
+Phil <underscorephil@gmail.com>
+Phil Estes <estesp@gmail.com>
+Phil Sphicas <phil.sphicas@att.com>
+Phil Spitler <pspitler@gmail.com>
+Philip Alexander Etling <paetling@gmail.com>
+Philip K. Warren <pkwarren@gmail.com>
+Philip Monroe <phil@philmonroe.com>
+Philipp Fruck <dev@p-fruck.de>
+Philipp Gillé <philipp.gille@gmail.com>
+Philipp Wahala <philipp.wahala@gmail.com>
+Philipp Weissensteiner <mail@philippweissensteiner.com>
+Phillip Alexander <git@phillipalexander.io>
+phineas <phin@phineas.io>
+pidster <pid@pidster.com>
+Piergiuliano Bossi <pgbossi@gmail.com>
+Pierre <py@poujade.org>
+Pierre Carrier <pierre@meteor.com>
+Pierre Dal-Pra <dalpra.pierre@gmail.com>
+Pierre Wacrenier <pierre.wacrenier@gmail.com>
+Pierre-Alain RIVIERE <pariviere@ippon.fr>
+pinglanlu <pinglanlu@outlook.com>
+Piotr Bogdan <ppbogdan@gmail.com>
+Piotr Karbowski <piotr.karbowski@protonmail.ch>
+Porjo <porjo38@yahoo.com.au>
+Poul Kjeldager Sørensen <pks@s-innovations.net>
+Pradeep Chhetri <pradeep@indix.com>
+Pradip Dhara <pradipd@microsoft.com>
+Pradipta Kr. Banerjee <bpradip@in.ibm.com>
+Prasanna Gautam <prasannagautam@gmail.com>
+Pratik Karki <prertik@outlook.com>
+Prayag Verma <prayag.verma@gmail.com>
+Priya Wadhwa <priyawadhwa@google.com>
+Projjol Banerji <probaner23@gmail.com>
+Przemek Hejman <przemyslaw.hejman@gmail.com>
+Puneet Pruthi <puneet.pruthi@oracle.com>
+Pure White <daniel48@126.com>
+pysqz <randomq@126.com>
+Qiang Huang <h.huangqiang@huawei.com>
+Qin TianHuan <tianhuan@bingotree.cn>
+Qinglan Peng <qinglanpeng@zju.edu.cn>
+Quan Tian <tianquan@cloudin.cn>
+qudongfang <qudongfang@gmail.com>
+Quentin Brossard <qbrossard@gmail.com>
+Quentin Perez <qperez@ocs.online.net>
+Quentin Tayssier <qtayssier@gmail.com>
+r0n22 <cameron.regan@gmail.com>
+Rachit Sharma <rachitsharma613@gmail.com>
+Radostin Stoyanov <rstoyanov1@gmail.com>
+Rafael Fernández López <ereslibre@ereslibre.es>
+Rafal Jeczalik <rjeczalik@gmail.com>
+Rafe Colton <rafael.colton@gmail.com>
+Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
+Raghuram Devarakonda <draghuram@gmail.com>
+Raja Sami <raja.sami@tenpearls.com>
+Rajat Pandit <rp@rajatpandit.com>
+Rajdeep Dua <dua_rajdeep@yahoo.com>
+Ralf Sippl <ralf.sippl@gmail.com>
+Ralle <spam@rasmusa.net>
+Ralph Bean <rbean@redhat.com>
+Ramkumar Ramachandra <artagnon@gmail.com>
+Ramon Brooker <rbrooker@aetherealmind.com>
+Ramon van Alteren <ramon@vanalteren.nl>
+RaviTeja Pothana <ravi-teja@live.com>
+Ray Tsang <rayt@google.com>
+ReadmeCritic <frankensteinbot@gmail.com>
+realityone <realityone@me.com>
+Recursive Madman <recursive.madman@gmx.de>
+Reficul <xuzhenglun@gmail.com>
+Regan McCooey <rmccooey27@aol.com>
+Remi Rampin <remirampin@gmail.com>
+Remy Suen <remy.suen@gmail.com>
+Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
+Renaud Gaubert <rgaubert@nvidia.com>
+Rhys Hiltner <rhys@twitch.tv>
+Ri Xu <xuri.me@gmail.com>
+Ricardo N Feliciano <FelicianoTech@gmail.com>
+Rich Horwood <rjhorwood@apple.com>
+Rich Moyse <rich@moyse.us>
+Rich Seymour <rseymour@gmail.com>
+Richard Burnison <rburnison@ebay.com>
+Richard Hansen <rhansen@rhansen.org>
+Richard Harvey <richard@squarecows.com>
+Richard Mathie <richard.mathie@amey.co.uk>
+Richard Metzler <richard@paadee.com>
+Richard Scothern <richard.scothern@gmail.com>
+Richo Healey <richo@psych0tik.net>
+Rick Bradley <rick@users.noreply.github.com>
+Rick van de Loo <rickvandeloo@gmail.com>
+Rick Wieman <git@rickw.nl>
+Rik Nijessen <rik@keefo.nl>
+Riku Voipio <riku.voipio@linaro.org>
+Riley Guerin <rileytg.dev@gmail.com>
+Ritesh H Shukla <sritesh@vmware.com>
+Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
+Rob Cowsill <42620235+rcowsill@users.noreply.github.com>
+Rob Gulewich <rgulewich@netflix.com>
+Rob Murray <rob.murray@docker.com>
+Rob Vesse <rvesse@dotnetrdf.org>
+Robert Bachmann <rb@robertbachmann.at>
+Robert Bittle <guywithnose@gmail.com>
+Robert Obryk <robryk@gmail.com>
+Robert Schneider <mail@shakeme.info>
+Robert Shade <robert.shade@gmail.com>
+Robert Stern <lexandro2000@gmail.com>
+Robert Terhaar <rterhaar@atlanticdynamic.com>
+Robert Wallis <smilingrob@gmail.com>
+Robert Wang <robert@arctic.tw>
+Roberto G. Hashioka <roberto.hashioka@docker.com>
+Roberto Muñoz Fernández <robertomf@gmail.com>
+Robin Naundorf <r.naundorf@fh-muenster.de>
+Robin Schneider <ypid@riseup.net>
+Robin Speekenbrink <robin@kingsquare.nl>
+Robin Thoni <robin@rthoni.com>
+robpc <rpcann@gmail.com>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Rodrigo Campos <rodrigoca@microsoft.com>
+Rodrigo Vaz <rodrigo.vaz@gmail.com>
+Roel Van Nyen <roel.vannyen@gmail.com>
+Roger Peppe <rogpeppe@gmail.com>
+Rohit Jnagal <jnagal@google.com>
+Rohit Kadam <rohit.d.kadam@gmail.com>
+Rohit Kapur <rkapur@flatiron.com>
+Rojin George <rojingeorge@huawei.com>
+Roland Huß <roland@jolokia.org>
+Roland Kammerer <roland.kammerer@linbit.com>
+Roland Moriz <rmoriz@users.noreply.github.com>
+Roma Sokolov <sokolov.r.v@gmail.com>
+Roman Dudin <katrmr@gmail.com>
+Roman Mazur <roman@balena.io>
+Roman Strashkin <roman.strashkin@gmail.com>
+Roman Volosatovs <roman.volosatovs@docker.com>
+Roman Zabaluev <gpg@haarolean.dev>
+Ron Smits <ron.smits@gmail.com>
+Ron Williams <ron.a.williams@gmail.com>
+Rong Gao <gaoronggood@163.com>
+Rong Zhang <rongzhang@alauda.io>
+Rongxiang Song <tinysong1226@gmail.com>
+Rony Weng <ronyweng@synology.com>
+root <docker-dummy@example.com>
+root <root@lxdebmas.marist.edu>
+root <root@ubuntu-14.04-amd64-vbox>
+root <root@webm215.cluster016.ha.ovh.net>
+Rory Hunter <roryhunter2@gmail.com>
+Rory McCune <raesene@gmail.com>
+Ross Boucher <rboucher@gmail.com>
+Rovanion Luckey <rovanion.luckey@gmail.com>
+Roy Reznik <roy@wiz.io>
+Royce Remer <royceremer@gmail.com>
+Rozhnov Alexandr <nox73@ya.ru>
+Rudolph Gottesheim <r.gottesheim@loot.at>
+Rui Cao <ruicao@alauda.io>
+Rui JingAn <quiterace@gmail.com>
+Rui Lopes <rgl@ruilopes.com>
+Ruilin Li <liruilin4@huawei.com>
+Runshen Zhu <runshen.zhu@gmail.com>
+Russ Magee <rmagee@gmail.com>
+Ryan Abrams <rdabrams@gmail.com>
+Ryan Anderson <anderson.ryanc@gmail.com>
+Ryan Aslett <github@mixologic.com>
+Ryan Barry <rbarry@mirantis.com>
+Ryan Belgrave <rmb1993@gmail.com>
+Ryan Campbell <campbellr@gmail.com>
+Ryan Detzel <ryan.detzel@gmail.com>
+Ryan Fowler <rwfowler@gmail.com>
+Ryan Liu <ryanlyy@me.com>
+Ryan McLaughlin <rmclaughlin@insidesales.com>
+Ryan O'Donnell <odonnellryanc@gmail.com>
+Ryan Seto <ryanseto@yak.net>
+Ryan Shea <sheabot03@gmail.com>
+Ryan Simmen <ryan.simmen@gmail.com>
+Ryan Stelly <ryan.stelly@live.com>
+Ryan Thomas <rthomas@atlassian.com>
+Ryan Trauntvein <rtrauntvein@novacoast.com>
+Ryan Wallner <ryan.wallner@clusterhq.com>
+Ryan Zhang <ryan.zhang@docker.com>
+ryancooper7 <ryan.cooper7@gmail.com>
+RyanDeng <sheldon.d1018@gmail.com>
+Ryo Nakao <nakabonne@gmail.com>
+Ryoga Saito <contact@proelbtn.com>
+Régis Behmo <regis@behmo.com>
+Rémy Greinhofer <remy.greinhofer@livelovely.com>
+s. rannou <mxs@sbrk.org>
+Sabin Basyal <sabin.basyal@gmail.com>
+Sachin Joshi <sachin_jayant_joshi@hotmail.com>
+Sagar Hani <sagarhani33@gmail.com>
+Sainath Grandhi <sainath.grandhi@intel.com>
+Sakeven Jiang <jc5930@sina.cn>
+Salahuddin Khan <salah@docker.com>
+Sally O'Malley <somalley@redhat.com>
+Sam Abed <sam.abed@gmail.com>
+Sam Alba <sam.alba@gmail.com>
+Sam Bailey <cyprix@cyprix.com.au>
+Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
+Sam Neirinck <sam@samneirinck.com>
+Sam Reis <sreis@atlassian.com>
+Sam Rijs <srijs@airpost.net>
+Sam Thibault <sam.thibault@docker.com>
+Sam Whited <sam@samwhited.com>
+Sambuddha Basu <sambuddhabasu1@gmail.com>
+Sami Wagiaalla <swagiaal@redhat.com>
+Samuel Andaya <samuel@andaya.net>
+Samuel Dion-Girardeau <samuel.diongirardeau@gmail.com>
+Samuel Karp <me@samuelkarp.com>
+Samuel PHAN <samuel-phan@users.noreply.github.com>
+sanchayanghosh <sanchayanghosh@outlook.com>
+Sandeep Bansal <sabansal@microsoft.com>
+Sankar சங்கர் <sankar.curiosity@gmail.com>
+Sanket Saurav <sanketsaurav@gmail.com>
+Santhosh Manohar <santhosh@docker.com>
+sapphiredev <se.imas.kr@gmail.com>
+Sargun Dhillon <sargun@netflix.com>
+Sascha Andres <sascha.andres@outlook.com>
+Sascha Grunert <sgrunert@suse.com>
+SataQiu <qiushida@beyondcent.com>
+Satnam Singh <satnam@raintown.org>
+Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
+Satoshi Tagomori <tagomoris@gmail.com>
+Scott Bessler <scottbessler@gmail.com>
+Scott Collier <emailscottcollier@gmail.com>
+Scott Johnston <scott@docker.com>
+Scott Moser <smoser@brickies.net>
+Scott Percival <scottp@lastyard.com>
+Scott Stamp <scottstamp851@gmail.com>
+Scott Walls <sawalls@umich.edu>
+sdreyesg <sdreyesg@gmail.com>
+Sean Christopherson <sean.j.christopherson@intel.com>
+Sean Cronin <seancron@gmail.com>
+Sean Lee <seanlee@tw.ibm.com>
+Sean McIntyre <s.mcintyre@xverba.ca>
+Sean OMeara <sean@chef.io>
+Sean P. Kane <skane@newrelic.com>
+Sean Rodman <srodman7689@gmail.com>
+Sebastiaan van Steenis <mail@superseb.nl>
+Sebastiaan van Stijn <github@gone.nl>
+Sebastian Höffner <sebastian.hoeffner@mevis.fraunhofer.de>
+Sebastian Radloff <sradloff23@gmail.com>
+Sebastian Thomschke <sebthom@users.noreply.github.com>
+Sebastien Goasguen <runseb@gmail.com>
+Senthil Kumar Selvaraj <senthil.thecoder@gmail.com>
+Senthil Kumaran <senthil@uthcode.com>
+SeongJae Park <sj38.park@gmail.com>
+Seongyeol Lim <seongyeol37@gmail.com>
+Serge Hallyn <serge.hallyn@ubuntu.com>
+Sergey Alekseev <sergey.alekseev.minsk@gmail.com>
+Sergey Evstifeev <sergey.evstifeev@gmail.com>
+Sergii Kabashniuk <skabashnyuk@codenvy.com>
+Sergio Lopez <slp@redhat.com>
+Serhat Gülçiçek <serhat25@gmail.com>
+Serhii Nakon <serhii.n@thescimus.com>
+SeungUkLee <lsy931106@gmail.com>
+Sevki Hasirci <s@sevki.org>
+Shane Canon <scanon@lbl.gov>
+Shane da Silva <shane@dasilva.io>
+Shaun Kaasten <shaunk@gmail.com>
+Shaun Thompson <shaun.thompson@docker.com>
+shaunol <shaunol@gmail.com>
+Shawn Landden <shawn@churchofgit.com>
+Shawn Siefkas <shawn.siefkas@meredith.com>
+shawnhe <shawnhe@shawnhedeMacBook-Pro.local>
+Shayan Pooya <shayan@liveve.org>
+Shayne Wang <shaynexwang@gmail.com>
+Shekhar Gulati <shekhargulati84@gmail.com>
+Sheng Yang <sheng@yasker.org>
+Shengbo Song <thomassong@tencent.com>
+Shengjing Zhu <zhsj@debian.org>
+Shev Yan <yandong_8212@163.com>
+Shih-Yuan Lee <fourdollars@gmail.com>
+Shihao Xia <charlesxsh@hotmail.com>
+Shijiang Wei <mountkin@gmail.com>
+Shijun Qin <qinshijun16@mails.ucas.ac.cn>
+Shishir Mahajan <shishir.mahajan@redhat.com>
+Shoubhik Bose <sbose78@gmail.com>
+Shourya Sarcar <shourya.sarcar@gmail.com>
+Shreenidhi Shedi <shreenidhi.shedi@broadcom.com>
+Shu-Wai Chow <shu-wai.chow@seattlechildrens.org>
+shuai-z <zs.broccoli@gmail.com>
+Shukui Yang <yangshukui@huawei.com>
+Sian Lerk Lau <kiawin@gmail.com>
+Siarhei Rasiukevich <s_rasiukevich@wargaming.net>
+Sidhartha Mani <sidharthamn@gmail.com>
+sidharthamani <sid@rancher.com>
+Silas Sewell <silas@sewell.org>
+Silvan Jegen <s.jegen@gmail.com>
+Simão Reis <smnrsti@gmail.com>
+Simon Barendse <simon.barendse@gmail.com>
+Simon Eskildsen <sirup@sirupsen.com>
+Simon Ferquel <simon.ferquel@docker.com>
+Simon Leinen <simon.leinen@gmail.com>
+Simon Menke <simon.menke@gmail.com>
+Simon Taranto <simon.taranto@gmail.com>
+Simon Vikstrom <pullreq@devsn.se>
+Sindhu S <sindhus@live.in>
+Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
+skanehira <sho19921005@gmail.com>
+Smark Meng <smark@freecoop.net>
+Solganik Alexander <solganik@gmail.com>
+Solomon Hykes <solomon@docker.com>
+Song Gao <song@gao.io>
+Soshi Katsuta <soshi.katsuta@gmail.com>
+Sotiris Salloumis <sotiris.salloumis@gmail.com>
+Soulou <leo@unbekandt.eu>
+Spencer Brown <spencer@spencerbrown.org>
+Spencer Smith <robertspencersmith@gmail.com>
+Spike Curtis <spike.curtis@metaswitch.com>
+Sridatta Thatipamala <sthatipamala@gmail.com>
+Sridhar Ratnakumar <sridharr@activestate.com>
+Srini Brahmaroutu <srbrahma@us.ibm.com>
+Srinivasan Srivatsan <srinivasan.srivatsan@hpe.com>
+Staf Wagemakers <staf@wagemakers.be>
+Stanislav Bondarenko <stanislav.bondarenko@gmail.com>
+Stanislav Levin <slev@altlinux.org>
+Steeve Morin <steeve.morin@gmail.com>
+Stefan Berger <stefanb@linux.vnet.ibm.com>
+Stefan Gehrig <stefan.gehrig.hn@googlemail.com>
+Stefan J. Wernli <swernli@microsoft.com>
+Stefan Praszalowicz <stefan@greplin.com>
+Stefan S. <tronicum@user.github.com>
+Stefan Scherer <stefan.scherer@docker.com>
+Stefan Staudenmeyer <doerte@instana.com>
+Stefan Weil <sw@weilnetz.de>
+Steffen Butzer <steffen.butzer@outlook.com>
+Stephan Henningsen <stephan-henningsen@users.noreply.github.com>
+Stephan Spindler <shutefan@gmail.com>
+Stephen Benjamin <stephen@redhat.com>
+Stephen Crosby <stevecrozz@gmail.com>
+Stephen Day <stevvooe@gmail.com>
+Stephen Drake <stephen@xenolith.net>
+Stephen Rust <srust@blockbridge.com>
+Steve Desmond <steve@vtsv.ca>
+Steve Dougherty <steve@asksteved.com>
+Steve Durrheimer <s.durrheimer@gmail.com>
+Steve Francia <steve.francia@gmail.com>
+Steve Koch <stevekochscience@gmail.com>
+Steven Burgess <steven.a.burgess@hotmail.com>
+Steven Erenst <stevenerenst@gmail.com>
+Steven Hartland <steven.hartland@multiplay.co.uk>
+Steven Iveson <sjiveson@outlook.com>
+Steven Merrill <steven.merrill@gmail.com>
+Steven Richards <steven@axiomzen.co>
+Steven Taylor <steven.taylor@me.com>
+Stéphane Este-Gracias <sestegra@gmail.com>
+Stig Larsson <stig@larsson.dev>
+Su Wang <su.wang@docker.com>
+Subhajit Ghosh <isubuz.g@gmail.com>
+Sujith Haridasan <sujith.h@gmail.com>
+Sun Gengze <690388648@qq.com>
+Sun Jianbo <wonderflow.sun@gmail.com>
+Sune Keller <sune.keller@gmail.com>
+Sunny Gogoi <indiasuny000@gmail.com>
+Suryakumar Sudar <surya.trunks@gmail.com>
+Sven Dowideit <SvenDowideit@home.org.au>
+Swapnil Daingade <swapnil.daingade@gmail.com>
+Sylvain Baubeau <lebauce@gmail.com>
+Sylvain Bellemare <sylvain@ascribe.io>
+Sébastien <sebastien@yoozio.com>
+Sébastien HOUZÉ <cto@verylastroom.com>
+Sébastien Luttringer <seblu@seblu.net>
+Sébastien Stormacq <sebsto@users.noreply.github.com>
+Sören Tempel <soeren+git@soeren-tempel.net>
+Tabakhase <mail@tabakhase.com>
+Tadej Janež <tadej.j@nez.si>
+Tadeusz Dudkiewicz <tadeusz.dudkiewicz@rtbhouse.com>
+Takuto Sato <tockn.jp@gmail.com>
+tang0th <tang0th@gmx.com>
+Tangi Colin <tangicolin@gmail.com>
+Tatsuki Sugiura <sugi@nemui.org>
+Tatsushi Inagaki <e29253@jp.ibm.com>
+Taylan Isikdemir <taylani@google.com>
+Taylor Jones <monitorjbl@gmail.com>
+tcpdumppy <847462026@qq.com>
+Ted M. Young <tedyoung@gmail.com>
+Tehmasp Chaudhri <tehmasp@gmail.com>
+Tejaswini Duggaraju <naduggar@microsoft.com>
+Tejesh Mehta <tejesh.mehta@gmail.com>
+Terry Chu <zue.hterry@gmail.com>
+terryding77 <550147740@qq.com>
+Thatcher Peskens <thatcher@docker.com>
+theadactyl <thea.lamkin@gmail.com>
+Thell 'Bo' Fowler <thell@tbfowler.name>
+Thermionix <bond711@gmail.com>
+Thiago Alves Silva <thiago.alves@aurea.com>
+Thijs Terlouw <thijsterlouw@gmail.com>
+Thomas Bikeev <thomas.bikeev@mac.com>
+Thomas Frössman <thomasf@jossystem.se>
+Thomas Gazagnaire <thomas@gazagnaire.org>
+Thomas Graf <tgraf@suug.ch>
+Thomas Grainger <tagrain@gmail.com>
+Thomas Hansen <thomas.hansen@gmail.com>
+Thomas Ledos <thomas.ledos92@gmail.com>
+Thomas Leonard <thomas.leonard@docker.com>
+Thomas Léveil <thomasleveil@gmail.com>
+Thomas Orozco <thomas@orozco.fr>
+Thomas Riccardi <riccardi@systran.fr>
+Thomas Schroeter <thomas@cliqz.com>
+Thomas Sjögren <konstruktoid@users.noreply.github.com>
+Thomas Swift <tgs242@gmail.com>
+Thomas Tanaka <thomas.tanaka@oracle.com>
+Thomas Texier <sharkone@en-mousse.org>
+Ti Zhou <tizhou1986@gmail.com>
+Tiago Seabra <tlgs@users.noreply.github.com>
+Tianon Gravi <admwiggin@gmail.com>
+Tianyi Wang <capkurmagati@gmail.com>
+Tibor Vass <teabee89@gmail.com>
+Tiffany Jernigan <tiffany.f.j@gmail.com>
+Tiffany Low <tiffany@box.com>
+Till Claassen <pixelistik@users.noreply.github.com>
+Till Wegmüller <toasterson@gmail.com>
+Tim <elatllat@gmail.com>
+Tim Bart <tim@fewagainstmany.com>
+Tim Bosse <taim@bosboot.org>
+Tim Dettrick <t.dettrick@uq.edu.au>
+Tim Düsterhus <tim@bastelstu.be>
+Tim Hockin <thockin@google.com>
+Tim Potter <tpot@hpe.com>
+Tim Ruffles <oi@truffles.me.uk>
+Tim Smith <timbot@google.com>
+Tim Terhorst <mynamewastaken+git@gmail.com>
+Tim Wagner <tim.wagner@freenet.ag>
+Tim Wang <timwangdev@gmail.com>
+Tim Waugh <twaugh@redhat.com>
+Tim Wraight <tim.wraight@tangentlabs.co.uk>
+Tim Zju <21651152@zju.edu.cn>
+timchenxiaoyu <837829664@qq.com>
+timfeirg <kkcocogogo@gmail.com>
+Timo Rothenpieler <timo@rothenpieler.org>
+Timothy Hobbs <timothyhobbs@seznam.cz>
+tjwebb123 <tjwebb123@users.noreply.github.com>
+tobe <tobegit3hub@gmail.com>
+Tobias Bieniek <Tobias.Bieniek@gmx.de>
+Tobias Bradtke <webwurst@gmail.com>
+Tobias Gesellchen <tobias@gesellix.de>
+Tobias Klauser <tklauser@distanz.ch>
+Tobias Munk <schmunk@usrbin.de>
+Tobias Pfandzelter <tobias@pfandzelter.com>
+Tobias Schmidt <ts@soundcloud.com>
+Tobias Schwab <tobias.schwab@dynport.de>
+Todd Crane <todd@toddcrane.com>
+Todd Lunter <tlunter@gmail.com>
+Todd Whiteman <todd.whiteman@joyent.com>
+Toli Kuznets <toli@docker.com>
+Tom Barlow <tomwbarlow@gmail.com>
+Tom Booth <tombooth@gmail.com>
+Tom Denham <tom@tomdee.co.uk>
+Tom Fotherby <tom+github@peopleperhour.com>
+Tom Howe <tom.howe@enstratius.com>
+Tom Hulihan <hulihan.tom159@gmail.com>
+Tom Maaswinkel <tom.maaswinkel@12wiki.eu>
+Tom Parker <palfrey@tevp.net>
+Tom Sweeney <tsweeney@redhat.com>
+Tom Wilkie <tom.wilkie@gmail.com>
+Tom X. Tobin <tomxtobin@tomxtobin.com>
+Tom Zhao <zlwangel@gmail.com>
+Tomas Janousek <tomi@nomi.cz>
+Tomas Kral <tomas.kral@gmail.com>
+Tomas Tomecek <ttomecek@redhat.com>
+Tomasz Kopczynski <tomek@kopczynski.net.pl>
+Tomasz Lipinski <tlipinski@users.noreply.github.com>
+Tomasz Nurkiewicz <nurkiewicz@gmail.com>
+Tomek Mańko <tomek.manko@railgun-solutions.com>
+Tommaso Visconti <tommaso.visconti@gmail.com>
+Tomoya Tabuchi <t@tomoyat1.com>
+Tomáš Hrčka <thrcka@redhat.com>
+Tomáš Virtus <nechtom@gmail.com>
+tonic <tonicbupt@gmail.com>
+Tonny Xu <tonny.xu@gmail.com>
+Tony Abboud <tdabboud@hotmail.com>
+Tony Daws <tony@daws.ca>
+Tony Miller <mcfiredrill@gmail.com>
+toogley <toogley@mailbox.org>
+Torstein Husebø <torstein@huseboe.net>
+Toshiaki Makita <makita.toshiaki@lab.ntt.co.jp>
+Tõnis Tiigi <tonistiigi@gmail.com>
+Trace Andreason <tandreason@gmail.com>
+tracylihui <793912329@qq.com>
+Trapier Marshall <tmarshall@mirantis.com>
+Travis Cline <travis.cline@gmail.com>
+Travis Thieman <travis.thieman@gmail.com>
+Trent Ogren <tedwardo2@gmail.com>
+Trevor <trevinwoodstock@gmail.com>
+Trevor Pounds <trevor.pounds@gmail.com>
+Trevor Sullivan <pcgeek86@gmail.com>
+Trishna Guha <trishnaguha17@gmail.com>
+Tristan Carel <tristan@cogniteev.com>
+Troy Denton <trdenton@gmail.com>
+Tudor Brindus <me@tbrindus.ca>
+Ty Alexander <ty.alexander@sendgrid.com>
+Tycho Andersen <tycho@docker.com>
+Tyler Brock <tyler.brock@gmail.com>
+Tyler Brown <tylers.pile@gmail.com>
+Tzu-Jung Lee <roylee17@gmail.com>
+uhayate <uhayate.gong@daocloud.io>
+Ulysse Carion <ulyssecarion@gmail.com>
+Umesh Yadav <umesh4257@gmail.com>
+Utz Bacher <utz.bacher@de.ibm.com>
+vagrant <vagrant@ubuntu-14.04-amd64-vbox>
+Vaidas Jablonskis <jablonskis@gmail.com>
+Valentin Kulesh <valentin.kulesh@virtuozzo.com>
+vanderliang <lansheng@meili-inc.com>
+Velko Ivanov <vivanov@deeperplane.com>
+Veres Lajos <vlajos@gmail.com>
+Victor Algaze <valgaze@gmail.com>
+Victor Coisne <victor.coisne@dotcloud.com>
+Victor Costan <costan@gmail.com>
+Victor I. Wood <viw@t2am.com>
+Victor Lyuboslavsky <victor@victoreda.com>
+Victor Marmol <vmarmol@google.com>
+Victor Palma <palma.victor@gmail.com>
+Victor Toni <victor.toni@gmail.com>
+Victor Vieux <victor.vieux@docker.com>
+Victoria Bialas <victoria.bialas@docker.com>
+Vijaya Kumar K <vijayak@caviumnetworks.com>
+Vikas Choudhary <choudharyvikas16@gmail.com>
+Vikram bir Singh <vsingh@mirantis.com>
+Viktor Stanchev <me@viktorstanchev.com>
+Viktor Vojnovski <viktor.vojnovski@amadeus.com>
+VinayRaghavanKS <raghavan.vinay@gmail.com>
+Vincent Batts <vbatts@redhat.com>
+Vincent Bernat <vincent@bernat.ch>
+Vincent Boulineau <vincent.boulineau@datadoghq.com>
+Vincent Demeester <vincent.demeester@docker.com>
+Vincent Giersch <vincent.giersch@ovh.net>
+Vincent Mayers <vincent.mayers@inbloom.org>
+Vincent Woo <me@vincentwoo.com>
+Vinod Kulkarni <vinod.kulkarni@gmail.com>
+Vishal Doshi <vishal.doshi@gmail.com>
+Vishnu Kannan <vishnuk@google.com>
+Vitaly Ostrosablin <vostrosablin@virtuozzo.com>
+Vitor Anjos <bartier@users.noreply.github.com>
+Vitor Monteiro <vmrmonteiro@gmail.com>
+Vivek Agarwal <me@vivek.im>
+Vivek Dasgupta <vdasgupt@redhat.com>
+Vivek Goyal <vgoyal@redhat.com>
+Vladimir Bulyga <xx@ccxx.cc>
+Vladimir Kirillov <proger@wilab.org.ua>
+Vladimir Pouzanov <farcaller@google.com>
+Vladimir Rutsky <altsysrq@gmail.com>
+Vladimir Varankin <nek.narqo+git@gmail.com>
+VladimirAus <v_roudakov@yahoo.com>
+Vladislav Kolesnikov <vkolesnikov@beget.ru>
+Vlastimil Zeman <vlastimil.zeman@diffblue.com>
+Vojtech Vitek (V-Teq) <vvitek@redhat.com>
+voloder <110066198+voloder@users.noreply.github.com>
+Walter Leibbrandt <github@wrl.co.za>
+Walter Stanish <walter@pratyeka.org>
+Wang Chao <chao.wang@ucloud.cn>
+Wang Guoliang <liangcszzu@163.com>
+Wang Jie <wangjie5@chinaskycloud.com>
+Wang Long <long.wanglong@huawei.com>
+Wang Ping <present.wp@icloud.com>
+Wang Xing <hzwangxing@corp.netease.com>
+Wang Yuexiao <wang.yuexiao@zte.com.cn>
+Wang Yumu <37442693@qq.com>
+wanghuaiqing <wanghuaiqing@loongson.cn>
+Ward Vandewege <ward@jhvc.com>
+WarheadsSE <max@warheads.net>
+Wassim Dhif <wassimdhif@gmail.com>
+Wataru Ishida <ishida.wataru@lab.ntt.co.jp>
+Wayne Chang <wayne@neverfear.org>
+Wayne Song <wsong@docker.com>
+weebney <weebney@gmail.com>
+Weerasak Chongnguluam <singpor@gmail.com>
+Wei Fu <fuweid89@gmail.com>
+Wei Wu <wuwei4455@gmail.com>
+Wei-Ting Kuo <waitingkuo0527@gmail.com>
+weipeng <weipeng@tuscloud.io>
+weiyan <weiyan3@huawei.com>
+Weiyang Zhu <cnresonant@gmail.com>
+Wen Cheng Ma <wenchma@cn.ibm.com>
+Wendel Fleming <wfleming@usc.edu>
+Wenjun Tang <tangwj2@lenovo.com>
+Wenkai Yin <yinw@vmware.com>
+wenlxie <wenlxie@ebay.com>
+Wenxuan Zhao <viz@linux.com>
+Wenyu You <21551128@zju.edu.cn>
+Wenzhi Liang <wenzhi.liang@gmail.com>
+Wes Morgan <cap10morgan@gmail.com>
+Wesley Pettit <wppttt@amazon.com>
+Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
+Wiktor Kwapisiewicz <wiktor@metacode.biz>
+Will Dietz <w@wdtz.org>
+Will Rouesnel <w.rouesnel@gmail.com>
+Will Weaver <monkey@buildingbananas.com>
+willhf <willhf@gmail.com>
+William Delanoue <william.delanoue@gmail.com>
+William Henry <whenry@redhat.com>
+William Hubbs <w.d.hubbs@gmail.com>
+William Martin <wmartin@pivotal.io>
+William Riancho <wr.wllm@gmail.com>
+William Thurston <thurstw@amazon.com>
+Wilson Júnior <wilsonpjunior@gmail.com>
+Wing-Kam Wong <wingkwong.code@gmail.com>
+WiseTrem <shepelyov.g@gmail.com>
+Wolfgang Nagele <mail@wnagele.com>
+Wolfgang Powisch <powo@powo.priv.at>
+Wonjun Kim <wonjun.kim@navercorp.com>
+WuLonghui <wlh6666@qq.com>
+xamyzhao <x.amy.zhao@gmail.com>
+Xia Wu <xwumzn@amazon.com>
+Xian Chaobo <xianchaobo@huawei.com>
+Xianglin Gao <xlgao@zju.edu.cn>
+Xianjie <guxianjie@gmail.com>
+Xianlu Bird <xianlubird@gmail.com>
+Xiao YongBiao <xyb4638@gmail.com>
+Xiao Zhang <xiaozhang0210@hotmail.com>
+XiaoBing Jiang <s7v7nislands@gmail.com>
+Xiaodong Liu <liuxiaodong@loongson.cn>
+Xiaodong Zhang <a4012017@sina.com>
+Xiaohua Ding <xiao_hua_ding@sina.cn>
+Xiaoxi He <xxhe@alauda.io>
+Xiaoxu Chen <chenxiaoxu14@otcaix.iscas.ac.cn>
+Xiaoyu Zhang <zhang.xiaoyu33@zte.com.cn>
+xichengliudui <1693291525@qq.com>
+xiekeyang <xiekeyang@huawei.com>
+Ximo Guanter Gonzálbez <joaquin.guantergonzalbez@telefonica.com>
+xin.li <xin.li@daocloud.io>
+Xinbo Weng <xihuanbo_0521@zju.edu.cn>
+Xinfeng Liu <XinfengLiu@icloud.com>
+Xinzi Zhou <imdreamrunner@gmail.com>
+Xiuming Chen <cc@cxm.cc>
+Xuecong Liao <satorulogic@gmail.com>
+xuzhaokui <cynicholas@gmail.com>
+Yadnyawalkya Tale <ytale@redhat.com>
+Yahya <ya7yaz@gmail.com>
+yalpul <yalpul@gmail.com>
+YAMADA Tsuyoshi <tyamada@minimum2scp.org>
+Yamasaki Masahide <masahide.y@gmail.com>
+Yamazaki Masashi <masi19bw@gmail.com>
+Yan Feng <yanfeng2@huawei.com>
+Yan Zhu <yanzhu@alauda.io>
+Yang Bai <hamo.by@gmail.com>
+Yang Li <idealhack@gmail.com>
+Yang Pengfei <yangpengfei4@huawei.com>
+yangchenliang <yangchenliang@huawei.com>
+Yann Autissier <yann.autissier@gmail.com>
+Yanqiang Miao <miao.yanqiang@zte.com.cn>
+Yao Zaiyong <yaozaiyong@hotmail.com>
+Yash Murty <yashmurty@gmail.com>
+Yassine Tijani <yasstij11@gmail.com>
+Yasunori Mahata <nori@mahata.net>
+Yazhong Liu <yorkiefixer@gmail.com>
+Yestin Sun <sunyi0804@gmail.com>
+Yi EungJun <eungjun.yi@navercorp.com>
+Yibai Zhang <xm1994@gmail.com>
+Yihang Ho <hoyihang5@gmail.com>
+Ying Li <ying.li@docker.com>
+Yohei Ueda <yohei@jp.ibm.com>
+Yong Tang <yong.tang.github@outlook.com>
+Yongxin Li <yxli@alauda.io>
+Yongzhi Pan <panyongzhi@gmail.com>
+Yosef Fertel <yfertel@gmail.com>
+You-Sheng Yang (楊有勝) <vicamo@gmail.com>
+youcai <omegacoleman@gmail.com>
+Youcef YEKHLEF <yyekhlef@gmail.com>
+Youfu Zhang <zhangyoufu@gmail.com>
+YR Chen <stevapple@icloud.com>
+Yu Changchun <yuchangchun1@huawei.com>
+Yu Chengxia <yuchengxia@huawei.com>
+Yu Peng <yu.peng36@zte.com.cn>
+Yu-Ju Hong <yjhong@google.com>
+Yuan Sun <sunyuan3@huawei.com>
+Yuanhong Peng <pengyuanhong@huawei.com>
+Yue Zhang <zy675793960@yeah.net>
+Yufei Xiong <yufei.xiong@qq.com>
+Yuhao Fang <fangyuhao@gmail.com>
+Yuichiro Kaneko <spiketeika@gmail.com>
+YujiOshima <yuji.oshima0x3fd@gmail.com>
+Yunxiang Huang <hyxqshk@vip.qq.com>
+Yurii Rashkovskii <yrashk@gmail.com>
+Yusuf Tarık Günaydın <yusuf_tarik@hotmail.com>
+Yves Blusseau <90z7oey02@sneakemail.com>
+Yves Junqueira <yves.junqueira@gmail.com>
+Zac Dover <zdover@redhat.com>
+Zach Borboa <zachborboa@gmail.com>
+Zach Gershman <zachgersh@gmail.com>
+Zachary Jaffee <zjaffee@us.ibm.com>
+Zain Memon <zain@inzain.net>
+Zaiste! <oh@zaiste.net>
+Zane DeGraffenried <zane.deg@gmail.com>
+Zefan Li <lizefan@huawei.com>
+Zen Lin(Zhinan Lin) <linzhinan@huawei.com>
+Zhang Kun <zkazure@gmail.com>
+Zhang Wei <zhangwei555@huawei.com>
+Zhang Wentao <zhangwentao234@huawei.com>
+zhangguanzhang <zhangguanzhang@qq.com>
+ZhangHang <stevezhang2014@gmail.com>
+zhangxianwei <xianwei.zw@alibaba-inc.com>
+Zhenan Ye <21551168@zju.edu.cn>
+zhenghenghuo <zhenghenghuo@zju.edu.cn>
+Zhenhai Gao <gaozh1988@live.com>
+Zhenkun Bi <bi.zhenkun@zte.com.cn>
+ZhiPeng Lu <lu.zhipeng@zte.com.cn>
+zhipengzuo <zuozhipeng@baidu.com>
+Zhou Hao <zhouhao@cn.fujitsu.com>
+Zhoulin Xie <zhoulin.xie@daocloud.io>
+Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
+Zhu Kunjia <zhu.kunjia@zte.com.cn>
+Zhuoyun Wei <wzyboy@wzyboy.org>
+Ziheng Liu <lzhfromustc@gmail.com>
+Zilin Du <zilin.du@gmail.com>
+zimbatm <zimbatm@zimbatm.com>
+Ziming Dong <bnudzm@foxmail.com>
+ZJUshuaizhou <21551191@zju.edu.cn>
+zmarouf <zeid.marouf@gmail.com>
+Zoltan Tombol <zoltan.tombol@gmail.com>
+Zou Yu <zouyu7@huawei.com>
+zqh <zqhxuyuan@gmail.com>
+Zuhayr Elahi <zuhayr.elahi@docker.com>
+Zunayed Ali <zunayed@gmail.com>
+Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
+Átila Camurça Alves <camurca.home@gmail.com>
+吴小白 <296015668@qq.com>
+尹吉峰 <jifeng.yin@gmail.com>
+屈骏 <qujun@tiduyun.com>
+徐俊杰 <paco.xu@daocloud.io>
+慕陶 <jihui.xjh@alibaba-inc.com>
+搏通 <yufeng.pyf@alibaba-inc.com>
+黄艳红00139573 <huang.yanhong@zte.com.cn>
+정재영 <jjy600901@gmail.com>
diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE
new file mode 100644
index 0000000..6d8d58f
--- /dev/null
+++ b/vendor/github.com/docker/docker/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2013-2018 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE
new file mode 100644
index 0000000..58b19b6
--- /dev/null
+++ b/vendor/github.com/docker/docker/NOTICE
@@ -0,0 +1,19 @@
+Docker
+Copyright 2012-2017 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (https://www.docker.com).
+
+This product contains software (https://github.com/creack/pty) developed
+by Keith Rarick, licensed under the MIT License.
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md
new file mode 100644
index 0000000..381f198
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/README.md
@@ -0,0 +1,42 @@
+# Working on the Engine API
+
+The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon.
+
+It consists of various components in this repository:
+
+- `api/swagger.yaml` A Swagger definition of the API.
+- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this.
+- `cli/` The command-line client.
+- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs, as sketched below.
+- `daemon/` The daemon, which serves the API.
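+
+For example, a third-party Go program might use the client like this (a minimal
+sketch, assuming the `github.com/docker/docker/client` package):
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/docker/docker/client"
+)
+
+func main() {
+	// Build a client from the usual DOCKER_* environment variables and
+	// negotiate an API version that both sides support.
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		panic(err)
+	}
+	defer cli.Close()
+
+	// Ping the daemon to verify connectivity and report its API version.
+	ping, err := cli.Ping(context.Background())
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("daemon API version:", ping.APIVersion)
+}
+```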
+
+## Swagger definition
+
+The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
+
+1. Automatically generate documentation.
+2. Automatically generate the Go server and client. (A work-in-progress.)
+3. Provide a machine-readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
+
+## Updating the API documentation
+
+The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation.
+
+The file is split into two main sections:
+
+- `definitions`, which defines reusable objects used in requests and responses
+- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable)
+
+To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.
+
+There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/).
+
+`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing.
+
+## Viewing the API documentation
+
+When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
+
+Run `make swagger-docs` and a preview will be running at `http://localhost:9000`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
+
+The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).
diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go
new file mode 100644
index 0000000..2c62cd4
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/common.go
@@ -0,0 +1,20 @@
+package api // import "github.com/docker/docker/api"
+
+// Common constants for daemon and client.
+const (
+ // DefaultVersion of the current REST API.
+ DefaultVersion = "1.48"
+
+ // MinSupportedAPIVersion is the minimum API version that can be supported
+ // by the API server, specified as "major.minor". Note that the daemon
+ // may be configured with a different minimum API version, as returned
+ // in [github.com/docker/docker/api/types.Version.MinAPIVersion].
+ //
+ // API requests for API versions lower than the configured version produce
+ // an error.
+ MinSupportedAPIVersion = "1.24"
+
+ // NoBaseImageSpecifier is the symbol used by the FROM
+ // command to specify that no base image is to be used.
+ NoBaseImageSpecifier = "scratch"
+)
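+
+// For example, a client can pin requests to this version by prefixing the
+// request path (a minimal sketch, assuming a daemon listening on
+// tcp://localhost:2375 without TLS):
+//
+//	resp, err := http.Get("http://localhost:2375/v" + api.DefaultVersion + "/_ping")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer resp.Body.Close()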
diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml
new file mode 100644
index 0000000..f07a027
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/swagger-gen.yaml
@@ -0,0 +1,12 @@
+
+layout:
+ models:
+ - name: definition
+ source: asset:model
+ target: "{{ joinFilePath .Target .ModelPackage }}"
+ file_name: "{{ (snakize (pascalize .Name)) }}.go"
+ operations:
+ - name: handler
+ source: asset:serverOperation
+ target: "{{ joinFilePath .Target .APIPackage .Package }}"
+ file_name: "{{ (snakize (pascalize .Name)) }}.go"
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
new file mode 100644
index 0000000..646032d
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -0,0 +1,13528 @@
+# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API.
+#
+# This is used for generating API documentation and the types used by the
+# client/server. See api/README.md for more information.
+#
+# Some style notes:
+# - This file is used by ReDoc, which allows GitHub Flavored Markdown in
+# descriptions.
+# - There is no maximum line length, for ease of editing and pretty diffs.
+# - operationIds are in the format "NounVerb", with a singular noun.
+
+swagger: "2.0"
+schemes:
+ - "http"
+ - "https"
+produces:
+ - "application/json"
+ - "text/plain"
+consumes:
+ - "application/json"
+ - "text/plain"
+basePath: "/v1.48"
+info:
+ title: "Docker Engine API"
+ version: "1.48"
+ x-logo:
+ url: "https://docs.docker.com/assets/images/logo-docker-main.png"
+ description: |
+ The Engine API is an HTTP API served by Docker Engine. It is the API the
+ Docker client uses to communicate with the Engine, so everything the Docker
+ client can do can be done with the API.
+
+ Most of the client's commands map directly to API endpoints (e.g. `docker ps`
+ is `GET /containers/json`). The notable exception is running containers,
+ which consists of several API calls.
+
+ # Errors
+
+ The API uses standard HTTP status codes to indicate the success or failure
+ of the API call. The body of the response will be JSON in the following
+ format:
+
+ ```
+ {
+ "message": "page not found"
+ }
+ ```
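+
+    A client can decode this body into a small struct before surfacing the
+    message (a sketch in Go; `body` is assumed to already hold the raw
+    response bytes):
+
+    ```go
+    // errorResponse mirrors the JSON error body shown above.
+    type errorResponse struct {
+        Message string `json:"message"`
+    }
+
+    var apiErr errorResponse
+    if err := json.Unmarshal(body, &apiErr); err == nil {
+        fmt.Println("daemon error:", apiErr.Message)
+    }
+    ```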
+
+ # Versioning
+
+ The API is usually changed in each release, so API calls are versioned to
+ ensure that clients don't break. To lock to a specific version of the API,
+ you prefix the URL with its version, for example, call `/v1.30/info` to use
+ the v1.30 version of the `/info` endpoint. If the API version specified in
+    the URL is not supported by the daemon, an HTTP `400 Bad Request` error message
+ is returned.
+
+ If you omit the version-prefix, the current version of the API (v1.48) is used.
+ For example, calling `/info` is the same as calling `/v1.48/info`. Using the
+ API without a version-prefix is deprecated and will be removed in a future release.
+
+ Engine releases in the near future should support this version of the API,
+ so your client will continue to work even if it is talking to a newer Engine.
+
+ The API uses an open schema model, which means the server may add extra properties
+ to responses. Likewise, the server will ignore any extra query parameters and
+ request body properties. When you write clients, you need to ignore additional
+ properties in responses to ensure they do not break when talking to newer
+ daemons.
+
+
+ # Authentication
+
+ Authentication for registries is handled client side. The client has to send
+ authentication details to various endpoints that need to communicate with
+ registries, such as `POST /images/(name)/push`. These are sent as
+ `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5)
+ (JSON) string with the following structure:
+
+ ```
+ {
+ "username": "string",
+ "password": "string",
+ "email": "string",
+ "serveraddress": "string"
+ }
+ ```
+
+ The `serveraddress` is a domain/IP without a protocol. Throughout this
+ structure, double quotes are required.
+
+ If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth),
+ you can just pass this instead of credentials:
+
+ ```
+ {
+ "identitytoken": "9cbaf023786cd7..."
+ }
+ ```
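+
+    For example, a Go client might construct this header with only the
+    standard library (a minimal sketch; `req` is assumed to be the pending
+    `*http.Request`, and error handling is omitted):
+
+    ```go
+    // Marshal the credentials and attach them as the X-Registry-Auth header.
+    authConfig := map[string]string{
+        "username":      "user",
+        "password":      "secret",
+        "serveraddress": "registry.example.com",
+    }
+    buf, _ := json.Marshal(authConfig)
+    req.Header.Set("X-Registry-Auth", base64.URLEncoding.EncodeToString(buf))
+    ```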
+
+# The tags on paths define the menu sections in the ReDoc documentation, so
+# the usage of tags must make sense for that:
+# - They should be singular, not plural.
+# - There should not be too many tags, or the menu becomes unwieldy. For
+# example, it is preferable to add a path to the "System" tag instead of
+# creating a tag with a single path in it.
+# - The order of tags in this list defines the order in the menu.
+tags:
+ # Primary objects
+ - name: "Container"
+ x-displayName: "Containers"
+ description: |
+ Create and manage containers.
+ - name: "Image"
+ x-displayName: "Images"
+ - name: "Network"
+ x-displayName: "Networks"
+ description: |
+ Networks are user-defined networks that containers can be attached to.
+ See the [networking documentation](https://docs.docker.com/network/)
+ for more information.
+ - name: "Volume"
+ x-displayName: "Volumes"
+ description: |
+ Create and manage persistent storage that can be attached to containers.
+ - name: "Exec"
+ x-displayName: "Exec"
+ description: |
+ Run new commands inside running containers. Refer to the
+ [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/)
+ for more information.
+
+ To exec a command in a container, you first need to create an exec instance,
+ then start it. These two API endpoints are wrapped up in a single command-line
+ command, `docker exec`.
+
+ # Swarm things
+ - name: "Swarm"
+ x-displayName: "Swarm"
+ description: |
+ Engines can be clustered together in a swarm. Refer to the
+ [swarm mode documentation](https://docs.docker.com/engine/swarm/)
+ for more information.
+ - name: "Node"
+ x-displayName: "Nodes"
+ description: |
+ Nodes are instances of the Engine participating in a swarm. Swarm mode
+ must be enabled for these endpoints to work.
+ - name: "Service"
+ x-displayName: "Services"
+ description: |
+ Services are the definitions of tasks to run on a swarm. Swarm mode must
+ be enabled for these endpoints to work.
+ - name: "Task"
+ x-displayName: "Tasks"
+ description: |
+ A task is a container running on a swarm. It is the atomic scheduling unit
+ of swarm. Swarm mode must be enabled for these endpoints to work.
+ - name: "Secret"
+ x-displayName: "Secrets"
+ description: |
+ Secrets are sensitive data that can be used by services. Swarm mode must
+ be enabled for these endpoints to work.
+ - name: "Config"
+ x-displayName: "Configs"
+ description: |
+ Configs are application configurations that can be used by services. Swarm
+ mode must be enabled for these endpoints to work.
+ # System things
+ - name: "Plugin"
+ x-displayName: "Plugins"
+ - name: "System"
+ x-displayName: "System"
+
+definitions:
+ Port:
+ type: "object"
+ description: "An open port on a container"
+ required: [PrivatePort, Type]
+ properties:
+ IP:
+ type: "string"
+ format: "ip-address"
+ description: "Host IP address that the container's port is mapped to"
+ PrivatePort:
+ type: "integer"
+ format: "uint16"
+ x-nullable: false
+ description: "Port on the container"
+ PublicPort:
+ type: "integer"
+ format: "uint16"
+ description: "Port exposed on the host"
+ Type:
+ type: "string"
+ x-nullable: false
+ enum: ["tcp", "udp", "sctp"]
+ example:
+ PrivatePort: 8080
+ PublicPort: 80
+ Type: "tcp"
+
+ MountPoint:
+ type: "object"
+ description: |
+ MountPoint represents a mount point configuration inside the container.
+ This is used for reporting the mountpoints in use by a container.
+ properties:
+ Type:
+ description: |
+ The mount type:
+
+ - `bind` a mount of a file or directory from the host into the container.
+ - `volume` a docker volume with the given `Name`.
+ - `image` a docker image
+ - `tmpfs` a `tmpfs`.
+ - `npipe` a named pipe from the host into the container.
+ - `cluster` a Swarm cluster volume
+ type: "string"
+ enum:
+ - "bind"
+ - "volume"
+ - "image"
+ - "tmpfs"
+ - "npipe"
+ - "cluster"
+ example: "volume"
+ Name:
+ description: |
+ Name is the name reference to the underlying data defined by `Source`
+ e.g., the volume name.
+ type: "string"
+ example: "myvolume"
+ Source:
+ description: |
+ Source location of the mount.
+
+ For volumes, this contains the storage location of the volume (within
+ `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains
+ the source (host) part of the bind-mount. For `tmpfs` mount points, this
+ field is empty.
+ type: "string"
+ example: "/var/lib/docker/volumes/myvolume/_data"
+ Destination:
+ description: |
+ Destination is the path relative to the container root (`/`) where
+ the `Source` is mounted inside the container.
+ type: "string"
+ example: "/usr/share/nginx/html/"
+ Driver:
+ description: |
+ Driver is the volume driver used to create the volume (if it is a volume).
+ type: "string"
+ example: "local"
+ Mode:
+ description: |
+        Mode is a comma-separated list of options supplied by the user when
+ creating the bind/volume mount.
+
+ The default is platform-specific (`"z"` on Linux, empty on Windows).
+ type: "string"
+ example: "z"
+ RW:
+ description: |
+ Whether the mount is mounted writable (read-write).
+ type: "boolean"
+ example: true
+ Propagation:
+ description: |
+ Propagation describes how mounts are propagated from the host into the
+ mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt)
+ for details. This field is not used on Windows.
+ type: "string"
+ example: ""
+
+ DeviceMapping:
+ type: "object"
+ description: "A device mapping between the host and container"
+ properties:
+ PathOnHost:
+ type: "string"
+ PathInContainer:
+ type: "string"
+ CgroupPermissions:
+ type: "string"
+ example:
+ PathOnHost: "/dev/deviceName"
+ PathInContainer: "/dev/deviceName"
+ CgroupPermissions: "mrw"
+
+ DeviceRequest:
+ type: "object"
+ description: "A request for devices to be sent to device drivers"
+ properties:
+ Driver:
+ type: "string"
+ example: "nvidia"
+ Count:
+ type: "integer"
+ example: -1
+ DeviceIDs:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "0"
+ - "1"
+ - "GPU-fef8089b-4820-abfc-e83e-94318197576e"
+ Capabilities:
+ description: |
+ A list of capabilities; an OR list of AND lists of capabilities.
+ type: "array"
+ items:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ # gpu AND nvidia AND compute
+ - ["gpu", "nvidia", "compute"]
+ Options:
+ description: |
+        Driver-specific options, specified as key/value pairs. These options
+ are passed directly to the driver.
+ type: "object"
+ additionalProperties:
+ type: "string"
+
+ ThrottleDevice:
+ type: "object"
+ properties:
+ Path:
+ description: "Device path"
+ type: "string"
+ Rate:
+ description: "Rate"
+ type: "integer"
+ format: "int64"
+ minimum: 0
+
+ Mount:
+ type: "object"
+ properties:
+ Target:
+ description: "Container path."
+ type: "string"
+ Source:
+ description: "Mount source (e.g. a volume name, a host path)."
+ type: "string"
+ Type:
+ description: |
+ The mount type. Available types:
+
+ - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container.
+ - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.
+ - `image` Mounts an image.
+        - `tmpfs` Creates a tmpfs with the given options. The mount source cannot be specified for tmpfs.
+ - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container.
+ - `cluster` a Swarm cluster volume
+ type: "string"
+ enum:
+ - "bind"
+ - "volume"
+ - "image"
+ - "tmpfs"
+ - "npipe"
+ - "cluster"
+ ReadOnly:
+ description: "Whether the mount should be read-only."
+ type: "boolean"
+ Consistency:
+ description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`."
+ type: "string"
+ BindOptions:
+ description: "Optional configuration for the `bind` type."
+ type: "object"
+ properties:
+ Propagation:
+ description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`."
+ type: "string"
+ enum:
+ - "private"
+ - "rprivate"
+ - "shared"
+ - "rshared"
+ - "slave"
+ - "rslave"
+ NonRecursive:
+ description: "Disable recursive bind mount."
+ type: "boolean"
+ default: false
+ CreateMountpoint:
+ description: "Create mount point on host if missing"
+ type: "boolean"
+ default: false
+ ReadOnlyNonRecursive:
+ description: |
+ Make the mount non-recursively read-only, but still leave the mount recursive
+ (unless NonRecursive is set to `true` in conjunction).
+
+ Added in v1.44, before that version all read-only mounts were
+ non-recursive by default. To match the previous behaviour this
+ will default to `true` for clients on versions prior to v1.44.
+ type: "boolean"
+ default: false
+ ReadOnlyForceRecursive:
+ description: "Raise an error if the mount cannot be made recursively read-only."
+ type: "boolean"
+ default: false
+ VolumeOptions:
+ description: "Optional configuration for the `volume` type."
+ type: "object"
+ properties:
+ NoCopy:
+ description: "Populate volume with data from the target."
+ type: "boolean"
+ default: false
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ DriverConfig:
+ description: "Map of driver specific options"
+ type: "object"
+ properties:
+ Name:
+ description: "Name of the driver to use to create the volume."
+ type: "string"
+ Options:
+ description: "key/value map of driver specific options."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Subpath:
+ description: "Source path inside the volume. Must be relative without any back traversals."
+ type: "string"
+ example: "dir-inside-volume/subdirectory"
+ ImageOptions:
+ description: "Optional configuration for the `image` type."
+ type: "object"
+ properties:
+ Subpath:
+ description: "Source path inside the image. Must be relative without any back traversals."
+ type: "string"
+ example: "dir-inside-image/subdirectory"
+ TmpfsOptions:
+ description: "Optional configuration for the `tmpfs` type."
+ type: "object"
+ properties:
+ SizeBytes:
+ description: "The size for the tmpfs mount in bytes."
+ type: "integer"
+ format: "int64"
+ Mode:
+ description: "The permission mode for the tmpfs mount in an integer."
+ type: "integer"
+ Options:
+ description: |
+ The options to be passed to the tmpfs mount. An array of arrays.
+ Flag options should be provided as 1-length arrays. Other types
+          should be provided as 2-length arrays, where the first item is
+ the key and the second the value.
+ type: "array"
+ items:
+ type: "array"
+ minItems: 1
+ maxItems: 2
+ items:
+ type: "string"
+ example:
+ [["noexec"]]
+
+ RestartPolicy:
+ description: |
+ The behavior to apply when the container exits. The default is not to
+ restart.
+
+    An ever-increasing delay (double the previous delay, starting at 100ms,
+    so 100ms, 200ms, 400ms, and so on) is added before each restart to
+    prevent flooding the server.
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ description: |
+ - Empty string means not to restart
+ - `no` Do not automatically restart
+ - `always` Always restart
+ - `unless-stopped` Restart always except when the user has manually stopped the container
+ - `on-failure` Restart only when the container exit code is non-zero
+ enum:
+ - ""
+ - "no"
+ - "always"
+ - "unless-stopped"
+ - "on-failure"
+ MaximumRetryCount:
+ type: "integer"
+ description: |
+ If `on-failure` is used, the number of times to retry before giving up.
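+
+        For example, `{"Name": "on-failure", "MaximumRetryCount": 3}` restarts
+        the container at most three times after it exits with a non-zero code.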
+
+ Resources:
+ description: "A container's resources (cgroups config, ulimits, etc)"
+ type: "object"
+ properties:
+ # Applicable to all platforms
+ CpuShares:
+ description: |
+ An integer value representing this container's relative CPU weight
+ versus other containers.
+ type: "integer"
+ Memory:
+ description: "Memory limit in bytes."
+ type: "integer"
+ format: "int64"
+ default: 0
+ # Applicable to UNIX platforms
+ CgroupParent:
+ description: |
+ Path to `cgroups` under which the container's `cgroup` is created. If
+ the path is not absolute, the path is considered to be relative to the
+ `cgroups` path of the init process. Cgroups are created if they do not
+ already exist.
+ type: "string"
+ BlkioWeight:
+ description: "Block IO weight (relative weight)."
+ type: "integer"
+ minimum: 0
+ maximum: 1000
+ BlkioWeightDevice:
+ description: |
+ Block IO weight (relative device weight) in the form:
+
+ ```
+ [{"Path": "device_path", "Weight": weight}]
+ ```
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Path:
+ type: "string"
+ Weight:
+ type: "integer"
+ minimum: 0
+ BlkioDeviceReadBps:
+ description: |
+ Limit read rate (bytes per second) from a device, in the form:
+
+ ```
+ [{"Path": "device_path", "Rate": rate}]
+ ```
+ type: "array"
+ items:
+ $ref: "#/definitions/ThrottleDevice"
+ BlkioDeviceWriteBps:
+ description: |
+ Limit write rate (bytes per second) to a device, in the form:
+
+ ```
+ [{"Path": "device_path", "Rate": rate}]
+ ```
+ type: "array"
+ items:
+ $ref: "#/definitions/ThrottleDevice"
+ BlkioDeviceReadIOps:
+ description: |
+ Limit read rate (IO per second) from a device, in the form:
+
+ ```
+ [{"Path": "device_path", "Rate": rate}]
+ ```
+ type: "array"
+ items:
+ $ref: "#/definitions/ThrottleDevice"
+ BlkioDeviceWriteIOps:
+ description: |
+ Limit write rate (IO per second) to a device, in the form:
+
+ ```
+ [{"Path": "device_path", "Rate": rate}]
+ ```
+ type: "array"
+ items:
+ $ref: "#/definitions/ThrottleDevice"
+ CpuPeriod:
+ description: "The length of a CPU period in microseconds."
+ type: "integer"
+ format: "int64"
+ CpuQuota:
+ description: |
+ Microseconds of CPU time that the container can get in a CPU period.
+ type: "integer"
+ format: "int64"
+ CpuRealtimePeriod:
+ description: |
+ The length of a CPU real-time period in microseconds. Set to 0 to
+        allocate no time to real-time tasks.
+ type: "integer"
+ format: "int64"
+ CpuRealtimeRuntime:
+ description: |
+ The length of a CPU real-time runtime in microseconds. Set to 0 to
+        allocate no time to real-time tasks.
+ type: "integer"
+ format: "int64"
+ CpusetCpus:
+ description: |
+ CPUs in which to allow execution (e.g., `0-3`, `0,1`).
+ type: "string"
+ example: "0-3"
+ CpusetMems:
+ description: |
+        Memory nodes (MEMs) in which to allow execution (e.g., `0-3`, `0,1`). Only
+ effective on NUMA systems.
+ type: "string"
+ Devices:
+ description: "A list of devices to add to the container."
+ type: "array"
+ items:
+ $ref: "#/definitions/DeviceMapping"
+ DeviceCgroupRules:
+ description: "a list of cgroup rules to apply to the container"
+ type: "array"
+ items:
+ type: "string"
+ example: "c 13:* rwm"
+ DeviceRequests:
+ description: |
+ A list of requests for devices to be sent to device drivers.
+ type: "array"
+ items:
+ $ref: "#/definitions/DeviceRequest"
+ KernelMemoryTCP:
+ description: |
+ Hard limit for kernel TCP buffer memory (in bytes). Depending on the
+ OCI runtime in use, this option may be ignored. It is no longer supported
+ by the default (runc) runtime.
+
+ This field is omitted when empty.
+ type: "integer"
+ format: "int64"
+ MemoryReservation:
+ description: "Memory soft limit in bytes."
+ type: "integer"
+ format: "int64"
+ MemorySwap:
+ description: |
+ Total memory limit (memory + swap). Set as `-1` to enable unlimited
+ swap.
+ type: "integer"
+ format: "int64"
+ MemorySwappiness:
+ description: |
+ Tune a container's memory swappiness behavior. Accepts an integer
+ between 0 and 100.
+ type: "integer"
+ format: "int64"
+ minimum: 0
+ maximum: 100
+ NanoCpus:
+ description: "CPU quota in units of 10<sup>-9</sup> CPUs."
+ type: "integer"
+ format: "int64"
+ OomKillDisable:
+ description: "Disable OOM Killer for the container."
+ type: "boolean"
+ Init:
+ description: |
+ Run an init inside the container that forwards signals and reaps
+ processes. This field is omitted if empty, and the default (as
+ configured on the daemon) is used.
+ type: "boolean"
+ x-nullable: true
+ PidsLimit:
+ description: |
+ Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null`
+ to not change.
+ type: "integer"
+ format: "int64"
+ x-nullable: true
+ Ulimits:
+ description: |
+ A list of resource limits to set in the container. For example:
+
+ ```
+ {"Name": "nofile", "Soft": 1024, "Hard": 2048}
+ ```
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Name:
+ description: "Name of ulimit"
+ type: "string"
+ Soft:
+ description: "Soft limit"
+ type: "integer"
+ Hard:
+ description: "Hard limit"
+ type: "integer"
+ # Applicable to Windows
+ CpuCount:
+ description: |
+ The number of usable CPUs (Windows only).
+
+ On Windows Server containers, the processor resource controls are
+ mutually exclusive. The order of precedence is `CPUCount` first, then
+ `CPUShares`, and `CPUPercent` last.
+ type: "integer"
+ format: "int64"
+ CpuPercent:
+ description: |
+ The usable percentage of the available CPUs (Windows only).
+
+ On Windows Server containers, the processor resource controls are
+ mutually exclusive. The order of precedence is `CPUCount` first, then
+ `CPUShares`, and `CPUPercent` last.
+ type: "integer"
+ format: "int64"
+ IOMaximumIOps:
+ description: "Maximum IOps for the container system drive (Windows only)"
+ type: "integer"
+ format: "int64"
+ IOMaximumBandwidth:
+ description: |
+ Maximum IO in bytes per second for the container system drive
+ (Windows only).
+ type: "integer"
+ format: "int64"
+
+ Limit:
+ description: |
+ An object describing a limit on resources which can be requested by a task.
+ type: "object"
+ properties:
+ NanoCPUs:
+ type: "integer"
+ format: "int64"
+ example: 4000000000
+ MemoryBytes:
+ type: "integer"
+ format: "int64"
+ example: 8272408576
+ Pids:
+ description: |
+ Limits the maximum number of PIDs in the container. Set `0` for unlimited.
+ type: "integer"
+ format: "int64"
+ default: 0
+ example: 100
+
+ ResourceObject:
+ description: |
+ An object describing the resources which can be advertised by a node and
+ requested by a task.
+ type: "object"
+ properties:
+ NanoCPUs:
+ type: "integer"
+ format: "int64"
+ example: 4000000000
+ MemoryBytes:
+ type: "integer"
+ format: "int64"
+ example: 8272408576
+ GenericResources:
+ $ref: "#/definitions/GenericResources"
+
+ GenericResources:
+ description: |
+    User-defined resources can be either Integer resources (e.g., `SSD=3`) or
+    String resources (e.g., `GPU=UUID1`).
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ NamedResourceSpec:
+ type: "object"
+ properties:
+ Kind:
+ type: "string"
+ Value:
+ type: "string"
+ DiscreteResourceSpec:
+ type: "object"
+ properties:
+ Kind:
+ type: "string"
+ Value:
+ type: "integer"
+ format: "int64"
+ example:
+ - DiscreteResourceSpec:
+ Kind: "SSD"
+ Value: 3
+ - NamedResourceSpec:
+ Kind: "GPU"
+ Value: "UUID1"
+ - NamedResourceSpec:
+ Kind: "GPU"
+ Value: "UUID2"
+
+ HealthConfig:
+ description: "A test to perform to check that the container is healthy."
+ type: "object"
+ properties:
+ Test:
+ description: |
+ The test to perform. Possible values are:
+
+ - `[]` inherit healthcheck from image or parent image
+ - `["NONE"]` disable healthcheck
+ - `["CMD", args...]` exec arguments directly
+ - `["CMD-SHELL", command]` run command with system's default shell
+ type: "array"
+ items:
+ type: "string"
+ Interval:
+ description: |
+ The time to wait between checks in nanoseconds. It should be 0 or at
+ least 1000000 (1 ms). 0 means inherit.
+ type: "integer"
+ format: "int64"
+ Timeout:
+ description: |
+ The time to wait before considering the check to have hung. It should
+ be 0 or at least 1000000 (1 ms). 0 means inherit.
+ type: "integer"
+ format: "int64"
+ Retries:
+ description: |
+ The number of consecutive failures needed to consider a container as
+ unhealthy. 0 means inherit.
+ type: "integer"
+ StartPeriod:
+ description: |
+ Start period for the container to initialize before starting
+ health-retries countdown in nanoseconds. It should be 0 or at least
+ 1000000 (1 ms). 0 means inherit.
+ type: "integer"
+ format: "int64"
+ StartInterval:
+ description: |
+ The time to wait between checks in nanoseconds during the start period.
+ It should be 0 or at least 1000000 (1 ms). 0 means inherit.
+ type: "integer"
+ format: "int64"
+
+ Health:
+ description: |
+ Health stores information about the container's healthcheck results.
+ type: "object"
+ x-nullable: true
+ properties:
+ Status:
+ description: |
+ Status is one of `none`, `starting`, `healthy` or `unhealthy`
+
+ - "none" Indicates there is no healthcheck
+ - "starting" Starting indicates that the container is not yet ready
+ - "healthy" Healthy indicates that the container is running correctly
+ - "unhealthy" Unhealthy indicates that the container has a problem
+ type: "string"
+ enum:
+ - "none"
+ - "starting"
+ - "healthy"
+ - "unhealthy"
+ example: "healthy"
+ FailingStreak:
+ description: "FailingStreak is the number of consecutive failures"
+ type: "integer"
+ example: 0
+ Log:
+ type: "array"
+ description: |
+ Log contains the last few results (oldest first)
+ items:
+ $ref: "#/definitions/HealthcheckResult"
+
+ HealthcheckResult:
+ description: |
+ HealthcheckResult stores information about a single run of a healthcheck probe
+ type: "object"
+ x-nullable: true
+ properties:
+ Start:
+ description: |
+ Date and time at which this check started in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "date-time"
+ example: "2020-01-04T10:44:24.496525531Z"
+ End:
+ description: |
+ Date and time at which this check ended in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2020-01-04T10:45:21.364524523Z"
+ ExitCode:
+ description: |
+ ExitCode meanings:
+
+ - `0` healthy
+ - `1` unhealthy
+ - `2` reserved (considered unhealthy)
+ - other values: error running probe
+ type: "integer"
+ example: 0
+ Output:
+ description: "Output from last check"
+ type: "string"
+
+ HostConfig:
+ description: "Container configuration that depends on the host we are running on"
+ allOf:
+ - $ref: "#/definitions/Resources"
+ - type: "object"
+ properties:
+ # Applicable to all platforms
+ Binds:
+ type: "array"
+ description: |
+ A list of volume bindings for this container. Each volume binding
+ is a string in one of these forms:
+
+ - `host-src:container-dest[:options]` to bind-mount a host path
+ into the container. Both `host-src`, and `container-dest` must
+ be an _absolute_ path.
+ - `volume-name:container-dest[:options]` to bind-mount a volume
+ managed by a volume driver into the container. `container-dest`
+ must be an _absolute_ path.
+
+ `options` is an optional, comma-delimited list of:
+
+ - `nocopy` disables automatic copying of data from the container
+ path to the volume. The `nocopy` flag only applies to named volumes.
+ - `[ro|rw]` mounts a volume read-only or read-write, respectively.
+ If omitted or set to `rw`, volumes are mounted read-write.
+ - `[z|Z]` applies SELinux labels to allow or deny multiple containers
+ to read and write to the same volume.
+ - `z`: a _shared_ content label is applied to the content. This
+ label indicates that multiple containers can share the volume
+ content, for both reading and writing.
+ - `Z`: a _private unshared_ label is applied to the content.
+ This label indicates that only the current container can use
+ a private volume. Labeling systems such as SELinux require
+ proper labels to be placed on volume content that is mounted
+ into a container. Without a label, the security system can
+ prevent a container's processes from using the content. By
+ default, the labels set by the host operating system are not
+ modified.
+ - `[[r]shared|[r]slave|[r]private]` specifies mount
+ [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).
+ This only applies to bind-mounted volumes, not internal volumes
+ or named volumes. Mount propagation requires the source mount
+ point (the location where the source directory is mounted in the
+ host operating system) to have the correct propagation properties.
+ For shared volumes, the source mount point must be set to `shared`.
+ For slave volumes, the mount must be set to either `shared` or
+ `slave`.
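+
+            For example, `/mnt/data:/data:ro` bind-mounts the host directory
+            `/mnt/data` read-only at `/data` in the container, and
+            `myvolume:/data:rw,nocopy` mounts the volume `myvolume` read-write
+            without copying existing data from the container path.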
+ items:
+ type: "string"
+ ContainerIDFile:
+ type: "string"
+ description: "Path to a file where the container ID is written"
+ example: ""
+ LogConfig:
+ type: "object"
+ description: "The logging configuration for this container"
+ properties:
+ Type:
+ description: |-
+ Name of the logging driver used for the container or "none"
+ if logging is disabled.
+ type: "string"
+ enum:
+ - "local"
+ - "json-file"
+ - "syslog"
+ - "journald"
+ - "gelf"
+ - "fluentd"
+ - "awslogs"
+ - "splunk"
+ - "etwlogs"
+ - "none"
+ Config:
+ description: |-
+ Driver-specific configuration options for the logging driver.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ "max-file": "5"
+ "max-size": "10m"
+ NetworkMode:
+ type: "string"
+ description: |
+ Network mode to use for this container. Supported standard values
+ are: `bridge`, `host`, `none`, and `container:<name|id>`. Any
+            other value is taken as a custom network's name to which this
+            container should connect.
+ PortBindings:
+ $ref: "#/definitions/PortMap"
+ RestartPolicy:
+ $ref: "#/definitions/RestartPolicy"
+ AutoRemove:
+ type: "boolean"
+ description: |
+ Automatically remove the container when the container's process
+ exits. This has no effect if `RestartPolicy` is set.
+ VolumeDriver:
+ type: "string"
+ description: "Driver that this container uses to mount volumes."
+ VolumesFrom:
+ type: "array"
+ description: |
+ A list of volumes to inherit from another container, specified in
+ the form `<container name>[:<ro|rw>]`.
+ items:
+ type: "string"
+ Mounts:
+ description: |
+ Specification for mounts to be added to the container.
+ type: "array"
+ items:
+ $ref: "#/definitions/Mount"
+ ConsoleSize:
+ type: "array"
+ description: |
+            Initial console size, as a `[height, width]` array.
+ x-nullable: true
+ minItems: 2
+ maxItems: 2
+ items:
+ type: "integer"
+ minimum: 0
+ example: [80, 64]
+ Annotations:
+ type: "object"
+ description: |
+            Arbitrary non-identifying metadata attached to the container and
+ provided to the runtime when the container is started.
+ additionalProperties:
+ type: "string"
+
+ # Applicable to UNIX platforms
+ CapAdd:
+ type: "array"
+ description: |
+ A list of kernel capabilities to add to the container. Conflicts
+ with option 'Capabilities'.
+ items:
+ type: "string"
+ CapDrop:
+ type: "array"
+ description: |
+ A list of kernel capabilities to drop from the container. Conflicts
+ with option 'Capabilities'.
+ items:
+ type: "string"
+ CgroupnsMode:
+ type: "string"
+ enum:
+ - "private"
+ - "host"
+ description: |
+ cgroup namespace mode for the container. Possible values are:
+
+ - `"private"`: the container runs in its own private cgroup namespace
+ - `"host"`: use the host system's cgroup namespace
+
+ If not specified, the daemon default is used, which can either be `"private"`
+ or `"host"`, depending on daemon version, kernel support and configuration.
+ Dns:
+ type: "array"
+ description: "A list of DNS servers for the container to use."
+ items:
+ type: "string"
+ DnsOptions:
+ type: "array"
+ description: "A list of DNS options."
+ items:
+ type: "string"
+ DnsSearch:
+ type: "array"
+ description: "A list of DNS search domains."
+ items:
+ type: "string"
+ ExtraHosts:
+ type: "array"
+ description: |
+ A list of hostnames/IP mappings to add to the container's `/etc/hosts`
+ file. Specified in the form `["hostname:IP"]`.
+ items:
+ type: "string"
+ GroupAdd:
+ type: "array"
+ description: |
+ A list of additional groups that the container process will run as.
+ items:
+ type: "string"
+ IpcMode:
+ type: "string"
+ description: |
+ IPC sharing mode for the container. Possible values are:
+
+ - `"none"`: own private IPC namespace, with /dev/shm not mounted
+ - `"private"`: own private IPC namespace
+ - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers
+ - `"container:<name|id>"`: join another (shareable) container's IPC namespace
+ - `"host"`: use the host system's IPC namespace
+
+ If not specified, daemon default is used, which can either be `"private"`
+ or `"shareable"`, depending on daemon version and configuration.
+ Cgroup:
+ type: "string"
+ description: "Cgroup to use for the container."
+ Links:
+ type: "array"
+ description: |
+ A list of links for the container in the form `container_name:alias`.
+ items:
+ type: "string"
+ OomScoreAdj:
+ type: "integer"
+ description: |
+ An integer value containing the score given to the container in
+ order to tune OOM killer preferences.
+ example: 500
+ PidMode:
+ type: "string"
+ description: |
+ Set the PID (Process) Namespace mode for the container. It can be
+ either:
+
+ - `"container:<name|id>"`: joins another container's PID namespace
+ - `"host"`: use the host's PID namespace inside the container
+ Privileged:
+ type: "boolean"
+ description: |-
+ Gives the container full access to the host.
+ PublishAllPorts:
+ type: "boolean"
+ description: |
+ Allocates an ephemeral host port for all of a container's
+ exposed ports.
+
+ Ports are de-allocated when the container stops and allocated when
+ the container starts. The allocated port might be changed when
+ restarting the container.
+
+ The port is selected from the ephemeral port range that depends on
+ the kernel. For example, on Linux the range is defined by
+ `/proc/sys/net/ipv4/ip_local_port_range`.
+ ReadonlyRootfs:
+ type: "boolean"
+ description: "Mount the container's root filesystem as read only."
+ SecurityOpt:
+ type: "array"
+ description: |
+ A list of string values to customize labels for MLS systems, such
+ as SELinux.
+ items:
+ type: "string"
+ StorageOpt:
+ type: "object"
+ description: |
+ Storage driver options for this container, in the form `{"size": "120G"}`.
+ additionalProperties:
+ type: "string"
+ Tmpfs:
+ type: "object"
+ description: |
+ A map of container directories which should be replaced by tmpfs
+ mounts, and their corresponding mount options. For example:
+
+ ```
+ { "/run": "rw,noexec,nosuid,size=65536k" }
+ ```
+ additionalProperties:
+ type: "string"
+ UTSMode:
+ type: "string"
+ description: "UTS namespace to use for the container."
+ UsernsMode:
+ type: "string"
+ description: |
+            Sets the user namespace mode for the container when the user
+            namespace remapping option is enabled.
+ ShmSize:
+ type: "integer"
+ format: "int64"
+ description: |
+ Size of `/dev/shm` in bytes. If omitted, the system uses 64MB.
+ minimum: 0
+ Sysctls:
+ type: "object"
+ x-nullable: true
+ description: |-
+ A list of kernel parameters (sysctls) to set in the container.
+
+ This field is omitted if not set.
+ additionalProperties:
+ type: "string"
+ example:
+ "net.ipv4.ip_forward": "1"
+ Runtime:
+ type: "string"
+ x-nullable: true
+ description: |-
+ Runtime to use with this container.
+ # Applicable to Windows
+ Isolation:
+ type: "string"
+ description: |
+ Isolation technology of the container. (Windows only)
+ enum:
+ - "default"
+ - "process"
+ - "hyperv"
+ - ""
+ MaskedPaths:
+ type: "array"
+ description: |
+ The list of paths to be masked inside the container (this overrides
+ the default set of paths).
+ items:
+ type: "string"
+ example:
+ - "/proc/asound"
+ - "/proc/acpi"
+ - "/proc/kcore"
+ - "/proc/keys"
+ - "/proc/latency_stats"
+ - "/proc/timer_list"
+ - "/proc/timer_stats"
+ - "/proc/sched_debug"
+ - "/proc/scsi"
+ - "/sys/firmware"
+ - "/sys/devices/virtual/powercap"
+ ReadonlyPaths:
+ type: "array"
+ description: |
+ The list of paths to be set as read-only inside the container
+ (this overrides the default set of paths).
+ items:
+ type: "string"
+ example:
+ - "/proc/bus"
+ - "/proc/fs"
+ - "/proc/irq"
+ - "/proc/sys"
+ - "/proc/sysrq-trigger"
+
+ ContainerConfig:
+ description: |
+ Configuration for a container that is portable between hosts.
+ type: "object"
+ properties:
+ Hostname:
+ description: |
+ The hostname to use for the container, as a valid RFC 1123 hostname.
+ type: "string"
+ example: "439f4e91bd1d"
+ Domainname:
+ description: |
+ The domain name to use for the container.
+ type: "string"
+ User:
+ description: |-
+ Commands run as this user inside the container. If omitted, commands
+ run as the user specified in the image the container was started from.
+
+ Can be either user-name or UID, and optional group-name or GID,
+ separated by a colon (`<user-name|UID>[<:group-name|GID>]`).
+ type: "string"
+ example: "123:456"
+ AttachStdin:
+ description: "Whether to attach to `stdin`."
+ type: "boolean"
+ default: false
+ AttachStdout:
+ description: "Whether to attach to `stdout`."
+ type: "boolean"
+ default: true
+ AttachStderr:
+ description: "Whether to attach to `stderr`."
+ type: "boolean"
+ default: true
+ ExposedPorts:
+ description: |
+ An object mapping ports to an empty object in the form:
+
+ `{"<port>/<tcp|udp|sctp>": {}}`
+ type: "object"
+ x-nullable: true
+ additionalProperties:
+ type: "object"
+ enum:
+ - {}
+ default: {}
+ example: {
+ "80/tcp": {},
+ "443/tcp": {}
+ }
+ Tty:
+ description: |
+ Attach standard streams to a TTY, including `stdin` if it is not closed.
+ type: "boolean"
+ default: false
+ OpenStdin:
+ description: "Open `stdin`"
+ type: "boolean"
+ default: false
+ StdinOnce:
+ description: "Close `stdin` after one attached client disconnects"
+ type: "boolean"
+ default: false
+ Env:
+ description: |
+ A list of environment variables to set inside the container in the
+ form `["VAR=value", ...]`. A variable without `=` is removed from the
+        environment, rather than having an empty value.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ Cmd:
+ description: |
+ Command to run specified as a string or an array of strings.
+ type: "array"
+ items:
+ type: "string"
+ example: ["/bin/sh"]
+ Healthcheck:
+ $ref: "#/definitions/HealthConfig"
+ ArgsEscaped:
+ description: "Command is already escaped (Windows only)"
+ type: "boolean"
+ default: false
+ example: false
+ x-nullable: true
+ Image:
+ description: |
+ The name (or reference) of the image to use when creating the container,
+ or which was used when the container was created.
+ type: "string"
+ example: "example-image:1.0"
+ Volumes:
+ description: |
+ An object mapping mount point paths inside the container to empty
+ objects.
+ type: "object"
+ additionalProperties:
+ type: "object"
+ enum:
+ - {}
+ default: {}
+ WorkingDir:
+ description: "The working directory for commands to run in."
+ type: "string"
+ example: "/public/"
+ Entrypoint:
+ description: |
+ The entry point for the container as a string or an array of strings.
+
+ If the array consists of exactly one empty string (`[""]`) then the
+ entry point is reset to system default (i.e., the entry point used by
+ docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).
+ type: "array"
+ items:
+ type: "string"
+ example: []
+ NetworkDisabled:
+ description: "Disable networking for the container."
+ type: "boolean"
+ x-nullable: true
+ MacAddress:
+ description: |
+ MAC address of the container.
+
+ Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead.
+ type: "string"
+ x-nullable: true
+ OnBuild:
+ description: |
+ `ONBUILD` metadata that was defined in the image's `Dockerfile`.
+ type: "array"
+ x-nullable: true
+ items:
+ type: "string"
+ example: []
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ StopSignal:
+ description: |
+ Signal to stop a container as a string or unsigned integer.
+ type: "string"
+ example: "SIGTERM"
+ x-nullable: true
+ StopTimeout:
+ description: "Timeout to stop a container in seconds."
+ type: "integer"
+ default: 10
+ x-nullable: true
+ Shell:
+ description: |
+ Shell for when `RUN`, `CMD`, and `ENTRYPOINT` use a shell.
+ type: "array"
+ x-nullable: true
+ items:
+ type: "string"
+ example: ["/bin/sh", "-c"]
+
+ ImageConfig:
+ description: |
+ Configuration of the image. These fields are used as defaults
+ when starting a container from the image.
+ type: "object"
+ properties:
+ Hostname:
+ description: |
+ The hostname to use for the container, as a valid RFC 1123 hostname.
+
+ <p><br /></p>
+
+ > **Deprecated**: this field is not part of the image specification and is
+ > always empty. It must not be used, and will be removed in API v1.48.
+ type: "string"
+ example: ""
+ Domainname:
+ description: |
+ The domain name to use for the container.
+
+ <p><br /></p>
+
+ > **Deprecated**: this field is not part of the image specification and is
+ > always empty. It must not be used, and will be removed in API v1.48.
+ type: "string"
+ example: ""
+ User:
+ description: "The user that commands are run as inside the container."
+ type: "string"
+ example: "web:web"
+ AttachStdin:
+ description: |
+ Whether to attach to `stdin`.
+
+ <p><br /></p>
+
+ > **Deprecated**: this field is not part of the image specification and is
+ > always false. It must not be used, and will be removed in API v1.48.
+ type: "boolean"
+ default: false
+ example: false
+ AttachStdout:
+ description: |
+ Whether to attach to `stdout`.
+
+ <p><br /></p>
+
+ > **Deprecated**: this field is not part of the image specification and is
+ > always false. It must not be used, and will be removed in API v1.48.
+ type: "boolean"
+ default: false
+ example: false
+ AttachStderr:
+ description: |
+ Whether to attach to `stderr`.
+
+ <p><br /></p>
+
+ > **Deprecated**: this field is not part of the image specification and is
+ > always false. It must not be used, and will be removed in API v1.48.
+ type: "boolean"
+ default: false
+ example: false
+ ExposedPorts:
+ description: |
+ An object mapping ports to an empty object in the form:
+
+ `{"<port>/<tcp|udp|sctp>": {}}`
+ type: "object"
+ x-nullable: true
+ additionalProperties:
+ type: "object"
+ enum:
+ - {}
+ default: {}
+ example: {
+ "80/tcp": {},
+ "443/tcp": {}
+ }
+ Tty:
+ description: |
+ Attach standard streams to a TTY, including `stdin` if it is not closed.
+
+ <p><br /></p>
+
+ > **Deprecated**: this field is not part of the image specification and is
+ > always false. It must not be used, and will be removed in API v1.48.
+ type: "boolean"
+ default: false
+ example: false
+ OpenStdin:
+ description: |
+ Open `stdin`
+
+ <p><br /></p>
+
+ > **Deprecated**: this field is not part of the image specification and is
+ > always false. It must not be used, and will be removed in API v1.48.
+ type: "boolean"
+ default: false
+ example: false
+ StdinOnce:
+ description: |
+ Close `stdin` after one attached client disconnects.
+
+ <p><br /></p>
+
+ > **Deprecated**: this field is not part of the image specification and is
+ > always false. It must not be used, and will be removed in API v1.48.
+ type: "boolean"
+ default: false
+ example: false
+ Env:
+ description: |
+ A list of environment variables to set inside the container in the
+ form `["VAR=value", ...]`. A variable without `=` is removed from the
+ environment, rather than having an empty value.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ Cmd:
+ description: |
+ Command to run specified as a string or an array of strings.
+ type: "array"
+ items:
+ type: "string"
+ example: ["/bin/sh"]
+ Healthcheck:
+ $ref: "#/definitions/HealthConfig"
+ ArgsEscaped:
+ description: "Command is already escaped (Windows only)"
+ type: "boolean"
+ default: false
+ example: false
+ x-nullable: true
+ Image:
+ description: |
+ The name (or reference) of the image to use when creating the container,
+ or which was used when the container was created.
+
+ <p><br /></p>
+
+ > **Deprecated**: this field is not part of the image specification and is
+ > always empty. It must not be used, and will be removed in API v1.48.
+ type: "string"
+ default: ""
+ example: ""
+ Volumes:
+ description: |
+ An object mapping mount point paths inside the container to empty
+ objects.
+ type: "object"
+ additionalProperties:
+ type: "object"
+ enum:
+ - {}
+ default: {}
+ example:
+ "/app/data": {}
+ "/app/config": {}
+ WorkingDir:
+ description: "The working directory for commands to run in."
+ type: "string"
+ example: "/public/"
+ Entrypoint:
+ description: |
+ The entry point for the container as a string or an array of strings.
+
+ If the array consists of exactly one empty string (`[""]`) then the
+ entry point is reset to system default (i.e., the entry point used by
+ docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).
+ type: "array"
+ items:
+ type: "string"
+ example: []
+ NetworkDisabled:
+ description: |
+ Disable networking for the container.
+
+ <p><br /></p>
+
+ > **Deprecated**: this field is not part of the image specification and is
+ > always omitted. It must not be used, and will be removed in API v1.48.
+ type: "boolean"
+ default: false
+ example: false
+ x-nullable: true
+ MacAddress:
+ description: |
+ MAC address of the container.
+
+ <p><br /></p>
+
+ > **Deprecated**: this field is not part of the image specification and is
+ > always omitted. It must not be used, and will be removed in API v1.48.
+ type: "string"
+ default: ""
+ example: ""
+ x-nullable: true
+ OnBuild:
+ description: |
+ `ONBUILD` metadata that was defined in the image's `Dockerfile`.
+ type: "array"
+ x-nullable: true
+ items:
+ type: "string"
+ example: []
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ StopSignal:
+ description: |
+ Signal to stop a container as a string or unsigned integer.
+ type: "string"
+ example: "SIGTERM"
+ x-nullable: true
+ StopTimeout:
+ description: |
+ Timeout to stop a container in seconds.
+
+ <p><br /></p>
+
+ > **Deprecated**: this field is not part of the image specification and is
+ > always omitted. It must not be used, and will be removed in API v1.48.
+ type: "integer"
+ default: 10
+ x-nullable: true
+ Shell:
+ description: |
+ Shell for when `RUN`, `CMD`, and `ENTRYPOINT` use a shell.
+ type: "array"
+ x-nullable: true
+ items:
+ type: "string"
+ example: ["/bin/sh", "-c"]
+ # FIXME(thaJeztah): temporarily using a full example to remove some "omitempty" fields. Remove once the fields are removed.
+ example:
+ "Hostname": ""
+ "Domainname": ""
+ "User": "web:web"
+ "AttachStdin": false
+ "AttachStdout": false
+ "AttachStderr": false
+ "ExposedPorts": {
+ "80/tcp": {},
+ "443/tcp": {}
+ }
+ "Tty": false
+ "OpenStdin": false
+ "StdinOnce": false
+ "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"]
+ "Cmd": ["/bin/sh"]
+ "Healthcheck": {
+ "Test": ["string"],
+ "Interval": 0,
+ "Timeout": 0,
+ "Retries": 0,
+ "StartPeriod": 0,
+ "StartInterval": 0
+ }
+ "ArgsEscaped": true
+ "Image": ""
+ "Volumes": {
+ "/app/data": {},
+ "/app/config": {}
+ }
+ "WorkingDir": "/public/"
+ "Entrypoint": []
+ "OnBuild": []
+ "Labels": {
+ "com.example.some-label": "some-value",
+ "com.example.some-other-label": "some-other-value"
+ }
+ "StopSignal": "SIGTERM"
+ "Shell": ["/bin/sh", "-c"]
+
+ NetworkingConfig:
+ description: |
+ NetworkingConfig represents the container's networking configuration for
+ each of its interfaces.
+ It is used for the networking configs specified in the `docker create`
+ and `docker network connect` commands.
+ type: "object"
+ properties:
+ EndpointsConfig:
+ description: |
+ A mapping of network name to endpoint configuration for that network.
+ The endpoint configuration can be left empty to connect to that
+ network with no particular endpoint configuration.
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/EndpointSettings"
+ example:
+ # putting an example here, instead of using the example values from
+ # /definitions/EndpointSettings, because EndpointSettings contains
+ # operational data returned when inspecting a container that we don't
+ # accept here.
+ EndpointsConfig:
+ isolated_nw:
+ IPAMConfig:
+ IPv4Address: "172.20.30.33"
+ IPv6Address: "2001:db8:abcd::3033"
+ LinkLocalIPs:
+ - "169.254.34.68"
+ - "fe80::3468"
+ MacAddress: "02:42:ac:12:05:02"
+ Links:
+ - "container_1"
+ - "container_2"
+ Aliases:
+ - "server_x"
+ - "server_y"
+ database_nw: {}
+
+ NetworkSettings:
+ description: "NetworkSettings exposes the network settings in the API"
+ type: "object"
+ properties:
+ Bridge:
+ description: |
+ Name of the default bridge interface when dockerd's --bridge flag is set.
+ type: "string"
+ example: "docker0"
+ SandboxID:
+ description: SandboxID uniquely represents a container's network stack.
+ type: "string"
+ example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3"
+ HairpinMode:
+ description: |
+ Indicates if hairpin NAT should be enabled on the virtual interface.
+
+ Deprecated: This field is never set and will be removed in a future release.
+ type: "boolean"
+ example: false
+ LinkLocalIPv6Address:
+ description: |
+ IPv6 unicast address using the link-local prefix.
+
+ Deprecated: This field is never set and will be removed in a future release.
+ type: "string"
+ example: ""
+ LinkLocalIPv6PrefixLen:
+ description: |
+ Prefix length of the IPv6 unicast address.
+
+ Deprecated: This field is never set and will be removed in a future release.
+ type: "integer"
+ example: ""
+ Ports:
+ $ref: "#/definitions/PortMap"
+ SandboxKey:
+ description: SandboxKey is the full path of the netns handle
+ type: "string"
+ example: "/var/run/docker/netns/8ab54b426c38"
+
+ SecondaryIPAddresses:
+ description: "Deprecated: This field is never set and will be removed in a future release."
+ type: "array"
+ items:
+ $ref: "#/definitions/Address"
+ x-nullable: true
+
+ SecondaryIPv6Addresses:
+ description: "Deprecated: This field is never set and will be removed in a future release."
+ type: "array"
+ items:
+ $ref: "#/definitions/Address"
+ x-nullable: true
+
+ # TODO properties below are part of DefaultNetworkSettings, which is
+ # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12
+ EndpointID:
+ description: |
+ EndpointID uniquely represents a service endpoint in a Sandbox.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"
+ Gateway:
+ description: |
+ Gateway address for the default "bridge" network.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "172.17.0.1"
+ GlobalIPv6Address:
+ description: |
+ Global IPv6 address for the default "bridge" network.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "2001:db8::5689"
+ GlobalIPv6PrefixLen:
+ description: |
+ Mask length of the global IPv6 address.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "integer"
+ example: 64
+ IPAddress:
+ description: |
+ IPv4 address for the default "bridge" network.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "172.17.0.4"
+ IPPrefixLen:
+ description: |
+ Mask length of the IPv4 address.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "integer"
+ example: 16
+ IPv6Gateway:
+ description: |
+ IPv6 gateway address for this network.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "2001:db8:2::100"
+ MacAddress:
+ description: |
+ MAC address for the container on the default "bridge" network.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "02:42:ac:11:00:04"
+ Networks:
+ description: |
+ Information about all networks that the container is connected to.
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/EndpointSettings"
+
+ Address:
+ description: Address represents an IPv4 or IPv6 IP address.
+ type: "object"
+ properties:
+ Addr:
+ description: IP address.
+ type: "string"
+ PrefixLen:
+ description: Mask length of the IP address.
+ type: "integer"
+
+ PortMap:
+ description: |
+ PortMap describes the mapping of container ports to host ports, using the
+ container's port-number and protocol as key in the format `<port>/<protocol>`,
+ for example, `80/udp`.
+
+ If a container's port is mapped for multiple protocols, separate entries
+ are added to the mapping table.
+ type: "object"
+ additionalProperties:
+ type: "array"
+ x-nullable: true
+ items:
+ $ref: "#/definitions/PortBinding"
+ example:
+ "443/tcp":
+ - HostIp: "127.0.0.1"
+ HostPort: "4443"
+ "80/tcp":
+ - HostIp: "0.0.0.0"
+ HostPort: "80"
+ - HostIp: "0.0.0.0"
+ HostPort: "8080"
+ "80/udp":
+ - HostIp: "0.0.0.0"
+ HostPort: "80"
+ "53/udp":
+ - HostIp: "0.0.0.0"
+ HostPort: "53"
+ "2377/tcp": null
+
+ PortBinding:
+ description: |
+ PortBinding represents a binding between a host IP address and a host
+ port.
+ type: "object"
+ properties:
+ HostIp:
+ description: "Host IP address that the container's port is mapped to."
+ type: "string"
+ example: "127.0.0.1"
+ HostPort:
+ description: "Host port number that the container's port is mapped to."
+ type: "string"
+ example: "4443"
+
+ DriverData:
+ description: |
+ Information about the storage driver used to store the container's and
+ image's filesystem.
+ type: "object"
+ required: [Name, Data]
+ properties:
+ Name:
+ description: "Name of the storage driver."
+ type: "string"
+ x-nullable: false
+ example: "overlay2"
+ Data:
+ description: |
+ Low-level storage metadata, provided as key/value pairs.
+
+ This information is driver-specific, depends on the storage driver
+ in use, and should be used for informational purposes only.
+ type: "object"
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+ example: {
+ "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged",
+ "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff",
+ "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work"
+ }
+
+ FilesystemChange:
+ description: |
+ Change in the container's filesystem.
+ type: "object"
+ required: [Path, Kind]
+ properties:
+ Path:
+ description: |
+ Path to file or directory that has changed.
+ type: "string"
+ x-nullable: false
+ Kind:
+ $ref: "#/definitions/ChangeType"
+
+ ChangeType:
+ description: |
+ Kind of change
+
+ Can be one of:
+
+ - `0`: Modified ("C")
+ - `1`: Added ("A")
+ - `2`: Deleted ("D")
+ type: "integer"
+ format: "uint8"
+ enum: [0, 1, 2]
+ x-nullable: false
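+ # Illustrative only (not part of the upstream spec): a filesystem-diff
+ # response pairs each Path with a ChangeType Kind, e.g.
+ #   [{"Path": "/etc/app", "Kind": 0},
+ #    {"Path": "/etc/app/extra.conf", "Kind": 1},
+ #    {"Path": "/tmp/scratch", "Kind": 2}]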
+
+ ImageInspect:
+ description: |
+ Information about an image in the local image cache.
+ type: "object"
+ properties:
+ Id:
+ description: |
+ ID is the content-addressable ID of an image.
+
+ This identifier is a content-addressable digest calculated from the
+ image's configuration (which includes the digests of layers used by
+ the image).
+
+ Note that this digest differs from the `RepoDigests` below, which
+ holds digests of image manifests that reference the image.
+ type: "string"
+ x-nullable: false
+ example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710"
+ Descriptor:
+ description: |
+ Descriptor is an OCI descriptor of the image target.
+ In case of a multi-platform image, this descriptor points to the OCI index
+ or a manifest list.
+
+ This field is only present if the daemon provides a multi-platform image store.
+
+ WARNING: This is experimental and may change at any time without any backward
+ compatibility.
+ x-nullable: true
+ $ref: "#/definitions/OCIDescriptor"
+ Manifests:
+ description: |
+ Manifests is a list of image manifests available in this image. It
+ provides a more detailed view of the platform-specific image manifests or
+ other image-attached data like build attestations.
+
+ Only available if the daemon provides a multi-platform image store
+ and the `manifests` option is set in the inspect request.
+
+ WARNING: This is experimental and may change at any time without any backward
+ compatibility.
+ type: "array"
+ x-nullable: true
+ items:
+ $ref: "#/definitions/ImageManifestSummary"
+ RepoTags:
+ description: |
+ List of image names/tags in the local image cache that reference this
+ image.
+
+ Multiple image tags can refer to the same image, and this list may be
+ empty if no tags reference the image, in which case the image is
+ "untagged" but can still be referenced by its ID.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "example:1.0"
+ - "example:latest"
+ - "example:stable"
+ - "internal.registry.example.com:5000/example:1.0"
+ RepoDigests:
+ description: |
+ List of content-addressable digests of locally available image manifests
+ that reference this image. Multiple manifests can refer to the
+ same image.
+
+ These digests are usually only available if the image was either pulled
+ from a registry, or if the image was pushed to a registry, which is when
+ the manifest is generated and its digest calculated.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb"
+ - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+ Parent:
+ description: |
+ ID of the parent image.
+
+ Depending on how the image was created, this field may be empty and
+ is only set for images that were built/created locally. This field
+ is empty if the image was pulled from an image registry.
+ type: "string"
+ x-nullable: false
+ example: ""
+ Comment:
+ description: |
+ Optional message that was set when committing or importing the image.
+ type: "string"
+ x-nullable: false
+ example: ""
+ Created:
+ description: |
+ Date and time at which the image was created, formatted in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+
+ This information is only available if present in the image,
+ and omitted otherwise.
+ type: "string"
+ format: "dateTime"
+ x-nullable: true
+ example: "2022-02-04T21:20:12.497794809Z"
+ DockerVersion:
+ description: |
+ The version of Docker that was used to build the image.
+
+ Depending on how the image was created, this field may be empty.
+ type: "string"
+ x-nullable: false
+ example: "27.0.1"
+ Author:
+ description: |
+ Name of the author that was specified when committing the image, or as
+ specified through MAINTAINER (deprecated) in the Dockerfile.
+ type: "string"
+ x-nullable: false
+ example: ""
+ Config:
+ $ref: "#/definitions/ImageConfig"
+ Architecture:
+ description: |
+ Hardware CPU architecture that the image runs on.
+ type: "string"
+ x-nullable: false
+ example: "arm"
+ Variant:
+ description: |
+ CPU architecture variant (presently ARM-only).
+ type: "string"
+ x-nullable: true
+ example: "v7"
+ Os:
+ description: |
+ Operating System the image is built to run on.
+ type: "string"
+ x-nullable: false
+ example: "linux"
+ OsVersion:
+ description: |
+ Operating System version the image is built to run on (especially
+ for Windows).
+ type: "string"
+ example: ""
+ x-nullable: true
+ Size:
+ description: |
+ Total size of the image including all layers it is composed of.
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ example: 1239828
+ VirtualSize:
+ description: |
+ Total size of the image including all layers it is composed of.
+
+ Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead.
+ type: "integer"
+ format: "int64"
+ example: 1239828
+ GraphDriver:
+ $ref: "#/definitions/DriverData"
+ RootFS:
+ description: |
+ Information about the image's RootFS, including the layer IDs.
+ type: "object"
+ required: [Type]
+ properties:
+ Type:
+ type: "string"
+ x-nullable: false
+ example: "layers"
+ Layers:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6"
+ - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
+ Metadata:
+ description: |
+ Additional metadata of the image in the local cache. This information
+ is local to the daemon, and not part of the image itself.
+ type: "object"
+ properties:
+ LastTagTime:
+ description: |
+ Date and time at which the image was last tagged in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+
+ This information is only available if the image was tagged locally,
+ and omitted otherwise.
+ type: "string"
+ format: "dateTime"
+ example: "2022-02-28T14:40:02.623929178Z"
+ x-nullable: true
+
+ ImageSummary:
+ type: "object"
+ x-go-name: "Summary"
+ required:
+ - Id
+ - ParentId
+ - RepoTags
+ - RepoDigests
+ - Created
+ - Size
+ - SharedSize
+ - Labels
+ - Containers
+ properties:
+ Id:
+ description: |
+ ID is the content-addressable ID of an image.
+
+ This identifier is a content-addressable digest calculated from the
+ image's configuration (which includes the digests of layers used by
+ the image).
+
+ Note that this digest differs from the `RepoDigests` below, which
+ holds digests of image manifests that reference the image.
+ type: "string"
+ x-nullable: false
+ example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710"
+ ParentId:
+ description: |
+ ID of the parent image.
+
+ Depending on how the image was created, this field may be empty and
+ is only set for images that were built/created locally. This field
+ is empty if the image was pulled from an image registry.
+ type: "string"
+ x-nullable: false
+ example: ""
+ RepoTags:
+ description: |
+ List of image names/tags in the local image cache that reference this
+ image.
+
+ Multiple image tags can refer to the same image, and this list may be
+ empty if no tags reference the image, in which case the image is
+ "untagged" but can still be referenced by its ID.
+ type: "array"
+ x-nullable: false
+ items:
+ type: "string"
+ example:
+ - "example:1.0"
+ - "example:latest"
+ - "example:stable"
+ - "internal.registry.example.com:5000/example:1.0"
+ RepoDigests:
+ description: |
+ List of content-addressable digests of locally available image manifests
+ that reference this image. Multiple manifests can refer to the
+ same image.
+
+ These digests are usually only available if the image was either pulled
+ from a registry, or if the image was pushed to a registry, which is when
+ the manifest is generated and its digest calculated.
+ type: "array"
+ x-nullable: false
+ items:
+ type: "string"
+ example:
+ - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb"
+ - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+ Created:
+ description: |
+ Date and time at which the image was created as a Unix timestamp
+ (number of seconds since EPOCH).
+ type: "integer"
+ x-nullable: false
+ example: "1644009612"
+ Size:
+ description: |
+ Total size of the image including all layers it is composed of.
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ example: 172064416
+ SharedSize:
+ description: |
+ Total size of image layers that are shared between this image and other
+ images.
+
+ This size is not calculated by default. `-1` indicates that the value
+ has not been set / calculated.
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ example: 1239828
+ VirtualSize:
+ description: |-
+ Total size of the image including all layers it is composed of.
+
+ Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead.
+ type: "integer"
+ format: "int64"
+ example: 172064416
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Containers:
+ description: |
+ Number of containers using this image. Includes both stopped and running
+ containers.
+
+ This value is not calculated by default, and depends on which API endpoint
+ is used. `-1` indicates that the value has not been set / calculated.
+ x-nullable: false
+ type: "integer"
+ example: 2
+ Manifests:
+ description: |
+ Manifests is a list of manifests available in this image.
+ It provides a more detailed view of the platform-specific image manifests
+ or other image-attached data like build attestations.
+
+ WARNING: This is experimental and may change at any time without any backward
+ compatibility.
+ type: "array"
+ x-nullable: false
+ x-omitempty: true
+ items:
+ $ref: "#/definitions/ImageManifestSummary"
+ Descriptor:
+ description: |
+ Descriptor is an OCI descriptor of the image target.
+ In case of a multi-platform image, this descriptor points to the OCI index
+ or a manifest list.
+
+ This field is only present if the daemon provides a multi-platform image store.
+
+ WARNING: This is experimental and may change at any time without any backward
+ compatibility.
+ x-nullable: true
+ $ref: "#/definitions/OCIDescriptor"
+
+ AuthConfig:
+ type: "object"
+ properties:
+ username:
+ type: "string"
+ password:
+ type: "string"
+ email:
+ type: "string"
+ serveraddress:
+ type: "string"
+ example:
+ username: "hannibal"
+ password: "xxxx"
+ serveraddress: "https://index.docker.io/v1/"
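+ # Editorial note (not part of the upstream schema): elsewhere in this API,
+ # this object is serialized to JSON and base64url-encoded to form the
+ # X-Registry-Auth request header used by registry-facing endpoints.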
+
+ ProcessConfig:
+ type: "object"
+ properties:
+ privileged:
+ type: "boolean"
+ user:
+ type: "string"
+ tty:
+ type: "boolean"
+ entrypoint:
+ type: "string"
+ arguments:
+ type: "array"
+ items:
+ type: "string"
+
+ Volume:
+ type: "object"
+ required: [Name, Driver, Mountpoint, Labels, Scope, Options]
+ properties:
+ Name:
+ type: "string"
+ description: "Name of the volume."
+ x-nullable: false
+ example: "tardis"
+ Driver:
+ type: "string"
+ description: "Name of the volume driver used by the volume."
+ x-nullable: false
+ example: "custom"
+ Mountpoint:
+ type: "string"
+ description: "Mount path of the volume on the host."
+ x-nullable: false
+ example: "/var/lib/docker/volumes/tardis"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ description: "Date/Time the volume was created."
+ example: "2016-06-07T20:31:11.853781916Z"
+ Status:
+ type: "object"
+ description: |
+ Low-level details about the volume, provided by the volume driver.
+ Details are returned as a map with key/value pairs:
+ `{"key":"value","key2":"value2"}`.
+
+ The `Status` field is optional, and is omitted if the volume driver
+ does not support this feature.
+ additionalProperties:
+ type: "object"
+ example:
+ hello: "world"
+ Labels:
+ type: "object"
+ description: "User-defined key/value metadata."
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Scope:
+ type: "string"
+ description: |
+ The level at which the volume exists. Either `global` for cluster-wide,
+ or `local` for machine level.
+ default: "local"
+ x-nullable: false
+ enum: ["local", "global"]
+ example: "local"
+ ClusterVolume:
+ $ref: "#/definitions/ClusterVolume"
+ Options:
+ type: "object"
+ description: |
+ The driver specific options used when creating the volume.
+ additionalProperties:
+ type: "string"
+ example:
+ device: "tmpfs"
+ o: "size=100m,uid=1000"
+ type: "tmpfs"
+ UsageData:
+ type: "object"
+ x-nullable: true
+ x-go-name: "UsageData"
+ required: [Size, RefCount]
+ description: |
+ Usage details about the volume. This information is used by the
+ `GET /system/df` endpoint, and omitted in other endpoints.
+ properties:
+ Size:
+ type: "integer"
+ format: "int64"
+ default: -1
+ description: |
+ Amount of disk space used by the volume (in bytes). This information
+ is only available for volumes created with the `"local"` volume
+ driver. For volumes created with other volume drivers, this field
+ is set to `-1` ("not available")
+ x-nullable: false
+ RefCount:
+ type: "integer"
+ format: "int64"
+ default: -1
+ description: |
+ The number of containers referencing this volume. This field
+ is set to `-1` if the reference-count is not available.
+ x-nullable: false
+
+ VolumeCreateOptions:
+ description: "Volume configuration"
+ type: "object"
+ title: "VolumeConfig"
+ x-go-name: "CreateOptions"
+ properties:
+ Name:
+ description: |
+ The new volume's name. If not specified, Docker generates a name.
+ type: "string"
+ x-nullable: false
+ example: "tardis"
+ Driver:
+ description: "Name of the volume driver to use."
+ type: "string"
+ default: "local"
+ x-nullable: false
+ example: "custom"
+ DriverOpts:
+ description: |
+ A mapping of driver options and values. These options are
+ passed directly to the driver and are driver specific.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ device: "tmpfs"
+ o: "size=100m,uid=1000"
+ type: "tmpfs"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ ClusterVolumeSpec:
+ $ref: "#/definitions/ClusterVolumeSpec"
+
+ VolumeListResponse:
+ type: "object"
+ title: "VolumeListResponse"
+ x-go-name: "ListResponse"
+ description: "Volume list response"
+ properties:
+ Volumes:
+ type: "array"
+ description: "List of volumes"
+ items:
+ $ref: "#/definitions/Volume"
+ Warnings:
+ type: "array"
+ description: |
+ Warnings that occurred when fetching the list of volumes.
+ items:
+ type: "string"
+ example: []
+
+ Network:
+ type: "object"
+ properties:
+ Name:
+ description: |
+ Name of the network.
+ type: "string"
+ example: "my_network"
+ Id:
+ description: |
+ ID that uniquely identifies a network on a single machine.
+ type: "string"
+ example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99"
+ Created:
+ description: |
+ Date and time at which the network was created in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2016-10-19T04:33:30.360899459Z"
+ Scope:
+ description: |
+ The level at which the network exists (e.g. `swarm` for cluster-wide
+ or `local` for machine level)
+ type: "string"
+ example: "local"
+ Driver:
+ description: |
+ The name of the driver used to create the network (e.g. `bridge`,
+ `overlay`).
+ type: "string"
+ example: "overlay"
+ EnableIPv4:
+ description: |
+ Whether the network was created with IPv4 enabled.
+ type: "boolean"
+ example: true
+ EnableIPv6:
+ description: |
+ Whether the network was created with IPv6 enabled.
+ type: "boolean"
+ example: false
+ IPAM:
+ $ref: "#/definitions/IPAM"
+ Internal:
+ description: |
+ Whether the network is created to only allow internal networking
+ connectivity.
+ type: "boolean"
+ default: false
+ example: false
+ Attachable:
+ description: |
+ Whether a global / swarm scope network is manually attachable by regular
+ containers from workers in swarm mode.
+ type: "boolean"
+ default: false
+ example: false
+ Ingress:
+ description: |
+ Whether the network is providing the routing-mesh for the swarm cluster.
+ type: "boolean"
+ default: false
+ example: false
+ ConfigFrom:
+ $ref: "#/definitions/ConfigReference"
+ ConfigOnly:
+ description: |
+ Whether the network is a config-only network. Config-only networks are
+ placeholder networks for network configurations to be used by other
+ networks. Config-only networks cannot be used directly to run containers
+ or services.
+ type: "boolean"
+ default: false
+ Containers:
+ description: |
+ Contains endpoints attached to the network.
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/NetworkContainer"
+ example:
+ 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c:
+ Name: "test"
+ EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
+ MacAddress: "02:42:ac:13:00:02"
+ IPv4Address: "172.19.0.2/16"
+ IPv6Address: ""
+ Options:
+ description: |
+ Network-specific options used when creating the network.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ com.docker.network.bridge.default_bridge: "true"
+ com.docker.network.bridge.enable_icc: "true"
+ com.docker.network.bridge.enable_ip_masquerade: "true"
+ com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
+ com.docker.network.bridge.name: "docker0"
+ com.docker.network.driver.mtu: "1500"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Peers:
+ description: |
+ List of peer nodes for an overlay network. This field is only present
+ for overlay networks, and omitted for other network types.
+ type: "array"
+ items:
+ $ref: "#/definitions/PeerInfo"
+ x-nullable: true
+ # TODO: Add Services (only present when "verbose" is set).
+
+ ConfigReference:
+ description: |
+ The config-only network source that provides the configuration for
+ this network.
+ type: "object"
+ properties:
+ Network:
+ description: |
+ The name of the config-only network that provides the network's
+ configuration. The specified network must be an existing config-only
+ network. Only network names are allowed, not network IDs.
+ type: "string"
+ example: "config_only_network_01"
+
+ IPAM:
+ type: "object"
+ properties:
+ Driver:
+ description: "Name of the IPAM driver to use."
+ type: "string"
+ default: "default"
+ example: "default"
+ Config:
+ description: |
+ List of IPAM configuration options, specified as a map:
+
+ ```
+ {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>}
+ ```
+ type: "array"
+ items:
+ $ref: "#/definitions/IPAMConfig"
+ Options:
+ description: "Driver-specific options, specified as a map."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ foo: "bar"
+
+ IPAMConfig:
+ type: "object"
+ properties:
+ Subnet:
+ type: "string"
+ example: "172.20.0.0/16"
+ IPRange:
+ type: "string"
+ example: "172.20.10.0/24"
+ Gateway:
+ type: "string"
+ example: "172.20.10.11"
+ AuxiliaryAddresses:
+ type: "object"
+ additionalProperties:
+ type: "string"
+
+ NetworkContainer:
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ example: "container_1"
+ EndpointID:
+ type: "string"
+ example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
+ MacAddress:
+ type: "string"
+ example: "02:42:ac:13:00:02"
+ IPv4Address:
+ type: "string"
+ example: "172.19.0.2/16"
+ IPv6Address:
+ type: "string"
+ example: ""
+
+ PeerInfo:
+ description: |
+ PeerInfo represents one peer of an overlay network.
+ type: "object"
+ properties:
+ Name:
+ description:
+ ID of the peer-node in the Swarm cluster.
+ type: "string"
+ example: "6869d7c1732b"
+ IP:
+ description:
+ IP-address of the peer-node in the Swarm cluster.
+ type: "string"
+ example: "10.133.77.91"
+
+ NetworkCreateResponse:
+ description: "OK response to NetworkCreate operation"
+ type: "object"
+ title: "NetworkCreateResponse"
+ x-go-name: "CreateResponse"
+ required: [Id, Warning]
+ properties:
+ Id:
+ description: "The ID of the created network."
+ type: "string"
+ x-nullable: false
+ example: "b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d"
+ Warning:
+ description: "Warnings encountered when creating the container"
+ type: "string"
+ x-nullable: false
+ example: ""
+
+ BuildInfo:
+ type: "object"
+ properties:
+ id:
+ type: "string"
+ stream:
+ type: "string"
+ error:
+ type: "string"
+ x-nullable: true
+ description: |-
+ Errors encountered during the operation.
+
+
+ > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead.
+ errorDetail:
+ $ref: "#/definitions/ErrorDetail"
+ status:
+ type: "string"
+ progress:
+ type: "string"
+ x-nullable: true
+ description: |-
+ Progress is a pre-formatted presentation of progressDetail.
+
+
+ > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead.
+ progressDetail:
+ $ref: "#/definitions/ProgressDetail"
+ aux:
+ $ref: "#/definitions/ImageID"
+
+ BuildCache:
+ type: "object"
+ description: |
+ BuildCache contains information about a build cache record.
+ properties:
+ ID:
+ type: "string"
+ description: |
+ Unique ID of the build cache record.
+ example: "ndlpt0hhvkqcdfkputsk4cq9c"
+ Parent:
+ description: |
+ ID of the parent build cache record.
+
+ > **Deprecated**: This field is deprecated, and omitted if empty.
+ type: "string"
+ x-nullable: true
+ example: ""
+ Parents:
+ description: |
+ List of parent build cache record IDs.
+ type: "array"
+ items:
+ type: "string"
+ x-nullable: true
+ example: ["hw53o5aio51xtltp5xjp8v7fx"]
+ Type:
+ type: "string"
+ description: |
+ Cache record type.
+ example: "regular"
+ # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84
+ enum:
+ - "internal"
+ - "frontend"
+ - "source.local"
+ - "source.git.checkout"
+ - "exec.cachemount"
+ - "regular"
+ Description:
+ type: "string"
+ description: |
+ Description of the build-step that produced the build cache.
+ example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache"
+ InUse:
+ type: "boolean"
+ description: |
+ Indicates if the build cache is in use.
+ example: false
+ Shared:
+ type: "boolean"
+ description: |
+ Indicates if the build cache is shared.
+ example: true
+ Size:
+ description: |
+ Amount of disk space used by the build cache (in bytes).
+ type: "integer"
+ example: 51
+ CreatedAt:
+ description: |
+ Date and time at which the build cache was created in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2016-08-18T10:44:24.496525531Z"
+ LastUsedAt:
+ description: |
+ Date and time at which the build cache was last used in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ x-nullable: true
+ example: "2017-08-09T07:09:37.632105588Z"
+ UsageCount:
+ type: "integer"
+ example: 26
+
+ ImageID:
+ type: "object"
+ description: "Image ID or Digest"
+ properties:
+ ID:
+ type: "string"
+ example:
+ ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c"
+
+ CreateImageInfo:
+ type: "object"
+ properties:
+ id:
+ type: "string"
+ error:
+ type: "string"
+ x-nullable: true
+ description: |-
+ Errors encountered during the operation.
+
+
+ > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead.
+ errorDetail:
+ $ref: "#/definitions/ErrorDetail"
+ status:
+ type: "string"
+ progress:
+ type: "string"
+ x-nullable: true
+ description: |-
+ Progress is a pre-formatted presentation of progressDetail.
+
+
+ > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead.
+ progressDetail:
+ $ref: "#/definitions/ProgressDetail"
+
+ PushImageInfo:
+ type: "object"
+ properties:
+ error:
+ type: "string"
+ x-nullable: true
+ description: |-
+ Errors encountered during the operation.
+
+
+ > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead.
+ errorDetail:
+ $ref: "#/definitions/ErrorDetail"
+ status:
+ type: "string"
+ progress:
+ type: "string"
+ x-nullable: true
+ description: |-
+ Progress is a pre-formatted presentation of progressDetail.
+
+
+ > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead.
+ progressDetail:
+ $ref: "#/definitions/ProgressDetail"
+
+ ErrorDetail:
+ type: "object"
+ properties:
+ code:
+ type: "integer"
+ message:
+ type: "string"
+
+ ProgressDetail:
+ type: "object"
+ properties:
+ current:
+ type: "integer"
+ total:
+ type: "integer"
+
+ ErrorResponse:
+ description: "Represents an error."
+ type: "object"
+ required: ["message"]
+ properties:
+ message:
+ description: "The error message."
+ type: "string"
+ x-nullable: false
+ example:
+ message: "Something went wrong."
+
+ IDResponse:
+ description: "Response to an API call that returns just an Id"
+ type: "object"
+ x-go-name: "IDResponse"
+ required: ["Id"]
+ properties:
+ Id:
+ description: "The id of the newly created object."
+ type: "string"
+ x-nullable: false
+
+ EndpointSettings:
+ description: "Configuration for a network endpoint."
+ type: "object"
+ properties:
+ # Configurations
+ IPAMConfig:
+ $ref: "#/definitions/EndpointIPAMConfig"
+ Links:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "container_1"
+ - "container_2"
+ MacAddress:
+ description: |
+ MAC address for the endpoint on this network. The network driver might ignore this parameter.
+ type: "string"
+ example: "02:42:ac:11:00:04"
+ Aliases:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "server_x"
+ - "server_y"
+ DriverOpts:
+ description: |
+ DriverOpts is a mapping of driver options and values. These options
+ are passed directly to the driver and are driver specific.
+ type: "object"
+ x-nullable: true
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ GwPriority:
+ description: |
+ This property determines which endpoint will provide the default
+ gateway for a container. The endpoint with the highest priority will
+ be used. If multiple endpoints have the same priority, endpoints are
+ lexicographically sorted based on their network name, and the one
+ that sorts first is picked.
+ type: "number"
+ example: 10
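+ # Illustrative only (not part of the upstream spec): given endpoints
+ #   {"backend": {"GwPriority": 10}, "frontend": {"GwPriority": 20}}
+ # "frontend" provides the default gateway; if both priorities were 10,
+ # "backend" would win because it sorts first lexicographically.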
+
+ # Operational data
+ NetworkID:
+ description: |
+ Unique ID of the network.
+ type: "string"
+ example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a"
+ EndpointID:
+ description: |
+ Unique ID for the service endpoint in a Sandbox.
+ type: "string"
+ example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"
+ Gateway:
+ description: |
+ Gateway address for this network.
+ type: "string"
+ example: "172.17.0.1"
+ IPAddress:
+ description: |
+ IPv4 address.
+ type: "string"
+ example: "172.17.0.4"
+ IPPrefixLen:
+ description: |
+ Mask length of the IPv4 address.
+ type: "integer"
+ example: 16
+ IPv6Gateway:
+ description: |
+ IPv6 gateway address.
+ type: "string"
+ example: "2001:db8:2::100"
+ GlobalIPv6Address:
+ description: |
+ Global IPv6 address.
+ type: "string"
+ example: "2001:db8::5689"
+ GlobalIPv6PrefixLen:
+ description: |
+ Mask length of the global IPv6 address.
+ type: "integer"
+ format: "int64"
+ example: 64
+ DNSNames:
+ description: |
+ List of all DNS names an endpoint has on a specific network. This
+ list is based on the container name, network aliases, container short
+ ID, and hostname.
+
+ These DNS names are non-fully qualified but can contain several dots.
+ You can get fully qualified DNS names by appending `.<network-name>`.
+ For instance, if the container name is `my.ctr` and the network is named
+ `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be
+ `my.ctr.testnet`.
+ type: array
+ items:
+ type: string
+ example: ["foobar", "server_x", "server_y", "my.ctr"]
+
+ EndpointIPAMConfig:
+ description: |
+ EndpointIPAMConfig represents an endpoint's IPAM configuration.
+ type: "object"
+ x-nullable: true
+ properties:
+ IPv4Address:
+ type: "string"
+ example: "172.20.30.33"
+ IPv6Address:
+ type: "string"
+ example: "2001:db8:abcd::3033"
+ LinkLocalIPs:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "169.254.34.68"
+ - "fe80::3468"
+
+ PluginMount:
+ type: "object"
+ x-nullable: false
+ required: [Name, Description, Settable, Source, Destination, Type, Options]
+ properties:
+ Name:
+ type: "string"
+ x-nullable: false
+ example: "some-mount"
+ Description:
+ type: "string"
+ x-nullable: false
+ example: "This is a mount that's used by the plugin."
+ Settable:
+ type: "array"
+ items:
+ type: "string"
+ Source:
+ type: "string"
+ example: "/var/lib/docker/plugins/"
+ Destination:
+ type: "string"
+ x-nullable: false
+ example: "/mnt/state"
+ Type:
+ type: "string"
+ x-nullable: false
+ example: "bind"
+ Options:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "rbind"
+ - "rw"
+
+ PluginDevice:
+ type: "object"
+ required: [Name, Description, Settable, Path]
+ x-nullable: false
+ properties:
+ Name:
+ type: "string"
+ x-nullable: false
+ Description:
+ type: "string"
+ x-nullable: false
+ Settable:
+ type: "array"
+ items:
+ type: "string"
+ Path:
+ type: "string"
+ example: "/dev/fuse"
+
+ PluginEnv:
+ type: "object"
+ x-nullable: false
+ required: [Name, Description, Settable, Value]
+ properties:
+ Name:
+ x-nullable: false
+ type: "string"
+ Description:
+ x-nullable: false
+ type: "string"
+ Settable:
+ type: "array"
+ items:
+ type: "string"
+ Value:
+ type: "string"
+
+ PluginInterfaceType:
+ type: "object"
+ x-nullable: false
+ required: [Prefix, Capability, Version]
+ properties:
+ Prefix:
+ type: "string"
+ x-nullable: false
+ Capability:
+ type: "string"
+ x-nullable: false
+ Version:
+ type: "string"
+ x-nullable: false
+
+ PluginPrivilege:
+ description: |
+ Describes a permission the user has to accept upon installing
+ the plugin.
+ type: "object"
+ x-go-name: "PluginPrivilege"
+ properties:
+ Name:
+ type: "string"
+ example: "network"
+ Description:
+ type: "string"
+ Value:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "host"
+
+ Plugin:
+ description: "A plugin for the Engine API"
+ type: "object"
+ required: [Settings, Enabled, Config, Name]
+ properties:
+ Id:
+ type: "string"
+ example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078"
+ Name:
+ type: "string"
+ x-nullable: false
+ example: "tiborvass/sample-volume-plugin"
+ Enabled:
+ description:
+ True if the plugin is running. False if the plugin is not running,
+ only installed.
+ type: "boolean"
+ x-nullable: false
+ example: true
+ Settings:
+ description: "Settings that can be modified by users."
+ type: "object"
+ x-nullable: false
+ required: [Args, Devices, Env, Mounts]
+ properties:
+ Mounts:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginMount"
+ Env:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "DEBUG=0"
+ Args:
+ type: "array"
+ items:
+ type: "string"
+ Devices:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginDevice"
+ PluginReference:
+ description: "plugin remote reference used to push/pull the plugin"
+ type: "string"
+ x-nullable: false
+ example: "localhost:5000/tiborvass/sample-volume-plugin:latest"
+ Config:
+ description: "The config of a plugin."
+ type: "object"
+ x-nullable: false
+ required:
+ - Description
+ - Documentation
+ - Interface
+ - Entrypoint
+ - WorkDir
+ - Network
+ - Linux
+ - PidHost
+ - PropagatedMount
+ - IpcHost
+ - Mounts
+ - Env
+ - Args
+ properties:
+ DockerVersion:
+ description: "Docker Version used to create the plugin"
+ type: "string"
+ x-nullable: false
+ example: "17.06.0-ce"
+ Description:
+ type: "string"
+ x-nullable: false
+ example: "A sample volume plugin for Docker"
+ Documentation:
+ type: "string"
+ x-nullable: false
+ example: "https://docs.docker.com/engine/extend/plugins/"
+ Interface:
+ description: "The interface between Docker and the plugin"
+ x-nullable: false
+ type: "object"
+ required: [Types, Socket]
+ properties:
+ Types:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginInterfaceType"
+ example:
+ - "docker.volumedriver/1.0"
+ Socket:
+ type: "string"
+ x-nullable: false
+ example: "plugins.sock"
+ ProtocolScheme:
+ type: "string"
+ example: "some.protocol/v1.0"
+ description: "Protocol to use for clients connecting to the plugin."
+ enum:
+ - ""
+ - "moby.plugins.http/v1"
+ Entrypoint:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "/usr/bin/sample-volume-plugin"
+ - "/data"
+ WorkDir:
+ type: "string"
+ x-nullable: false
+ example: "/bin/"
+ User:
+ type: "object"
+ x-nullable: false
+ properties:
+ UID:
+ type: "integer"
+ format: "uint32"
+ example: 1000
+ GID:
+ type: "integer"
+ format: "uint32"
+ example: 1000
+ Network:
+ type: "object"
+ x-nullable: false
+ required: [Type]
+ properties:
+ Type:
+ x-nullable: false
+ type: "string"
+ example: "host"
+ Linux:
+ type: "object"
+ x-nullable: false
+ required: [Capabilities, AllowAllDevices, Devices]
+ properties:
+ Capabilities:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "CAP_SYS_ADMIN"
+ - "CAP_SYSLOG"
+ AllowAllDevices:
+ type: "boolean"
+ x-nullable: false
+ example: false
+ Devices:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginDevice"
+ PropagatedMount:
+ type: "string"
+ x-nullable: false
+ example: "/mnt/volumes"
+ IpcHost:
+ type: "boolean"
+ x-nullable: false
+ example: false
+ PidHost:
+ type: "boolean"
+ x-nullable: false
+ example: false
+ Mounts:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginMount"
+ Env:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginEnv"
+ example:
+ - Name: "DEBUG"
+ Description: "If set, prints debug messages"
+ Settable: null
+ Value: "0"
+ Args:
+ type: "object"
+ x-nullable: false
+ required: [Name, Description, Settable, Value]
+ properties:
+ Name:
+ x-nullable: false
+ type: "string"
+ example: "args"
+ Description:
+ x-nullable: false
+ type: "string"
+ example: "command line arguments"
+ Settable:
+ type: "array"
+ items:
+ type: "string"
+ Value:
+ type: "array"
+ items:
+ type: "string"
+ rootfs:
+ type: "object"
+ properties:
+ type:
+ type: "string"
+ example: "layers"
+ diff_ids:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887"
+ - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"
+
+ ObjectVersion:
+ description: |
+ The version number of the object such as node, service, etc. This is needed
+ to avoid conflicting writes. The client must send the version number along
+ with the modified specification when updating these objects.
+
+ This approach ensures safe concurrency and determinism in that the change
+ on the object may not be applied if the version number has changed from the
+ last read. In other words, if two update requests specify the same base
+ version, only one of the requests can succeed. As a result, two separate
+ update requests that happen at the same time will not unintentionally
+ overwrite each other.
+ type: "object"
+ properties:
+ Index:
+ type: "integer"
+ format: "uint64"
+ example: 373531
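+ # Illustrative only (not part of the upstream spec): a read-modify-update
+ # cycle against a local daemon socket; the API version in the path is an
+ # assumption, adjust it to your daemon.
+ #
+ #   curl --unix-socket /var/run/docker.sock \
+ #     http://localhost/v1.47/nodes/24ifsmvkjbyhk
+ #   # -> ... "Version": {"Index": 373531} ...
+ #   curl -X POST --unix-socket /var/run/docker.sock \
+ #     "http://localhost/v1.47/nodes/24ifsmvkjbyhk/update?version=373531" \
+ #     -H "Content-Type: application/json" \
+ #     -d '{"Role": "manager", "Availability": "active"}'
+ #
+ # If a concurrent update bumped Index past 373531, the second call fails,
+ # so the stale write is rejected instead of applied.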
+
+ NodeSpec:
+ type: "object"
+ properties:
+ Name:
+ description: "Name for the node."
+ type: "string"
+ example: "my-node"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Role:
+ description: "Role of the node."
+ type: "string"
+ enum:
+ - "worker"
+ - "manager"
+ example: "manager"
+ Availability:
+ description: "Availability of the node."
+ type: "string"
+ enum:
+ - "active"
+ - "pause"
+ - "drain"
+ example: "active"
+ example:
+ Availability: "active"
+ Name: "node-name"
+ Role: "manager"
+ Labels:
+ foo: "bar"
+
+ Node:
+ type: "object"
+ properties:
+ ID:
+ type: "string"
+ example: "24ifsmvkjbyhk"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ description: |
+ Date and time at which the node was added to the swarm in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2016-08-18T10:44:24.496525531Z"
+ UpdatedAt:
+ description: |
+ Date and time at which the node was last updated in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2017-08-09T07:09:37.632105588Z"
+ Spec:
+ $ref: "#/definitions/NodeSpec"
+ Description:
+ $ref: "#/definitions/NodeDescription"
+ Status:
+ $ref: "#/definitions/NodeStatus"
+ ManagerStatus:
+ $ref: "#/definitions/ManagerStatus"
+
+ NodeDescription:
+ description: |
+ NodeDescription encapsulates the properties of the Node as reported by the
+ agent.
+ type: "object"
+ properties:
+ Hostname:
+ type: "string"
+ example: "bf3067039e47"
+ Platform:
+ $ref: "#/definitions/Platform"
+ Resources:
+ $ref: "#/definitions/ResourceObject"
+ Engine:
+ $ref: "#/definitions/EngineDescription"
+ TLSInfo:
+ $ref: "#/definitions/TLSInfo"
+
+ Platform:
+ description: |
+ Platform represents the platform (Arch/OS).
+ type: "object"
+ properties:
+ Architecture:
+ description: |
+ Architecture represents the hardware architecture (for example,
+ `x86_64`).
+ type: "string"
+ example: "x86_64"
+ OS:
+ description: |
+ OS represents the Operating System (for example, `linux` or `windows`).
+ type: "string"
+ example: "linux"
+
+ EngineDescription:
+ description: "EngineDescription provides information about an engine."
+ type: "object"
+ properties:
+ EngineVersion:
+ type: "string"
+ example: "17.06.0"
+ Labels:
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ foo: "bar"
+ Plugins:
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Type:
+ type: "string"
+ Name:
+ type: "string"
+ example:
+ - Type: "Log"
+ Name: "awslogs"
+ - Type: "Log"
+ Name: "fluentd"
+ - Type: "Log"
+ Name: "gcplogs"
+ - Type: "Log"
+ Name: "gelf"
+ - Type: "Log"
+ Name: "journald"
+ - Type: "Log"
+ Name: "json-file"
+ - Type: "Log"
+ Name: "splunk"
+ - Type: "Log"
+ Name: "syslog"
+ - Type: "Network"
+ Name: "bridge"
+ - Type: "Network"
+ Name: "host"
+ - Type: "Network"
+ Name: "ipvlan"
+ - Type: "Network"
+ Name: "macvlan"
+ - Type: "Network"
+ Name: "null"
+ - Type: "Network"
+ Name: "overlay"
+ - Type: "Volume"
+ Name: "local"
+ - Type: "Volume"
+ Name: "localhost:5000/vieux/sshfs:latest"
+ - Type: "Volume"
+ Name: "vieux/sshfs:latest"
+
+ TLSInfo:
+ description: |
+ Information about the issuer of leaf TLS certificates and the trusted root
+ CA certificate.
+ type: "object"
+ properties:
+ TrustRoot:
+ description: |
+ The root CA certificate(s) that are used to validate leaf TLS
+ certificates.
+ type: "string"
+      CertIssuerSubject:
+        description: |
+          The base64-url-safe-encoded raw subject bytes of the issuer.
+        type: "string"
+ CertIssuerPublicKey:
+ description: |
+ The base64-url-safe-encoded raw public key bytes of the issuer.
+ type: "string"
+ example:
+ TrustRoot: |
+ -----BEGIN CERTIFICATE-----
+ MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw
+ EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0
+ MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH
+ A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf
+ 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
+ Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO
+ PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz
+ pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H
+ -----END CERTIFICATE-----
+ CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh"
+ CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A=="
+
+ NodeStatus:
+ description: |
+ NodeStatus represents the status of a node.
+
+ It provides the current status of the node, as seen by the manager.
+ type: "object"
+ properties:
+ State:
+ $ref: "#/definitions/NodeState"
+ Message:
+ type: "string"
+ example: ""
+ Addr:
+ description: "IP address of the node."
+ type: "string"
+ example: "172.17.0.2"
+
+ NodeState:
+ description: "NodeState represents the state of a node."
+ type: "string"
+ enum:
+ - "unknown"
+ - "down"
+ - "ready"
+ - "disconnected"
+ example: "ready"
+
+ ManagerStatus:
+ description: |
+ ManagerStatus represents the status of a manager.
+
+ It provides the current status of a node's manager component, if the node
+ is a manager.
+ x-nullable: true
+ type: "object"
+ properties:
+ Leader:
+ type: "boolean"
+ default: false
+ example: true
+ Reachability:
+ $ref: "#/definitions/Reachability"
+ Addr:
+ description: |
+ The IP address and port at which the manager is reachable.
+ type: "string"
+ example: "10.0.0.46:2377"
+
+ Reachability:
+ description: "Reachability represents the reachability of a node."
+ type: "string"
+ enum:
+ - "unknown"
+ - "unreachable"
+ - "reachable"
+ example: "reachable"
+
+ SwarmSpec:
+ description: "User modifiable swarm configuration."
+ type: "object"
+ properties:
+ Name:
+ description: "Name of the swarm."
+ type: "string"
+ example: "default"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.corp.type: "production"
+ com.example.corp.department: "engineering"
+ Orchestration:
+ description: "Orchestration configuration."
+ type: "object"
+ x-nullable: true
+ properties:
+ TaskHistoryRetentionLimit:
+ description: |
+ The number of historic tasks to keep per instance or node. If
+ negative, never remove completed or failed tasks.
+ type: "integer"
+ format: "int64"
+ example: 10
+ Raft:
+ description: "Raft configuration."
+ type: "object"
+ properties:
+ SnapshotInterval:
+ description: "The number of log entries between snapshots."
+ type: "integer"
+ format: "uint64"
+ example: 10000
+ KeepOldSnapshots:
+ description: |
+ The number of snapshots to keep beyond the current snapshot.
+ type: "integer"
+ format: "uint64"
+ LogEntriesForSlowFollowers:
+ description: |
+ The number of log entries to keep around to sync up slow followers
+ after a snapshot is created.
+ type: "integer"
+ format: "uint64"
+ example: 500
+ ElectionTick:
+ description: |
+ The number of ticks that a follower will wait for a message from
+ the leader before becoming a candidate and starting an election.
+ `ElectionTick` must be greater than `HeartbeatTick`.
+
+ A tick currently defaults to one second, so these translate
+ directly to seconds currently, but this is NOT guaranteed.
+ type: "integer"
+ example: 3
+ HeartbeatTick:
+ description: |
+ The number of ticks between heartbeats. Every HeartbeatTick ticks,
+ the leader will send a heartbeat to the followers.
+
+ A tick currently defaults to one second, so these translate
+ directly to seconds currently, but this is NOT guaranteed.
+ type: "integer"
+ example: 1
+ Dispatcher:
+ description: "Dispatcher configuration."
+ type: "object"
+ x-nullable: true
+ properties:
+ HeartbeatPeriod:
+ description: |
+ The delay for an agent to send a heartbeat to the dispatcher.
+ type: "integer"
+ format: "int64"
+ example: 5000000000
+ CAConfig:
+ description: "CA configuration."
+ type: "object"
+ x-nullable: true
+ properties:
+ NodeCertExpiry:
+ description: "The duration node certificates are issued for."
+ type: "integer"
+ format: "int64"
+ example: 7776000000000000
+ ExternalCAs:
+ description: |
+ Configuration for forwarding signing requests to an external
+ certificate authority.
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Protocol:
+ description: |
+ Protocol for communication with the external CA (currently
+ only `cfssl` is supported).
+ type: "string"
+ enum:
+ - "cfssl"
+ default: "cfssl"
+ URL:
+ description: |
+ URL where certificate signing requests should be sent.
+ type: "string"
+ Options:
+ description: |
+ An object with key/value pairs that are interpreted as
+ protocol-specific options for the external CA driver.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ CACert:
+ description: |
+ The root CA certificate (in PEM format) this external CA uses
+ to issue TLS certificates (assumed to be to the current swarm
+ root CA certificate if not provided).
+ type: "string"
+ SigningCACert:
+ description: |
+ The desired signing CA certificate for all swarm node TLS leaf
+ certificates, in PEM format.
+ type: "string"
+ SigningCAKey:
+ description: |
+ The desired signing CA key for all swarm node TLS leaf certificates,
+ in PEM format.
+ type: "string"
+ ForceRotate:
+ description: |
+ An integer whose purpose is to force swarm to generate a new
+ signing CA certificate and key, if none have been specified in
+ `SigningCACert` and `SigningCAKey`
+ format: "uint64"
+ type: "integer"
+ EncryptionConfig:
+ description: "Parameters related to encryption-at-rest."
+ type: "object"
+ properties:
+ AutoLockManagers:
+ description: |
+ If set, generate a key and use it to lock data stored on the
+ managers.
+ type: "boolean"
+ example: false
+ TaskDefaults:
+ description: "Defaults for creating tasks in this cluster."
+ type: "object"
+ properties:
+ LogDriver:
+ description: |
+ The log driver to use for tasks created in the orchestrator if
+ unspecified by a service.
+
+ Updating this value only affects new tasks. Existing tasks continue
+ to use their previously configured log driver until recreated.
+ type: "object"
+ properties:
+ Name:
+ description: |
+ The log driver to use as a default for new tasks.
+ type: "string"
+ example: "json-file"
+ Options:
+ description: |
+ Driver-specific options for the selected log driver, specified
+ as key/value pairs.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ "max-file": "10"
+ "max-size": "100m"
+
+ # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but
+ # without `JoinTokens`.
+ ClusterInfo:
+ description: |
+ ClusterInfo represents information about the swarm as is returned by the
+ "/info" endpoint. Join-tokens are not included.
+ x-nullable: true
+ type: "object"
+ properties:
+ ID:
+ description: "The ID of the swarm."
+ type: "string"
+ example: "abajmipo7b4xz5ip2nrla6b11"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ description: |
+ Date and time at which the swarm was initialised in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2016-08-18T10:44:24.496525531Z"
+ UpdatedAt:
+ description: |
+ Date and time at which the swarm was last updated in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2017-08-09T07:09:37.632105588Z"
+ Spec:
+ $ref: "#/definitions/SwarmSpec"
+ TLSInfo:
+ $ref: "#/definitions/TLSInfo"
+ RootRotationInProgress:
+ description: |
+          Whether there is currently a root CA rotation in progress for the swarm.
+ type: "boolean"
+ example: false
+ DataPathPort:
+ description: |
+ DataPathPort specifies the data path port number for data traffic.
+ Acceptable port range is 1024 to 49151.
+          If no port is set, or it is set to 0, the default port (4789) is used.
+ type: "integer"
+ format: "uint32"
+ default: 4789
+ example: 4789
+ DefaultAddrPool:
+ description: |
+ Default Address Pool specifies default subnet pools for global scope
+ networks.
+ type: "array"
+ items:
+ type: "string"
+ format: "CIDR"
+ example: ["10.10.0.0/16", "20.20.0.0/16"]
+ SubnetSize:
+ description: |
+ SubnetSize specifies the subnet size of the networks created from the
+ default subnet pool.
+ type: "integer"
+ format: "uint32"
+ maximum: 29
+ default: 24
+ example: 24
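+
+  # Illustrative only: DataPathPort, DefaultAddrPool, and SubnetSize are fixed
+  # when the cluster is created; a hedged `docker swarm init` sketch with
+  # assumed values:
+  #
+  #   docker swarm init \
+  #     --data-path-port 4789 \
+  #     --default-addr-pool 10.10.0.0/16 --default-addr-pool 20.20.0.0/16 \
+  #     --default-addr-pool-mask-length 24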
+
+ JoinTokens:
+ description: |
+ JoinTokens contains the tokens workers and managers need to join the swarm.
+ type: "object"
+ properties:
+ Worker:
+ description: |
+ The token workers can use to join the swarm.
+ type: "string"
+ example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx"
+ Manager:
+ description: |
+ The token managers can use to join the swarm.
+ type: "string"
+ example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
+
+ Swarm:
+ type: "object"
+ allOf:
+ - $ref: "#/definitions/ClusterInfo"
+ - type: "object"
+ properties:
+ JoinTokens:
+ $ref: "#/definitions/JoinTokens"
+
+ TaskSpec:
+ description: "User modifiable task configuration."
+ type: "object"
+ properties:
+ PluginSpec:
+ type: "object"
+ description: |
+ Plugin spec for the service. *(Experimental release only.)*
+
+ <p><br /></p>
+
+ > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
+ > mutually exclusive. PluginSpec is only used when the Runtime field
+ > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
+ > field is set to `attachment`.
+ properties:
+ Name:
+ description: "The name or 'alias' to use for the plugin."
+ type: "string"
+ Remote:
+ description: "The plugin image reference to use."
+ type: "string"
+ Disabled:
+ description: "Disable the plugin once scheduled."
+ type: "boolean"
+ PluginPrivilege:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginPrivilege"
+ ContainerSpec:
+ type: "object"
+ description: |
+ Container spec for the service.
+
+ <p><br /></p>
+
+ > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
+ > mutually exclusive. PluginSpec is only used when the Runtime field
+ > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
+ > field is set to `attachment`.
+ properties:
+ Image:
+ description: "The image name to use for the container"
+ type: "string"
+ Labels:
+ description: "User-defined key/value data."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Command:
+ description: "The command to be run in the image."
+ type: "array"
+ items:
+ type: "string"
+ Args:
+ description: "Arguments to the command."
+ type: "array"
+ items:
+ type: "string"
+ Hostname:
+ description: |
+ The hostname to use for the container, as a valid
+ [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname.
+ type: "string"
+ Env:
+ description: |
+ A list of environment variables in the form `VAR=value`.
+ type: "array"
+ items:
+ type: "string"
+ Dir:
+ description: "The working directory for commands to run in."
+ type: "string"
+ User:
+ description: "The user inside the container."
+ type: "string"
+ Groups:
+ type: "array"
+ description: |
+ A list of additional groups that the container process will run as.
+ items:
+ type: "string"
+ Privileges:
+ type: "object"
+ description: "Security options for the container"
+ properties:
+ CredentialSpec:
+ type: "object"
+ description: "CredentialSpec for managed service account (Windows only)"
+ properties:
+ Config:
+ type: "string"
+ example: "0bt9dmxjvjiqermk6xrop3ekq"
+ description: |
+ Load credential spec from a Swarm Config with the given ID.
+ The specified config must also be present in the Configs
+ field with the Runtime property set.
+
+ <p><br /></p>
+
+ > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,
+ > and `CredentialSpec.Config` are mutually exclusive.
+ File:
+ type: "string"
+ example: "spec.json"
+ description: |
+ Load credential spec from this file. The file is read by
+ the daemon, and must be present in the `CredentialSpecs`
+ subdirectory in the docker data directory, which defaults
+ to `C:\ProgramData\Docker\` on Windows.
+
+ For example, specifying `spec.json` loads
+ `C:\ProgramData\Docker\CredentialSpecs\spec.json`.
+
+ <p><br /></p>
+
+ > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,
+ > and `CredentialSpec.Config` are mutually exclusive.
+ Registry:
+ type: "string"
+ description: |
+ Load credential spec from this value in the Windows
+ registry. The specified registry value must be located in:
+
+ `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs`
+
+ <p><br /></p>
+
+ > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,
+ > and `CredentialSpec.Config` are mutually exclusive.
+ SELinuxContext:
+ type: "object"
+ description: "SELinux labels of the container"
+ properties:
+ Disable:
+ type: "boolean"
+ description: "Disable SELinux"
+ User:
+ type: "string"
+ description: "SELinux user label"
+ Role:
+ type: "string"
+ description: "SELinux role label"
+ Type:
+ type: "string"
+ description: "SELinux type label"
+ Level:
+ type: "string"
+ description: "SELinux level label"
+ Seccomp:
+ type: "object"
+ description: "Options for configuring seccomp on the container"
+ properties:
+ Mode:
+ type: "string"
+ enum:
+ - "default"
+ - "unconfined"
+ - "custom"
+ Profile:
+ description: "The custom seccomp profile as a json object"
+ type: "string"
+ AppArmor:
+ type: "object"
+ description: "Options for configuring AppArmor on the container"
+ properties:
+ Mode:
+ type: "string"
+ enum:
+ - "default"
+ - "disabled"
+ NoNewPrivileges:
+ type: "boolean"
+ description: "Configuration of the no_new_privs bit in the container"
+
+ TTY:
+ description: "Whether a pseudo-TTY should be allocated."
+ type: "boolean"
+ OpenStdin:
+ description: "Open `stdin`"
+ type: "boolean"
+ ReadOnly:
+ description: "Mount the container's root filesystem as read only."
+ type: "boolean"
+ Mounts:
+ description: |
+ Specification for mounts to be added to containers created as part
+ of the service.
+ type: "array"
+ items:
+ $ref: "#/definitions/Mount"
+ StopSignal:
+ description: "Signal to stop the container."
+ type: "string"
+ StopGracePeriod:
+ description: |
+ Amount of time to wait for the container to terminate before
+ forcefully killing it.
+ type: "integer"
+ format: "int64"
+ HealthCheck:
+ $ref: "#/definitions/HealthConfig"
+ Hosts:
+ type: "array"
+ description: |
+ A list of hostname/IP mappings to add to the container's `hosts`
+ file. The format of extra hosts is specified in the
+ [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html)
+ man page:
+
+ IP_address canonical_hostname [aliases...]
+ items:
+ type: "string"
+ DNSConfig:
+ description: |
+ Specification for DNS related configurations in resolver configuration
+ file (`resolv.conf`).
+ type: "object"
+ properties:
+ Nameservers:
+ description: "The IP addresses of the name servers."
+ type: "array"
+ items:
+ type: "string"
+ Search:
+ description: "A search list for host-name lookup."
+ type: "array"
+ items:
+ type: "string"
+ Options:
+ description: |
+ A list of internal resolver variables to be modified (e.g.,
+ `debug`, `ndots:3`, etc.).
+ type: "array"
+ items:
+ type: "string"
+ Secrets:
+ description: |
+ Secrets contains references to zero or more secrets that will be
+ exposed to the service.
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ File:
+ description: |
+ File represents a specific target that is backed by a file.
+ type: "object"
+ properties:
+ Name:
+ description: |
+ Name represents the final filename in the filesystem.
+ type: "string"
+ UID:
+ description: "UID represents the file UID."
+ type: "string"
+ GID:
+ description: "GID represents the file GID."
+ type: "string"
+ Mode:
+ description: "Mode represents the FileMode of the file."
+ type: "integer"
+ format: "uint32"
+ SecretID:
+ description: |
+ SecretID represents the ID of the specific secret that we're
+ referencing.
+ type: "string"
+ SecretName:
+ description: |
+ SecretName is the name of the secret that this references,
+ but this is just provided for lookup/display purposes. The
+ secret in the reference will be identified by its ID.
+ type: "string"
+ OomScoreAdj:
+ type: "integer"
+ format: "int64"
+ description: |
+ An integer value containing the score given to the container in
+ order to tune OOM killer preferences.
+ example: 0
+ Configs:
+ description: |
+ Configs contains references to zero or more configs that will be
+ exposed to the service.
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ File:
+ description: |
+ File represents a specific target that is backed by a file.
+
+ <p><br /><p>
+
+ > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive
+ type: "object"
+ properties:
+ Name:
+ description: |
+ Name represents the final filename in the filesystem.
+ type: "string"
+ UID:
+ description: "UID represents the file UID."
+ type: "string"
+ GID:
+ description: "GID represents the file GID."
+ type: "string"
+ Mode:
+ description: "Mode represents the FileMode of the file."
+ type: "integer"
+ format: "uint32"
+ Runtime:
+ description: |
+ Runtime represents a target that is not mounted into the
+                    container but is used by the task.
+
+ <p><br /><p>
+
+ > **Note**: `Configs.File` and `Configs.Runtime` are mutually
+ > exclusive
+ type: "object"
+ ConfigID:
+ description: |
+ ConfigID represents the ID of the specific config that we're
+ referencing.
+ type: "string"
+ ConfigName:
+ description: |
+ ConfigName is the name of the config that this references,
+ but this is just provided for lookup/display purposes. The
+ config in the reference will be identified by its ID.
+ type: "string"
+ Isolation:
+ type: "string"
+ description: |
+ Isolation technology of the containers running the service.
+ (Windows only)
+ enum:
+ - "default"
+ - "process"
+ - "hyperv"
+ - ""
+ Init:
+ description: |
+ Run an init inside the container that forwards signals and reaps
+ processes. This field is omitted if empty, and the default (as
+ configured on the daemon) is used.
+ type: "boolean"
+ x-nullable: true
+ Sysctls:
+ description: |
+              Set kernel namespaced parameters (sysctls) in the container.
+              The Sysctls option on services accepts the same sysctls as are
+              supported on containers. Note that while the same sysctls are
+              supported, no guarantees or checks are made about their
+              suitability for a clustered environment, and it's up to the user
+              to determine whether a given sysctl will work properly in a
+              Service.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ # This option is not used by Windows containers
+ CapabilityAdd:
+ type: "array"
+ description: |
+ A list of kernel capabilities to add to the default set
+ for the container.
+ items:
+ type: "string"
+ example:
+ - "CAP_NET_RAW"
+ - "CAP_SYS_ADMIN"
+ - "CAP_SYS_CHROOT"
+ - "CAP_SYSLOG"
+ CapabilityDrop:
+ type: "array"
+ description: |
+ A list of kernel capabilities to drop from the default set
+ for the container.
+ items:
+ type: "string"
+ example:
+ - "CAP_NET_RAW"
+ Ulimits:
+ description: |
+              A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Name:
+ description: "Name of ulimit"
+ type: "string"
+ Soft:
+ description: "Soft limit"
+ type: "integer"
+ Hard:
+ description: "Hard limit"
+ type: "integer"
+ NetworkAttachmentSpec:
+ description: |
+ Read-only spec type for non-swarm containers attached to swarm overlay
+ networks.
+
+ <p><br /></p>
+
+ > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
+ > mutually exclusive. PluginSpec is only used when the Runtime field
+ > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
+ > field is set to `attachment`.
+ type: "object"
+ properties:
+ ContainerID:
+ description: "ID of the container represented by this task"
+ type: "string"
+ Resources:
+ description: |
+ Resource requirements which apply to each individual container created
+ as part of the service.
+ type: "object"
+ properties:
+ Limits:
+ description: "Define resources limits."
+ $ref: "#/definitions/Limit"
+ Reservations:
+ description: "Define resources reservation."
+ $ref: "#/definitions/ResourceObject"
+ RestartPolicy:
+ description: |
+ Specification for the restart policy which applies to containers
+ created as part of this service.
+ type: "object"
+ properties:
+ Condition:
+ description: "Condition for restart."
+ type: "string"
+ enum:
+ - "none"
+ - "on-failure"
+ - "any"
+ Delay:
+ description: "Delay between restart attempts."
+ type: "integer"
+ format: "int64"
+ MaxAttempts:
+ description: |
+ Maximum attempts to restart a given container before giving up
+ (default value is 0, which is ignored).
+ type: "integer"
+ format: "int64"
+ default: 0
+ Window:
+ description: |
+              Window is the time window used to evaluate the restart policy
+ (default value is 0, which is unbounded).
+ type: "integer"
+ format: "int64"
+ default: 0
+ Placement:
+ type: "object"
+ properties:
+ Constraints:
+ description: |
+ An array of constraint expressions to limit the set of nodes where
+ a task can be scheduled. Constraint expressions can either use a
+ _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find
+ nodes that satisfy every expression (AND match). Constraints can
+ match node or Docker Engine labels as follows:
+
+ node attribute | matches | example
+ ---------------------|--------------------------------|-----------------------------------------------
+ `node.id` | Node ID | `node.id==2ivku8v2gvtg4`
+ `node.hostname` | Node hostname | `node.hostname!=node-2`
+ `node.role` | Node role (`manager`/`worker`) | `node.role==manager`
+ `node.platform.os` | Node operating system | `node.platform.os==windows`
+ `node.platform.arch` | Node architecture | `node.platform.arch==x86_64`
+ `node.labels` | User-defined node labels | `node.labels.security==high`
+ `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-24.04`
+
+ `engine.labels` apply to Docker Engine labels like operating system,
+ drivers, etc. Swarm administrators add `node.labels` for operational
+ purposes by using the [`node update endpoint`](#operation/NodeUpdate).
+
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "node.hostname!=node3.corp.example.com"
+ - "node.role!=manager"
+ - "node.labels.type==production"
+ - "node.platform.os==linux"
+ - "node.platform.arch==x86_64"
+ Preferences:
+ description: |
+ Preferences provide a way to make the scheduler aware of factors
+ such as topology. They are provided in order from highest to
+ lowest precedence.
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Spread:
+ type: "object"
+ properties:
+ SpreadDescriptor:
+ description: |
+ label descriptor, such as `engine.labels.az`.
+ type: "string"
+ example:
+ - Spread:
+ SpreadDescriptor: "node.labels.datacenter"
+ - Spread:
+ SpreadDescriptor: "node.labels.rack"
+ MaxReplicas:
+ description: |
+              Maximum number of replicas per node (default value is 0, which
+              is unlimited).
+ type: "integer"
+ format: "int64"
+ default: 0
+ Platforms:
+ description: |
+ Platforms stores all the platforms that the service's image can
+ run on. This field is used in the platform filter for scheduling.
+ If empty, then the platform filter is off, meaning there are no
+ scheduling restrictions.
+ type: "array"
+ items:
+ $ref: "#/definitions/Platform"
+ ForceUpdate:
+ description: |
+ A counter that triggers an update even if no relevant parameters have
+ been changed.
+ type: "integer"
+ Runtime:
+ description: |
+ Runtime is the type of runtime specified for the task executor.
+ type: "string"
+ Networks:
+ description: "Specifies which networks the service should attach to."
+ type: "array"
+ items:
+ $ref: "#/definitions/NetworkAttachmentConfig"
+ LogDriver:
+ description: |
+ Specifies the log driver to use for tasks created from this spec. If
+ not present, the default one for the swarm will be used, finally
+ falling back to the engine default if not specified.
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Options:
+ type: "object"
+ additionalProperties:
+ type: "string"
+
+ TaskState:
+ type: "string"
+ enum:
+ - "new"
+ - "allocated"
+ - "pending"
+ - "assigned"
+ - "accepted"
+ - "preparing"
+ - "ready"
+ - "starting"
+ - "running"
+ - "complete"
+ - "shutdown"
+ - "failed"
+ - "rejected"
+ - "remove"
+ - "orphaned"
+
+ ContainerStatus:
+ type: "object"
+ description: "represents the status of a container."
+ properties:
+ ContainerID:
+ type: "string"
+ PID:
+ type: "integer"
+ ExitCode:
+ type: "integer"
+
+ PortStatus:
+ type: "object"
+ description: "represents the port status of a task's host ports whose service has published host ports"
+ properties:
+ Ports:
+ type: "array"
+ items:
+ $ref: "#/definitions/EndpointPortConfig"
+
+ TaskStatus:
+ type: "object"
+ description: "represents the status of a task."
+ properties:
+ Timestamp:
+ type: "string"
+ format: "dateTime"
+ State:
+ $ref: "#/definitions/TaskState"
+ Message:
+ type: "string"
+ Err:
+ type: "string"
+ ContainerStatus:
+ $ref: "#/definitions/ContainerStatus"
+ PortStatus:
+ $ref: "#/definitions/PortStatus"
+
+ Task:
+ type: "object"
+ properties:
+ ID:
+ description: "The ID of the task."
+ type: "string"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ Name:
+ description: "Name of the task."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Spec:
+ $ref: "#/definitions/TaskSpec"
+ ServiceID:
+ description: "The ID of the service this task is part of."
+ type: "string"
+ Slot:
+ type: "integer"
+ NodeID:
+ description: "The ID of the node that this task is on."
+ type: "string"
+ AssignedGenericResources:
+ $ref: "#/definitions/GenericResources"
+ Status:
+ $ref: "#/definitions/TaskStatus"
+ DesiredState:
+ $ref: "#/definitions/TaskState"
+ JobIteration:
+ description: |
+ If the Service this Task belongs to is a job-mode service, contains
+ the JobIteration of the Service this Task was created for. Absent if
+ the Task was created for a Replicated or Global Service.
+ $ref: "#/definitions/ObjectVersion"
+ example:
+ ID: "0kzzo1i0y4jz6027t0k7aezc7"
+ Version:
+ Index: 71
+ CreatedAt: "2016-06-07T21:07:31.171892745Z"
+ UpdatedAt: "2016-06-07T21:07:31.376370513Z"
+ Spec:
+ ContainerSpec:
+ Image: "redis"
+ Resources:
+ Limits: {}
+ Reservations: {}
+ RestartPolicy:
+ Condition: "any"
+ MaxAttempts: 0
+ Placement: {}
+ ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
+ Slot: 1
+ NodeID: "60gvrl6tm78dmak4yl7srz94v"
+ Status:
+ Timestamp: "2016-06-07T21:07:31.290032978Z"
+ State: "running"
+ Message: "started"
+ ContainerStatus:
+ ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035"
+ PID: 677
+ DesiredState: "running"
+ NetworksAttachments:
+ - Network:
+ ID: "4qvuz4ko70xaltuqbt8956gd1"
+ Version:
+ Index: 18
+ CreatedAt: "2016-06-07T20:31:11.912919752Z"
+ UpdatedAt: "2016-06-07T21:07:29.955277358Z"
+ Spec:
+ Name: "ingress"
+ Labels:
+ com.docker.swarm.internal: "true"
+ DriverConfiguration: {}
+ IPAMOptions:
+ Driver: {}
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ DriverState:
+ Name: "overlay"
+ Options:
+ com.docker.network.driver.overlay.vxlanid_list: "256"
+ IPAMOptions:
+ Driver:
+ Name: "default"
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ Addresses:
+ - "10.255.0.10/16"
+ AssignedGenericResources:
+ - DiscreteResourceSpec:
+ Kind: "SSD"
+ Value: 3
+ - NamedResourceSpec:
+ Kind: "GPU"
+ Value: "UUID1"
+ - NamedResourceSpec:
+ Kind: "GPU"
+ Value: "UUID2"
+
+ ServiceSpec:
+ description: "User modifiable configuration for a service."
+    type: "object"
+ properties:
+ Name:
+ description: "Name of the service."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ TaskTemplate:
+ $ref: "#/definitions/TaskSpec"
+ Mode:
+ description: "Scheduling mode for the service."
+ type: "object"
+ properties:
+ Replicated:
+ type: "object"
+ properties:
+ Replicas:
+ type: "integer"
+ format: "int64"
+ Global:
+ type: "object"
+ ReplicatedJob:
+ description: |
+ The mode used for services with a finite number of tasks that run
+ to a completed state.
+ type: "object"
+ properties:
+ MaxConcurrent:
+ description: |
+ The maximum number of replicas to run simultaneously.
+ type: "integer"
+ format: "int64"
+ default: 1
+ TotalCompletions:
+ description: |
+ The total number of replicas desired to reach the Completed
+                  state. If unset, will default to the value of `MaxConcurrent`.
+ type: "integer"
+ format: "int64"
+ GlobalJob:
+ description: |
+ The mode used for services which run a task to the completed state
+ on each valid node.
+ type: "object"
+ UpdateConfig:
+ description: "Specification for the update strategy of the service."
+ type: "object"
+ properties:
+ Parallelism:
+ description: |
+ Maximum number of tasks to be updated in one iteration (0 means
+ unlimited parallelism).
+ type: "integer"
+ format: "int64"
+ Delay:
+ description: "Amount of time between updates, in nanoseconds."
+ type: "integer"
+ format: "int64"
+ FailureAction:
+ description: |
+ Action to take if an updated task fails to run, or stops running
+ during the update.
+ type: "string"
+ enum:
+ - "continue"
+ - "pause"
+ - "rollback"
+ Monitor:
+ description: |
+ Amount of time to monitor each updated task for failures, in
+ nanoseconds.
+ type: "integer"
+ format: "int64"
+ MaxFailureRatio:
+ description: |
+ The fraction of tasks that may fail during an update before the
+ failure action is invoked, specified as a floating point number
+ between 0 and 1.
+ type: "number"
+ default: 0
+ Order:
+ description: |
+ The order of operations when rolling out an updated task. Either
+ the old task is shut down before the new task is started, or the
+ new task is started before the old task is shut down.
+ type: "string"
+ enum:
+ - "stop-first"
+ - "start-first"
+ RollbackConfig:
+ description: "Specification for the rollback strategy of the service."
+ type: "object"
+ properties:
+ Parallelism:
+ description: |
+ Maximum number of tasks to be rolled back in one iteration (0 means
+ unlimited parallelism).
+ type: "integer"
+ format: "int64"
+ Delay:
+ description: |
+ Amount of time between rollback iterations, in nanoseconds.
+ type: "integer"
+ format: "int64"
+ FailureAction:
+ description: |
+              Action to take if a rolled back task fails to run, or stops
+ running during the rollback.
+ type: "string"
+ enum:
+ - "continue"
+ - "pause"
+ Monitor:
+ description: |
+ Amount of time to monitor each rolled back task for failures, in
+ nanoseconds.
+ type: "integer"
+ format: "int64"
+ MaxFailureRatio:
+ description: |
+ The fraction of tasks that may fail during a rollback before the
+ failure action is invoked, specified as a floating point number
+ between 0 and 1.
+ type: "number"
+ default: 0
+ Order:
+ description: |
+ The order of operations when rolling back a task. Either the old
+ task is shut down before the new task is started, or the new task
+ is started before the old task is shut down.
+ type: "string"
+ enum:
+ - "stop-first"
+ - "start-first"
+ Networks:
+ description: |
+ Specifies which networks the service should attach to.
+
+ Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead.
+ type: "array"
+ items:
+ $ref: "#/definitions/NetworkAttachmentConfig"
+
+ EndpointSpec:
+ $ref: "#/definitions/EndpointSpec"
+
+ EndpointPortConfig:
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Protocol:
+ type: "string"
+ enum:
+ - "tcp"
+ - "udp"
+ - "sctp"
+ TargetPort:
+ description: "The port inside the container."
+ type: "integer"
+ PublishedPort:
+ description: "The port on the swarm hosts."
+ type: "integer"
+ PublishMode:
+ description: |
+          The mode in which the port is published.
+
+ <p><br /></p>
+
+ - "ingress" makes the target port accessible on every node,
+ regardless of whether there is a task for the service running on
+ that node or not.
+ - "host" bypasses the routing mesh and publish the port directly on
+ the swarm node where that service is running.
+
+ type: "string"
+ enum:
+ - "ingress"
+ - "host"
+ default: "ingress"
+ example: "ingress"
+
+ EndpointSpec:
+ description: "Properties that can be configured to access and load balance a service."
+ type: "object"
+ properties:
+ Mode:
+ description: |
+ The mode of resolution to use for internal load balancing between tasks.
+ type: "string"
+ enum:
+ - "vip"
+ - "dnsrr"
+ default: "vip"
+ Ports:
+ description: |
+ List of exposed ports that this service is accessible on from the
+ outside. Ports can only be provided if `vip` resolution mode is used.
+ type: "array"
+ items:
+ $ref: "#/definitions/EndpointPortConfig"
+
+ Service:
+ type: "object"
+ properties:
+ ID:
+ type: "string"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ Spec:
+ $ref: "#/definitions/ServiceSpec"
+ Endpoint:
+ type: "object"
+ properties:
+ Spec:
+ $ref: "#/definitions/EndpointSpec"
+ Ports:
+ type: "array"
+ items:
+ $ref: "#/definitions/EndpointPortConfig"
+ VirtualIPs:
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ NetworkID:
+ type: "string"
+ Addr:
+ type: "string"
+ UpdateStatus:
+ description: "The status of a service update."
+ type: "object"
+ properties:
+ State:
+ type: "string"
+ enum:
+ - "updating"
+ - "paused"
+ - "completed"
+ StartedAt:
+ type: "string"
+ format: "dateTime"
+ CompletedAt:
+ type: "string"
+ format: "dateTime"
+ Message:
+ type: "string"
+ ServiceStatus:
+ description: |
+ The status of the service's tasks. Provided only when requested as
+ part of a ServiceList operation.
+ type: "object"
+ properties:
+ RunningTasks:
+ description: |
+ The number of tasks for the service currently in the Running state.
+ type: "integer"
+ format: "uint64"
+ example: 7
+ DesiredTasks:
+ description: |
+ The number of tasks for the service desired to be running.
+ For replicated services, this is the replica count from the
+ service spec. For global services, this is computed by taking
+ count of all tasks for the service with a Desired State other
+ than Shutdown.
+ type: "integer"
+ format: "uint64"
+ example: 10
+ CompletedTasks:
+ description: |
+ The number of tasks for a job that are in the Completed state.
+ This field must be cross-referenced with the service type, as the
+ value of 0 may mean the service is not in a job mode, or it may
+              mean the job-mode service has no tasks that have Completed yet.
+ type: "integer"
+ format: "uint64"
+ JobStatus:
+ description: |
+ The status of the service when it is in one of ReplicatedJob or
+ GlobalJob modes. Absent on Replicated and Global mode services. The
+ JobIteration is an ObjectVersion, but unlike the Service's version,
+ does not need to be sent with an update request.
+ type: "object"
+ properties:
+ JobIteration:
+ description: |
+ JobIteration is a value increased each time a Job is executed,
+ successfully or otherwise. "Executed", in this case, means the
+ job as a whole has been started, not that an individual Task has
+ been launched. A job is "Executed" when its ServiceSpec is
+ updated. JobIteration can be used to disambiguate Tasks belonging
+              to different executions of a job. Though JobIteration will
+              increase with each subsequent execution, it may not necessarily
+              increase by 1, and so JobIteration should not be used to count
+              the number of times the job has been executed.
+ $ref: "#/definitions/ObjectVersion"
+ LastExecution:
+ description: |
+ The last time, as observed by the server, that this job was
+ started.
+ type: "string"
+ format: "dateTime"
+ example:
+ ID: "9mnpnzenvg8p8tdbtq4wvbkcz"
+ Version:
+ Index: 19
+ CreatedAt: "2016-06-07T21:05:51.880065305Z"
+ UpdatedAt: "2016-06-07T21:07:29.962229872Z"
+ Spec:
+ Name: "hopeful_cori"
+ TaskTemplate:
+ ContainerSpec:
+ Image: "redis"
+ Resources:
+ Limits: {}
+ Reservations: {}
+ RestartPolicy:
+ Condition: "any"
+ MaxAttempts: 0
+ Placement: {}
+ ForceUpdate: 0
+ Mode:
+ Replicated:
+ Replicas: 1
+ UpdateConfig:
+ Parallelism: 1
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ RollbackConfig:
+ Parallelism: 1
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ EndpointSpec:
+ Mode: "vip"
+ Ports:
+ -
+ Protocol: "tcp"
+ TargetPort: 6379
+ PublishedPort: 30001
+ Endpoint:
+ Spec:
+ Mode: "vip"
+ Ports:
+ -
+ Protocol: "tcp"
+ TargetPort: 6379
+ PublishedPort: 30001
+ Ports:
+ -
+ Protocol: "tcp"
+ TargetPort: 6379
+ PublishedPort: 30001
+ VirtualIPs:
+ -
+ NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
+ Addr: "10.255.0.2/16"
+ -
+ NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
+ Addr: "10.255.0.3/16"
+
+ ImageDeleteResponseItem:
+ type: "object"
+ x-go-name: "DeleteResponse"
+ properties:
+ Untagged:
+ description: "The image ID of an image that was untagged"
+ type: "string"
+ Deleted:
+ description: "The image ID of an image that was deleted"
+ type: "string"
+
+ ServiceCreateResponse:
+ type: "object"
+ description: |
+      ServiceCreateResponse contains the information returned to a client
+      on the creation of a new service.
+ properties:
+ ID:
+ description: "The ID of the created service."
+ type: "string"
+ x-nullable: false
+ example: "ak7w3gjqoa3kuz8xcpnyy0pvl"
+ Warnings:
+ description: |
+ Optional warning message.
+
+ FIXME(thaJeztah): this should have "omitempty" in the generated type.
+ type: "array"
+ x-nullable: true
+ items:
+ type: "string"
+ example:
+ - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
+
+ ServiceUpdateResponse:
+ type: "object"
+ properties:
+ Warnings:
+ description: "Optional warning messages"
+ type: "array"
+ items:
+ type: "string"
+ example:
+ Warnings:
+ - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
+
+ ContainerInspectResponse:
+ type: "object"
+ title: "ContainerInspectResponse"
+ x-go-name: "InspectResponse"
+ properties:
+ Id:
+ description: |-
+ The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes).
+ type: "string"
+ x-go-name: "ID"
+ minLength: 64
+ maxLength: 64
+ pattern: "^[0-9a-fA-F]{64}$"
+ example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf"
+ Created:
+ description: |-
+ Date and time at which the container was created, formatted in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ x-nullable: true
+ example: "2025-02-17T17:43:39.64001363Z"
+ Path:
+ description: |-
+ The path to the command being run
+ type: "string"
+ example: "/bin/sh"
+ Args:
+ description: "The arguments to the command being run"
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "-c"
+ - "exit 9"
+ State:
+ $ref: "#/definitions/ContainerState"
+ Image:
+ description: |-
+ The ID (digest) of the image that this container was created from.
+ type: "string"
+ example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782"
+ ResolvConfPath:
+ description: |-
+ Location of the `/etc/resolv.conf` generated for the container on the
+ host.
+
+ This file is managed through the docker daemon, and should not be
+ accessed or modified by other tools.
+ type: "string"
+ example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/resolv.conf"
+ HostnamePath:
+ description: |-
+ Location of the `/etc/hostname` generated for the container on the
+ host.
+
+ This file is managed through the docker daemon, and should not be
+ accessed or modified by other tools.
+ type: "string"
+ example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hostname"
+ HostsPath:
+ description: |-
+ Location of the `/etc/hosts` generated for the container on the
+ host.
+
+ This file is managed through the docker daemon, and should not be
+ accessed or modified by other tools.
+ type: "string"
+ example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hosts"
+ LogPath:
+ description: |-
+ Location of the file used to buffer the container's logs. Depending on
+ the logging-driver used for the container, this field may be omitted.
+
+ This file is managed through the docker daemon, and should not be
+ accessed or modified by other tools.
+ type: "string"
+ x-nullable: true
+ example: "/var/lib/docker/containers/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59-json.log"
+ Name:
+ description: |-
+ The name associated with this container.
+
+ For historic reasons, the name may be prefixed with a forward-slash (`/`).
+ type: "string"
+ example: "/funny_chatelet"
+ RestartCount:
+ description: |-
+ Number of times the container was restarted since it was created,
+ or since daemon was started.
+ type: "integer"
+ example: 0
+ Driver:
+ description: |-
+ The storage-driver used for the container's filesystem (graph-driver
+ or snapshotter).
+ type: "string"
+ example: "overlayfs"
+ Platform:
+ description: |-
+ The platform (operating system) for which the container was created.
+
+ This field was introduced for the experimental "LCOW" (Linux Containers
+ On Windows) features, which has been removed. In most cases, this field
+ is equal to the host's operating system (`linux` or `windows`).
+ type: "string"
+ example: "linux"
+ ImageManifestDescriptor:
+ $ref: "#/definitions/OCIDescriptor"
+ description: |-
+ OCI descriptor of the platform-specific manifest of the image
+ the container was created from.
+
+ Note: Only available if the daemon provides a multi-platform
+ image store.
+ MountLabel:
+ description: |-
+ SELinux mount label set for the container.
+ type: "string"
+ example: ""
+ ProcessLabel:
+ description: |-
+ SELinux process label set for the container.
+ type: "string"
+ example: ""
+ AppArmorProfile:
+ description: |-
+ The AppArmor profile set for the container.
+ type: "string"
+ example: ""
+ ExecIDs:
+ description: |-
+ IDs of exec instances that are running in the container.
+ type: "array"
+ items:
+ type: "string"
+ x-nullable: true
+ example:
+ - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca"
+ - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4"
+ HostConfig:
+ $ref: "#/definitions/HostConfig"
+ GraphDriver:
+ $ref: "#/definitions/DriverData"
+ SizeRw:
+ description: |-
+ The size of files that have been created or changed by this container.
+
+ This field is omitted by default, and only set when size is requested
+ in the API request.
+ type: "integer"
+ format: "int64"
+ x-nullable: true
+ example: "122880"
+ SizeRootFs:
+ description: |-
+ The total size of all files in the read-only layers from the image
+ that the container uses. These layers can be shared between containers.
+
+ This field is omitted by default, and only set when size is requested
+ in the API request.
+ type: "integer"
+ format: "int64"
+ x-nullable: true
+ example: "1653948416"
+ Mounts:
+ description: |-
+ List of mounts used by the container.
+ type: "array"
+ items:
+ $ref: "#/definitions/MountPoint"
+ Config:
+ $ref: "#/definitions/ContainerConfig"
+ NetworkSettings:
+ $ref: "#/definitions/NetworkSettings"
+
+ ContainerSummary:
+ type: "object"
+ properties:
+ Id:
+ description: |-
+ The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes).
+ type: "string"
+ x-go-name: "ID"
+ minLength: 64
+ maxLength: 64
+ pattern: "^[0-9a-fA-F]{64}$"
+ example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf"
+ Names:
+ description: |-
+ The names associated with this container. Most containers have a single
+ name, but when using legacy "links", the container can have multiple
+ names.
+
+ For historic reasons, names are prefixed with a forward-slash (`/`).
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "/funny_chatelet"
+ Image:
+ description: |-
+ The name or ID of the image used to create the container.
+
+ This field shows the image reference as was specified when creating the container,
+ which can be in its canonical form (e.g., `docker.io/library/ubuntu:latest`
+ or `docker.io/library/ubuntu@sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`),
+          short form (e.g., `ubuntu:latest`), or the ID(-prefix) of the image (e.g., `72297848456d`).
+
+ The content of this field can be updated at runtime if the image used to
+ create the container is untagged, in which case the field is updated to
+          contain the image ID (digest) it was resolved to in its canonical,
+ non-truncated form (e.g., `sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`).
+ type: "string"
+ example: "docker.io/library/ubuntu:latest"
+ ImageID:
+ description: |-
+ The ID (digest) of the image that this container was created from.
+ type: "string"
+ example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782"
+ ImageManifestDescriptor:
+ $ref: "#/definitions/OCIDescriptor"
+ x-nullable: true
+ description: |
+ OCI descriptor of the platform-specific manifest of the image
+ the container was created from.
+
+ Note: Only available if the daemon provides a multi-platform
+ image store.
+
+ This field is not populated in the `GET /system/df` endpoint.
+ Command:
+ description: "Command to run when starting the container"
+ type: "string"
+ example: "/bin/bash"
+ Created:
+ description: |-
+ Date and time at which the container was created as a Unix timestamp
+ (number of seconds since EPOCH).
+ type: "integer"
+ format: "int64"
+ example: "1739811096"
+ Ports:
+ description: |-
+ Port-mappings for the container.
+ type: "array"
+ items:
+ $ref: "#/definitions/Port"
+ SizeRw:
+ description: |-
+ The size of files that have been created or changed by this container.
+
+ This field is omitted by default, and only set when size is requested
+ in the API request.
+ type: "integer"
+ format: "int64"
+ x-nullable: true
+ example: "122880"
+ SizeRootFs:
+ description: |-
+ The total size of all files in the read-only layers from the image
+ that the container uses. These layers can be shared between containers.
+
+ This field is omitted by default, and only set when size is requested
+ in the API request.
+ type: "integer"
+ format: "int64"
+ x-nullable: true
+ example: "1653948416"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.vendor: "Acme"
+ com.example.license: "GPL"
+ com.example.version: "1.0"
+ State:
+ description: |
+ The state of this container.
+ type: "string"
+ enum:
+ - "created"
+ - "running"
+ - "paused"
+ - "restarting"
+ - "exited"
+ - "removing"
+ - "dead"
+ example: "running"
+ Status:
+ description: |-
+ Additional human-readable status of this container (e.g. `Exit 0`)
+ type: "string"
+ example: "Up 4 days"
+ HostConfig:
+ type: "object"
+ description: |-
+ Summary of host-specific runtime information of the container. This
+ is a reduced set of information in the container's "HostConfig" as
+ available in the container "inspect" response.
+ properties:
+ NetworkMode:
+ description: |-
+ Networking mode (`host`, `none`, `container:<id>`) or name of the
+ primary network the container is using.
+
+ This field is primarily for backward compatibility. The container
+ can be connected to multiple networks for which information can be
+ found in the `NetworkSettings.Networks` field, which enumerates
+ settings per network.
+ type: "string"
+ example: "mynetwork"
+ Annotations:
+ description: |-
+ Arbitrary key-value metadata attached to the container.
+ type: "object"
+ x-nullable: true
+ additionalProperties:
+ type: "string"
+ example:
+ io.kubernetes.docker.type: "container"
+ io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3"
+ NetworkSettings:
+ description: |-
+ Summary of the container's network settings
+ type: "object"
+ properties:
+ Networks:
+ type: "object"
+ description: |-
+ Summary of network-settings for each network the container is
+ attached to.
+ additionalProperties:
+ $ref: "#/definitions/EndpointSettings"
+ Mounts:
+ type: "array"
+ description: |-
+ List of mounts used by the container.
+ items:
+ $ref: "#/definitions/MountPoint"
+
+ Driver:
+ description: "Driver represents a driver (network, logging, secrets)."
+ type: "object"
+ required: [Name]
+ properties:
+ Name:
+ description: "Name of the driver."
+ type: "string"
+ x-nullable: false
+ example: "some-driver"
+ Options:
+ description: "Key/value map of driver-specific options."
+ type: "object"
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+ example:
+ OptionA: "value for driver-specific option A"
+ OptionB: "value for driver-specific option B"
+
+ SecretSpec:
+ type: "object"
+ properties:
+ Name:
+ description: "User-defined name of the secret."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Data:
+ description: |
+ Data is the data to store as a secret, formatted as a Base64-url-safe-encoded
+ ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string.
+ It must be empty if the Driver field is set, in which case the data is
+ loaded from an external secret store. The maximum allowed size is 500KB,
+ as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize).
+
+ This field is only used to _create_ a secret, and is not returned by
+ other endpoints.
+ type: "string"
+ example: ""
+ Driver:
+ description: |
+ Name of the secrets driver used to fetch the secret's value from an
+ external secret store.
+ $ref: "#/definitions/Driver"
+ Templating:
+ description: |
+ Templating driver, if applicable
+
+ Templating controls whether and how to evaluate the config payload as
+ a template. If no driver is set, no templating is used.
+ $ref: "#/definitions/Driver"
+
+ Secret:
+ type: "object"
+ properties:
+ ID:
+ type: "string"
+ example: "blt1owaxmitz71s9v5zh81zun"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ example: "2017-07-20T13:55:28.678958722Z"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ example: "2017-07-20T13:55:28.678958722Z"
+ Spec:
+ $ref: "#/definitions/SecretSpec"
+
+ ConfigSpec:
+ type: "object"
+ properties:
+ Name:
+ description: "User-defined name of the config."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Data:
+ description: |
+ Data is the data to store as a config, formatted as a Base64-url-safe-encoded
+ ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string.
+ The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize).
+ type: "string"
+ Templating:
+ description: |
+ Templating driver, if applicable
+
+ Templating controls whether and how to evaluate the config payload as
+ a template. If no driver is set, no templating is used.
+ $ref: "#/definitions/Driver"
+
+ Config:
+ type: "object"
+ properties:
+ ID:
+ type: "string"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ Spec:
+ $ref: "#/definitions/ConfigSpec"
+
+ ContainerState:
+ description: |
+ ContainerState stores container's running state. It's part of ContainerJSONBase
+ and will be returned by the "inspect" command.
+ type: "object"
+ x-nullable: true
+ properties:
+ Status:
+ description: |
+ String representation of the container state. Can be one of "created",
+ "running", "paused", "restarting", "removing", "exited", or "dead".
+ type: "string"
+ enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"]
+ example: "running"
+ Running:
+ description: |
+ Whether this container is running.
+
+ Note that a running container can be _paused_. The `Running` and `Paused`
+ booleans are not mutually exclusive:
+
+ When pausing a container (on Linux), the freezer cgroup is used to suspend
+ all processes in the container. Freezing the process requires the process to
+ be running. As a result, paused containers are both `Running` _and_ `Paused`.
+
+ Use the `Status` field instead to determine if a container's state is "running".
+ type: "boolean"
+ example: true
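+
+      # Illustrative only: because a paused container is both Running and
+      # Paused, scripts should check Status instead of Running; the container
+      # name is an assumption:
+      #
+      #   docker inspect --format '{{.State.Status}}' my-container   # prints "paused"
+      #   docker inspect --format '{{.State.Running}}' my-container  # prints "true"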
+ Paused:
+ description: "Whether this container is paused."
+ type: "boolean"
+ example: false
+ Restarting:
+ description: "Whether this container is restarting."
+ type: "boolean"
+ example: false
+ OOMKilled:
+ description: |
+ Whether a process within this container has been killed because it ran
+ out of memory since the container was last started.
+ type: "boolean"
+ example: false
+ Dead:
+ type: "boolean"
+ example: false
+ Pid:
+ description: "The process ID of this container"
+ type: "integer"
+ example: 1234
+ ExitCode:
+ description: "The last exit code of this container"
+ type: "integer"
+ example: 0
+ Error:
+ type: "string"
+ StartedAt:
+ description: "The time when this container was last started."
+ type: "string"
+ example: "2020-01-06T09:06:59.461876391Z"
+ FinishedAt:
+ description: "The time when this container last exited."
+ type: "string"
+ example: "2020-01-06T09:07:59.461876391Z"
+ Health:
+ $ref: "#/definitions/Health"
+
+ ContainerCreateResponse:
+ description: "OK response to ContainerCreate operation"
+ type: "object"
+ title: "ContainerCreateResponse"
+ x-go-name: "CreateResponse"
+ required: [Id, Warnings]
+ properties:
+ Id:
+ description: "The ID of the created container"
+ type: "string"
+ x-nullable: false
+ example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743"
+ Warnings:
+ description: "Warnings encountered when creating the container"
+ type: "array"
+ x-nullable: false
+ items:
+ type: "string"
+ example: []
+
+ ContainerUpdateResponse:
+ type: "object"
+ title: "ContainerUpdateResponse"
+ x-go-name: "UpdateResponse"
+ description: |-
+ Response for a successful container-update.
+ properties:
+ Warnings:
+ type: "array"
+ description: |-
+ Warnings encountered when updating the container.
+ items:
+ type: "string"
+ example: ["Published ports are discarded when using host network mode"]
+
+ ContainerStatsResponse:
+ description: |
+ Statistics sample for a container.
+ type: "object"
+ x-go-name: "StatsResponse"
+ title: "ContainerStatsResponse"
+ properties:
+ name:
+ description: "Name of the container"
+ type: "string"
+ x-nullable: true
+ example: "boring_wozniak"
+ id:
+ description: "ID of the container"
+ type: "string"
+ x-nullable: true
+ example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743"
+ read:
+ description: |
+ Date and time at which this sample was collected.
+ The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)
+ with nano-seconds.
+ type: "string"
+ format: "date-time"
+ example: "2025-01-16T13:55:22.165243637Z"
+ preread:
+ description: |
+ Date and time at which the previous sample was collected. If the
+ "one-shot" option is set, this field is not propagated and may be
+ omitted, empty, or set to a default date (`0001-01-01T00:00:00Z`).
+
+ The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)
+ with nano-seconds.
+ type: "string"
+ format: "date-time"
+ example: "2025-01-16T13:55:21.160452595Z"
+ pids_stats:
+ $ref: "#/definitions/ContainerPidsStats"
+ blkio_stats:
+ $ref: "#/definitions/ContainerBlkioStats"
+ num_procs:
+ description: |
+ The number of processors on the system.
+
+ This field is Windows-specific and always zero for Linux containers.
+ type: "integer"
+ format: "uint32"
+ example: 16
+ storage_stats:
+ $ref: "#/definitions/ContainerStorageStats"
+ cpu_stats:
+ $ref: "#/definitions/ContainerCPUStats"
+ precpu_stats:
+ $ref: "#/definitions/ContainerCPUStats"
+ memory_stats:
+ $ref: "#/definitions/ContainerMemoryStats"
+ networks:
+ description: |
+ Network statistics for the container per interface.
+
+ This field is omitted if the container has no networking enabled.
+ x-nullable: true
+ additionalProperties:
+ $ref: "#/definitions/ContainerNetworkStats"
+ example:
+ eth0:
+ rx_bytes: 5338
+ rx_dropped: 0
+ rx_errors: 0
+ rx_packets: 36
+ tx_bytes: 648
+ tx_dropped: 0
+ tx_errors: 0
+ tx_packets: 8
+ eth5:
+ rx_bytes: 4641
+ rx_dropped: 0
+ rx_errors: 0
+ rx_packets: 26
+ tx_bytes: 690
+ tx_dropped: 0
+ tx_errors: 0
+ tx_packets: 9
+
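+ # A sketch (not part of the specification) of the conventional CPU-percentage
+ # calculation from two consecutive samples, as used by `docker stats` on
+ # Linux; it assumes `system_cpu_usage` and `online_cpus` are present:
+ #
+ #   cpu_delta    = cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage
+ #   system_delta = cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage
+ #   cpu_percent  = (cpu_delta / system_delta) * online_cpus * 100.0
+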
+ ContainerBlkioStats:
+ description: |
+ BlkioStats stores all IO service stats for data read and write.
+
+ This type is Linux-specific and holds many fields that are specific to cgroups v1.
+ On a cgroup v2 host, all fields other than `io_service_bytes_recursive`
+ are omitted or `null`.
+
+ This type is only populated on Linux and omitted for Windows containers.
+ type: "object"
+ x-go-name: "BlkioStats"
+ x-nullable: true
+ properties:
+ io_service_bytes_recursive:
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerBlkioStatEntry"
+ io_serviced_recursive:
+ description: |
+ This field is only available when using Linux containers with
+ cgroups v1. It is omitted or `null` when using cgroups v2.
+ x-nullable: true
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerBlkioStatEntry"
+ io_queue_recursive:
+ description: |
+ This field is only available when using Linux containers with
+ cgroups v1. It is omitted or `null` when using cgroups v2.
+ x-nullable: true
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerBlkioStatEntry"
+ io_service_time_recursive:
+ description: |
+ This field is only available when using Linux containers with
+ cgroups v1. It is omitted or `null` when using cgroups v2.
+ x-nullable: true
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerBlkioStatEntry"
+ io_wait_time_recursive:
+ description: |
+ This field is only available when using Linux containers with
+ cgroups v1. It is omitted or `null` when using cgroups v2.
+ x-nullable: true
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerBlkioStatEntry"
+ io_merged_recursive:
+ description: |
+ This field is only available when using Linux containers with
+ cgroups v1. It is omitted or `null` when using cgroups v2.
+ x-nullable: true
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerBlkioStatEntry"
+ io_time_recursive:
+ description: |
+ This field is only available when using Linux containers with
+ cgroups v1. It is omitted or `null` when using cgroups v2.
+ x-nullable: true
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerBlkioStatEntry"
+ sectors_recursive:
+ description: |
+ This field is only available when using Linux containers with
+ cgroups v1. It is omitted or `null` when using cgroups v2.
+ x-nullable: true
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerBlkioStatEntry"
+ example:
+ io_service_bytes_recursive: [
+ {"major": 254, "minor": 0, "op": "read", "value": 7593984},
+ {"major": 254, "minor": 0, "op": "write", "value": 100}
+ ]
+ io_serviced_recursive: null
+ io_queue_recursive: null
+ io_service_time_recursive: null
+ io_wait_time_recursive: null
+ io_merged_recursive: null
+ io_time_recursive: null
+ sectors_recursive: null
+
+ ContainerBlkioStatEntry:
+ description: |
+ Blkio stats entry.
+
+ This type is Linux-specific and omitted for Windows containers.
+ type: "object"
+ x-go-name: "BlkioStatEntry"
+ x-nullable: true
+ properties:
+ major:
+ type: "integer"
+ format: "uint64"
+ example: 254
+ minor:
+ type: "integer"
+ format: "uint64"
+ example: 0
+ op:
+ type: "string"
+ example: "read"
+ value:
+ type: "integer"
+ format: "uint64"
+ example: 7593984
+
+ ContainerCPUStats:
+ description: |
+ CPU related info of the container
+ type: "object"
+ x-go-name: "CPUStats"
+ x-nullable: true
+ properties:
+ cpu_usage:
+ $ref: "#/definitions/ContainerCPUUsage"
+ system_cpu_usage:
+ description: |
+ System Usage. The host's cumulative CPU time, in nanoseconds, consumed
+ by all processes, not only those in this container.
+
+ This field is Linux-specific and omitted for Windows containers.
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 5
+ online_cpus:
+ description: |
+ Number of online CPUs.
+
+ This field is Linux-specific and omitted for Windows containers.
+ type: "integer"
+ format: "uint32"
+ x-nullable: true
+ example: 5
+ throttling_data:
+ $ref: "#/definitions/ContainerThrottlingData"
+
+ ContainerCPUUsage:
+ description: |
+ All CPU stats aggregated since container inception.
+ type: "object"
+ x-go-name: "CPUUsage"
+ x-nullable: true
+ properties:
+ total_usage:
+ description: |
+ Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows).
+ type: "integer"
+ format: "uint64"
+ example: 29912000
+ percpu_usage:
+ description: |
+ Total CPU time (in nanoseconds) consumed per core (Linux).
+
+ This field is Linux-specific when using cgroups v1. It is omitted
+ when using cgroups v2 and for Windows containers.
+ type: "array"
+ x-nullable: true
+ items:
+ type: "integer"
+ format: "uint64"
+ example: 29912000
+
+ usage_in_kernelmode:
+ description: |
+ Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux),
+ or time spent (in 100's of nanoseconds) by all container processes in
+ kernel mode (Windows).
+
+ Not populated for Windows containers using Hyper-V isolation.
+ type: "integer"
+ format: "uint64"
+ example: 21994000
+ usage_in_usermode:
+ description: |
+ Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux),
+ or time spent (in 100's of nanoseconds) by all container processes in
+ user mode (Windows).
+
+ Not populated for Windows containers using Hyper-V isolation.
+ type: "integer"
+ format: "uint64"
+ example: 7918000
+
+ ContainerPidsStats:
+ description: |
+ PidsStats contains Linux-specific stats of a container's process-IDs (PIDs).
+
+ This type is Linux-specific and omitted for Windows containers.
+ type: "object"
+ x-go-name: "PidsStats"
+ x-nullable: true
+ properties:
+ current:
+ description: |
+ Current is the number of PIDs in the cgroup.
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 5
+ limit:
+ description: |
+ Limit is the hard limit on the number of pids in the cgroup.
+ A "Limit" of 0 means that there is no limit.
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 18446744073709551615
+
+ ContainerThrottlingData:
+ description: |
+ CPU throttling stats of the container.
+
+ This type is Linux-specific and omitted for Windows containers.
+ type: "object"
+ x-go-name: "ThrottlingData"
+ x-nullable: true
+ properties:
+ periods:
+ description: |
+ Number of periods with throttling active.
+ type: "integer"
+ format: "uint64"
+ example: 0
+ throttled_periods:
+ description: |
+ Number of periods when the container hit its throttling limit.
+ type: "integer"
+ format: "uint64"
+ example: 0
+ throttled_time:
+ description: |
+ Aggregated time (in nanoseconds) the container was throttled for.
+ type: "integer"
+ format: "uint64"
+ example: 0
+
+ ContainerMemoryStats:
+ description: |
+ Aggregates all memory stats since container inception on Linux.
+ Windows returns stats for commit and private working set only.
+ type: "object"
+ x-go-name: "MemoryStats"
+ properties:
+ usage:
+ description: |
+ Current `res_counter` usage for memory.
+
+ This field is Linux-specific and omitted for Windows containers.
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 0
+ max_usage:
+ description: |
+ Maximum usage ever recorded.
+
+ This field is Linux-specific and only supported on cgroups v1.
+ It is omitted when using cgroups v2 and for Windows containers.
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 0
+ stats:
+ description: |
+ All the stats exported via memory.stat when using cgroups v2.
+
+ This field is Linux-specific and omitted for Windows containers.
+ type: "object"
+ additionalProperties:
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example:
+ {
+ "active_anon": 1572864,
+ "active_file": 5115904,
+ "anon": 1572864,
+ "anon_thp": 0,
+ "file": 7626752,
+ "file_dirty": 0,
+ "file_mapped": 2723840,
+ "file_writeback": 0,
+ "inactive_anon": 0,
+ "inactive_file": 2510848,
+ "kernel_stack": 16384,
+ "pgactivate": 0,
+ "pgdeactivate": 0,
+ "pgfault": 2042,
+ "pglazyfree": 0,
+ "pglazyfreed": 0,
+ "pgmajfault": 45,
+ "pgrefill": 0,
+ "pgscan": 0,
+ "pgsteal": 0,
+ "shmem": 0,
+ "slab": 1180928,
+ "slab_reclaimable": 725576,
+ "slab_unreclaimable": 455352,
+ "sock": 0,
+ "thp_collapse_alloc": 0,
+ "thp_fault_alloc": 1,
+ "unevictable": 0,
+ "workingset_activate": 0,
+ "workingset_nodereclaim": 0,
+ "workingset_refault": 0
+ }
+ failcnt:
+ description: |
+ Number of times memory usage hits limits.
+
+ This field is Linux-specific and only supported on cgroups v1.
+ It is omitted when using cgroups v2 and for Windows containers.
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 0
+ limit:
+ description: |
+ Memory usage limit of the container, in bytes.
+
+ This field is Linux-specific and omitted for Windows containers.
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 8217579520
+ commitbytes:
+ description: |
+ Committed bytes.
+
+ This field is Windows-specific and omitted for Linux containers.
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 0
+ commitpeakbytes:
+ description: |
+ Peak committed bytes.
+
+ This field is Windows-specific and omitted for Linux containers.
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 0
+ privateworkingset:
+ description: |
+ Private working set.
+
+ This field is Windows-specific and omitted for Linux containers.
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 0
+
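+ # A hedged sketch (not part of the specification) of how clients typically
+ # derive "used" memory on Linux by excluding the page cache; the exact key
+ # under `stats` depends on the cgroup version:
+ #
+ #   used        = usage - stats["cache"]           # cgroups v1
+ #   used        = usage - stats["inactive_file"]   # cgroups v2
+ #   mem_percent = used / limit * 100.0
+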
+ ContainerNetworkStats:
+ description: |
+ Aggregates the network stats of one container.
+ type: "object"
+ x-go-name: "NetworkStats"
+ x-nullable: true
+ properties:
+ rx_bytes:
+ description: |
+ Bytes received. Windows and Linux.
+ type: "integer"
+ format: "uint64"
+ example: 5338
+ rx_packets:
+ description: |
+ Packets received. Windows and Linux.
+ type: "integer"
+ format: "uint64"
+ example: 36
+ rx_errors:
+ description: |
+ Received errors. Not used on Windows.
+
+ This field is Linux-specific and always zero for Windows containers.
+ type: "integer"
+ format: "uint64"
+ example: 0
+ rx_dropped:
+ description: |
+ Incoming packets dropped. Windows and Linux.
+ type: "integer"
+ format: "uint64"
+ example: 0
+ tx_bytes:
+ description: |
+ Bytes sent. Windows and Linux.
+ type: "integer"
+ format: "uint64"
+ example: 1200
+ tx_packets:
+ description: |
+ Packets sent. Windows and Linux.
+ type: "integer"
+ format: "uint64"
+ example: 12
+ tx_errors:
+ description: |
+ Sent errors. Not used on Windows.
+
+ This field is Linux-specific and always zero for Windows containers.
+ type: "integer"
+ format: "uint64"
+ example: 0
+ tx_dropped:
+ description: |
+ Outgoing packets dropped. Windows and Linux.
+ type: "integer"
+ format: "uint64"
+ example: 0
+ endpoint_id:
+ description: |
+ Endpoint ID. Not used on Linux.
+
+ This field is Windows-specific and omitted for Linux containers.
+ type: "string"
+ x-nullable: true
+ instance_id:
+ description: |
+ Instance ID. Not used on Linux.
+
+ This field is Windows-specific and omitted for Linux containers.
+ type: "string"
+ x-nullable: true
+
+ ContainerStorageStats:
+ description: |
+ StorageStats contains the disk I/O stats for read/write operations on Windows.
+
+ This type is Windows-specific and omitted for Linux containers.
+ type: "object"
+ x-go-name: "StorageStats"
+ x-nullable: true
+ properties:
+ read_count_normalized:
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 7593984
+ read_size_bytes:
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 7593984
+ write_count_normalized:
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 7593984
+ write_size_bytes:
+ type: "integer"
+ format: "uint64"
+ x-nullable: true
+ example: 7593984
+
+ ContainerTopResponse:
+ type: "object"
+ x-go-name: "TopResponse"
+ title: "ContainerTopResponse"
+ description: |-
+ Container "top" response.
+ properties:
+ Titles:
+ description: "The ps column titles"
+ type: "array"
+ items:
+ type: "string"
+ example:
+ Titles:
+ - "UID"
+ - "PID"
+ - "PPID"
+ - "C"
+ - "STIME"
+ - "TTY"
+ - "TIME"
+ - "CMD"
+ Processes:
+ description: |-
+ Each process running in the container, where each process
+ is an array of values corresponding to the titles.
+ type: "array"
+ items:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ Processes:
+ -
+ - "root"
+ - "13642"
+ - "882"
+ - "0"
+ - "17:03"
+ - "pts/0"
+ - "00:00:00"
+ - "/bin/bash"
+ -
+ - "root"
+ - "13735"
+ - "13642"
+ - "0"
+ - "17:06"
+ - "pts/0"
+ - "00:00:00"
+ - "sleep 10"
+
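+ # Example call (not part of the specification) returning this shape; the
+ # container name, API version, and `ps_args` value are illustrative:
+ #
+ #   curl --unix-socket /var/run/docker.sock \
+ #     "http://localhost/v1.47/containers/my-container/top?ps_args=-ef"
+ #
+ # Entries in each `Processes` row are positional: value i corresponds to
+ # `Titles[i]`.
+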
+ ContainerWaitResponse:
+ description: "OK response to ContainerWait operation"
+ type: "object"
+ x-go-name: "WaitResponse"
+ title: "ContainerWaitResponse"
+ required: [StatusCode]
+ properties:
+ StatusCode:
+ description: "Exit code of the container"
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ Error:
+ $ref: "#/definitions/ContainerWaitExitError"
+
+ ContainerWaitExitError:
+ description: "container waiting error, if any"
+ type: "object"
+ x-go-name: "WaitExitError"
+ properties:
+ Message:
+ description: "Details of an error"
+ type: "string"
+
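+ # A hedged usage sketch (not part of the specification): block until the
+ # container stops, then read the exit code; name and version are illustrative.
+ #
+ #   curl --unix-socket /var/run/docker.sock -X POST \
+ #     "http://localhost/v1.47/containers/my-container/wait?condition=not-running"
+ #   # => {"StatusCode": 0}
+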
+ SystemVersion:
+ type: "object"
+ description: |
+ Response of Engine API: GET "/version"
+ properties:
+ Platform:
+ type: "object"
+ required: [Name]
+ properties:
+ Name:
+ type: "string"
+ Components:
+ type: "array"
+ description: |
+ Information about system components
+ items:
+ type: "object"
+ x-go-name: ComponentVersion
+ required: [Name, Version]
+ properties:
+ Name:
+ description: |
+ Name of the component
+ type: "string"
+ example: "Engine"
+ Version:
+ description: |
+ Version of the component
+ type: "string"
+ x-nullable: false
+ example: "27.0.1"
+ Details:
+ description: |
+ Key/value pairs of strings with additional information about the
+ component. These values are intended for informational purposes
+ only, and their content is not defined, and not part of the API
+ specification.
+
+ These messages can be printed by the client as information to the user.
+ type: "object"
+ x-nullable: true
+ Version:
+ description: "The version of the daemon"
+ type: "string"
+ example: "27.0.1"
+ ApiVersion:
+ description: |
+ The default (and highest) API version that is supported by the daemon
+ type: "string"
+ example: "1.47"
+ MinAPIVersion:
+ description: |
+ The minimum API version that is supported by the daemon
+ type: "string"
+ example: "1.24"
+ GitCommit:
+ description: |
+ The Git commit of the source code that was used to build the daemon
+ type: "string"
+ example: "48a66213fe"
+ GoVersion:
+ description: |
+ The version of Go used to compile the daemon, and the version of the Go
+ runtime in use.
+ type: "string"
+ example: "go1.22.7"
+ Os:
+ description: |
+ The operating system that the daemon is running on ("linux" or "windows")
+ type: "string"
+ example: "linux"
+ Arch:
+ description: |
+ The architecture that the daemon is running on
+ type: "string"
+ example: "amd64"
+ KernelVersion:
+ description: |
+ The kernel version (`uname -r`) that the daemon is running on.
+
+ This field is omitted when empty.
+ type: "string"
+ example: "6.8.0-31-generic"
+ Experimental:
+ description: |
+ Indicates if the daemon is started with experimental features enabled.
+
+ This field is omitted when empty / false.
+ type: "boolean"
+ example: true
+ BuildTime:
+ description: |
+ The date and time that the daemon was compiled.
+ type: "string"
+ example: "2020-06-22T15:49:27.000000000+00:00"
+
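+ # Example (not part of the specification) of retrieving this object; the
+ # pinned API version is illustrative, the unversioned path also works:
+ #
+ #   curl --unix-socket /var/run/docker.sock "http://localhost/v1.47/version"
+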
+ SystemInfo:
+ type: "object"
+ properties:
+ ID:
+ description: |
+ Unique identifier of the daemon.
+
+ <p><br /></p>
+
+ > **Note**: The format of the ID itself is not part of the API, and
+ > should not be considered stable.
+ type: "string"
+ example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS"
+ Containers:
+ description: "Total number of containers on the host."
+ type: "integer"
+ example: 14
+ ContainersRunning:
+ description: |
+ Number of containers with status `"running"`.
+ type: "integer"
+ example: 3
+ ContainersPaused:
+ description: |
+ Number of containers with status `"paused"`.
+ type: "integer"
+ example: 1
+ ContainersStopped:
+ description: |
+ Number of containers with status `"stopped"`.
+ type: "integer"
+ example: 10
+ Images:
+ description: |
+ Total number of images on the host.
+
+ Both _tagged_ and _untagged_ (dangling) images are counted.
+ type: "integer"
+ example: 508
+ Driver:
+ description: "Name of the storage driver in use."
+ type: "string"
+ example: "overlay2"
+ DriverStatus:
+ description: |
+ Information specific to the storage driver, provided as
+ "label" / "value" pairs.
+
+ This information is provided by the storage driver, and formatted
+ in a way consistent with the output of `docker info` on the command
+ line.
+
+ <p><br /></p>
+
+ > **Note**: The information returned in this field, including the
+ > formatting of values and labels, should not be considered stable,
+ > and may change without notice.
+ type: "array"
+ items:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - ["Backing Filesystem", "extfs"]
+ - ["Supports d_type", "true"]
+ - ["Native Overlay Diff", "true"]
+ DockerRootDir:
+ description: |
+ Root directory of persistent Docker state.
+
+ Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker`
+ on Windows.
+ type: "string"
+ example: "/var/lib/docker"
+ Plugins:
+ $ref: "#/definitions/PluginsInfo"
+ MemoryLimit:
+ description: "Indicates if the host has memory limit support enabled."
+ type: "boolean"
+ example: true
+ SwapLimit:
+ description: "Indicates if the host has memory swap limit support enabled."
+ type: "boolean"
+ example: true
+ KernelMemoryTCP:
+ description: |
+ Indicates if the host has kernel memory TCP limit support enabled. This
+ field is omitted if not supported.
+
+ Kernel memory TCP limits are not supported when using cgroups v2, which
+ does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup.
+ type: "boolean"
+ example: true
+ CpuCfsPeriod:
+ description: |
+ Indicates if CPU CFS (Completely Fair Scheduler) period is supported by
+ the host.
+ type: "boolean"
+ example: true
+ CpuCfsQuota:
+ description: |
+ Indicates if CPU CFS (Completely Fair Scheduler) quota is supported by
+ the host.
+ type: "boolean"
+ example: true
+ CPUShares:
+ description: |
+ Indicates if CPU Shares limiting is supported by the host.
+ type: "boolean"
+ example: true
+ CPUSet:
+ description: |
+ Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host.
+
+ See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt)
+ type: "boolean"
+ example: true
+ PidsLimit:
+ description: "Indicates if the host kernel has PID limit support enabled."
+ type: "boolean"
+ example: true
+ OomKillDisable:
+ description: "Indicates if OOM killer disable is supported on the host."
+ type: "boolean"
+ IPv4Forwarding:
+ description: "Indicates IPv4 forwarding is enabled."
+ type: "boolean"
+ example: true
+ BridgeNfIptables:
+ description: |
+ Indicates if `bridge-nf-call-iptables` is available on the host when
+ the daemon was started.
+
+ <p><br /></p>
+
+ > **Deprecated**: netfilter module is now loaded on-demand and no longer
+ > during daemon startup, making this field obsolete. This field is always
+ > `false` and will be removed in API v1.49.
+ type: "boolean"
+ example: false
+ BridgeNfIp6tables:
+ description: |
+ Indicates if `bridge-nf-call-ip6tables` is available on the host.
+
+ <p><br /></p>
+
+ > **Deprecated**: netfilter module is now loaded on-demand, and no longer
+ > during daemon startup, making this field obsolete. This field is always
+ > `false` and will be removed in API v1.49.
+ type: "boolean"
+ example: false
+ Debug:
+ description: |
+ Indicates if the daemon is running in debug-mode / with debug-level
+ logging enabled.
+ type: "boolean"
+ example: true
+ NFd:
+ description: |
+ The total number of file descriptors in use by the daemon process.
+
+ This information is only returned if debug-mode is enabled.
+ type: "integer"
+ example: 64
+ NGoroutines:
+ description: |
+ The number of goroutines that currently exist.
+
+ This information is only returned if debug-mode is enabled.
+ type: "integer"
+ example: 174
+ SystemTime:
+ description: |
+ Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)
+ format with nano-seconds.
+ type: "string"
+ example: "2017-08-08T20:28:29.06202363Z"
+ LoggingDriver:
+ description: |
+ The logging driver to use as a default for new containers.
+ type: "string"
+ CgroupDriver:
+ description: |
+ The driver to use for managing cgroups.
+ type: "string"
+ enum: ["cgroupfs", "systemd", "none"]
+ default: "cgroupfs"
+ example: "cgroupfs"
+ CgroupVersion:
+ description: |
+ The version of the cgroup that is in use on the host.
+ type: "string"
+ enum: ["1", "2"]
+ default: "1"
+ example: "1"
+ NEventsListener:
+ description: "Number of event listeners subscribed."
+ type: "integer"
+ example: 30
+ KernelVersion:
+ description: |
+ Kernel version of the host.
+
+ On Linux, this information is obtained from `uname`. On Windows, this
+ information is queried from the <kbd>HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\</kbd>
+ registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_.
+ type: "string"
+ example: "6.8.0-31-generic"
+ OperatingSystem:
+ description: |
+ Name of the host's operating system, for example: "Ubuntu 24.04 LTS"
+ or "Windows Server 2016 Datacenter"
+ type: "string"
+ example: "Ubuntu 24.04 LTS"
+ OSVersion:
+ description: |
+ Version of the host's operating system
+
+ <p><br /></p>
+
+ > **Note**: The information returned in this field, including its
+ > very existence, and the formatting of values, should not be considered
+ > stable, and may change without notice.
+ type: "string"
+ example: "24.04"
+ OSType:
+ description: |
+ Generic type of the operating system of the host, as returned by the
+ Go runtime (`GOOS`).
+
+ Currently returned values are "linux" and "windows". A full list of
+ possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment).
+ type: "string"
+ example: "linux"
+ Architecture:
+ description: |
+ Hardware architecture of the host, as returned by the Go runtime
+ (`GOARCH`).
+
+ A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment).
+ type: "string"
+ example: "x86_64"
+ NCPU:
+ description: |
+ The number of logical CPUs usable by the daemon.
+
+ The number of available CPUs is checked by querying the operating
+ system when the daemon starts. Changes to operating system CPU
+ allocation after the daemon is started are not reflected.
+ type: "integer"
+ example: 4
+ MemTotal:
+ description: |
+ Total amount of physical memory available on the host, in bytes.
+ type: "integer"
+ format: "int64"
+ example: 2095882240
+
+ IndexServerAddress:
+ description: |
+ Address / URL of the index server that is used for image search,
+ and as a default for user authentication for Docker Hub and Docker Cloud.
+ default: "https://index.docker.io/v1/"
+ type: "string"
+ example: "https://index.docker.io/v1/"
+ RegistryConfig:
+ $ref: "#/definitions/RegistryServiceConfig"
+ GenericResources:
+ $ref: "#/definitions/GenericResources"
+ HttpProxy:
+ description: |
+ HTTP-proxy configured for the daemon. This value is obtained from the
+ [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.
+ Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL
+ are masked in the API response.
+
+ Containers do not automatically inherit this configuration.
+ type: "string"
+ example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080"
+ HttpsProxy:
+ description: |
+ HTTPS-proxy configured for the daemon. This value is obtained from the
+ [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.
+ Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL
+ are masked in the API response.
+
+ Containers do not automatically inherit this configuration.
+ type: "string"
+ example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443"
+ NoProxy:
+ description: |
+ Comma-separated list of domain extensions for which no proxy should be
+ used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html)
+ environment variable.
+
+ Containers do not automatically inherit this configuration.
+ type: "string"
+ example: "*.local, 169.254/16"
+ Name:
+ description: "Hostname of the host."
+ type: "string"
+ example: "node5.corp.example.com"
+ Labels:
+ description: |
+ User-defined labels (key/value metadata) as set on the daemon.
+
+ <p><br /></p>
+
+ > **Note**: When part of a Swarm, nodes can both have _daemon_ labels,
+ > set through the daemon configuration, and _node_ labels, set from a
+ > manager node in the Swarm. Node labels are not included in this
+ > field. Node labels can be retrieved using the `/nodes/(id)` endpoint
+ > on a manager node in the Swarm.
+ type: "array"
+ items:
+ type: "string"
+ example: ["storage=ssd", "production"]
+ ExperimentalBuild:
+ description: |
+ Indicates if experimental features are enabled on the daemon.
+ type: "boolean"
+ example: true
+ ServerVersion:
+ description: |
+ Version string of the daemon.
+ type: "string"
+ example: "27.0.1"
+ Runtimes:
+ description: |
+ List of [OCI compliant](https://github.com/opencontainers/runtime-spec)
+ runtimes configured on the daemon. Keys hold the "name" used to
+ reference the runtime.
+
+ The Docker daemon relies on an OCI compliant runtime (invoked via the
+ `containerd` daemon) as its interface to the Linux kernel namespaces,
+ cgroups, and SELinux.
+
+ The default runtime is `runc`, and automatically configured. Additional
+ runtimes can be configured by the user and will be listed here.
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/Runtime"
+ default:
+ runc:
+ path: "runc"
+ example:
+ runc:
+ path: "runc"
+ runc-master:
+ path: "/go/bin/runc"
+ custom:
+ path: "/usr/local/bin/my-oci-runtime"
+ runtimeArgs: ["--debug", "--systemd-cgroup=false"]
+ DefaultRuntime:
+ description: |
+ Name of the default OCI runtime that is used when starting containers.
+
+ The default can be overridden per-container at create time.
+ type: "string"
+ default: "runc"
+ example: "runc"
+ Swarm:
+ $ref: "#/definitions/SwarmInfo"
+ LiveRestoreEnabled:
+ description: |
+ Indicates if live restore is enabled.
+
+ If enabled, containers are kept running when the daemon is shut down
+ or upon daemon start if running containers are detected.
+ type: "boolean"
+ default: false
+ example: false
+ Isolation:
+ description: |
+ Represents the isolation technology to use as a default for containers.
+ The supported values are platform-specific.
+
+ If no isolation value is specified on daemon start, on Windows client,
+ the default is `hyperv`, and on Windows server, the default is `process`.
+
+ This option is currently not used on other platforms.
+ default: "default"
+ type: "string"
+ enum:
+ - "default"
+ - "hyperv"
+ - "process"
+ - ""
+ InitBinary:
+ description: |
+ Name and, optionally, path of the `docker-init` binary.
+
+ If the path is omitted, the daemon searches the host's `$PATH` for the
+ binary and uses the first result.
+ type: "string"
+ example: "docker-init"
+ ContainerdCommit:
+ $ref: "#/definitions/Commit"
+ RuncCommit:
+ $ref: "#/definitions/Commit"
+ InitCommit:
+ $ref: "#/definitions/Commit"
+ SecurityOptions:
+ description: |
+ List of security features that are enabled on the daemon, such as
+ apparmor, seccomp, SELinux, user-namespaces (userns), rootless and
+ no-new-privileges.
+
+ Additional configuration options for each security feature may
+ be present, and are included as a comma-separated list of key/value
+ pairs.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "name=apparmor"
+ - "name=seccomp,profile=default"
+ - "name=selinux"
+ - "name=userns"
+ - "name=rootless"
+ ProductLicense:
+ description: |
+ Reports a summary of the product license on the daemon.
+
+ If a commercial license has been applied to the daemon, information
+ such as number of nodes, and expiration are included.
+ type: "string"
+ example: "Community Engine"
+ DefaultAddressPools:
+ description: |
+ List of custom default address pools for local networks, which can be
+ specified in the daemon.json file or via the dockerd `--default-address-pool` option.
+
+ Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256
+ 10.10.[0-255].0/24 address pools.
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Base:
+ description: "The network address in CIDR format"
+ type: "string"
+ example: "10.10.0.0/16"
+ Size:
+ description: "The network pool size"
+ type: "integer"
+ example: "24"
+ Warnings:
+ description: |
+ List of warnings / informational messages about missing features, or
+ issues related to the daemon configuration.
+
+ These messages can be printed by the client as information to the user.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "WARNING: No memory limit support"
+ CDISpecDirs:
+ description: |
+ List of directories where (Container Device Interface) CDI
+ specifications are located.
+
+ These specifications define vendor-specific modifications to an OCI
+ runtime specification for a container being created.
+
+ An empty list indicates that CDI device injection is disabled.
+
+ Note that using CDI device injection requires the daemon to have
+ experimental features enabled. For non-experimental daemons, an empty
+ list is always returned.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "/etc/cdi"
+ - "/var/run/cdi"
+ Containerd:
+ $ref: "#/definitions/ContainerdInfo"
+
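+ # SystemInfo is returned by GET "/info". A quick sketch (not part of the
+ # specification) for checking a single field, such as the cgroup version:
+ #
+ #   curl --unix-socket /var/run/docker.sock "http://localhost/v1.47/info" \
+ #     | jq -r '.CgroupVersion'
+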
+ ContainerdInfo:
+ description: |
+ Information for connecting to the containerd instance that is used by the daemon.
+ This is included for debugging purposes only.
+ type: "object"
+ x-nullable: true
+ properties:
+ Address:
+ description: "The address of the containerd socket."
+ type: "string"
+ example: "/run/containerd/containerd.sock"
+ Namespaces:
+ description: |
+ The namespaces that the daemon uses for running containers and
+ plugins in containerd. These namespaces can be configured in the
+ daemon configuration, and are considered to be used exclusively
+ by the daemon. Tampering with the containerd instance may cause
+ unexpected behavior.
+
+ As these namespaces are considered to be exclusively accessed
+ by the daemon, it is not recommended to change these values,
+ or to change them to a value that is used by other systems,
+ such as cri-containerd.
+ type: "object"
+ properties:
+ Containers:
+ description: |
+ The default containerd namespace used for containers managed
+ by the daemon.
+
+ The default namespace for containers is "moby", but will be
+ suffixed with the `<uid>.<gid>` of the remapped `root` if
+ user-namespaces are enabled and the containerd image-store
+ is used.
+ type: "string"
+ default: "moby"
+ example: "moby"
+ Plugins:
+ description: |
+ The default containerd namespace used for plugins managed by
+ the daemon.
+
+ The default namespace for plugins is "plugins.moby", but will be
+ suffixed with the `<uid>.<gid>` of the remapped `root` if
+ user-namespaces are enabled and the containerd image-store
+ is used.
+ type: "string"
+ default: "plugins.moby"
+ example: "plugins.moby"
+
+ # PluginsInfo is a temp struct holding Plugins name
+ # registered with docker daemon. It is used by Info struct
+ PluginsInfo:
+ description: |
+ Available plugins per type.
+
+ <p><br /></p>
+
+ > **Note**: Only unmanaged (V1) plugins are included in this list.
+ > V1 plugins are "lazily" loaded, and are not returned in this list
+ > if there is no resource using the plugin.
+ type: "object"
+ properties:
+ Volume:
+ description: "Names of available volume-drivers, and network-driver plugins."
+ type: "array"
+ items:
+ type: "string"
+ example: ["local"]
+ Network:
+ description: "Names of available network-drivers, and network-driver plugins."
+ type: "array"
+ items:
+ type: "string"
+ example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"]
+ Authorization:
+ description: "Names of available authorization plugins."
+ type: "array"
+ items:
+ type: "string"
+ example: ["img-authz-plugin", "hbm"]
+ Log:
+ description: "Names of available logging-drivers, and logging-driver plugins."
+ type: "array"
+ items:
+ type: "string"
+ example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"]
+
+
+ RegistryServiceConfig:
+ description: |
+ RegistryServiceConfig stores daemon registry services configuration.
+ type: "object"
+ x-nullable: true
+ properties:
+ AllowNondistributableArtifactsCIDRs:
+ description: |
+ List of IP ranges to which nondistributable artifacts can be pushed,
+ using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/rfc4632).
+
+ <p><br /></p>
+
+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled
+ > and this field is always `null`. This field will be removed in API v1.49.
+ type: "array"
+ items:
+ type: "string"
+ example: []
+ AllowNondistributableArtifactsHostnames:
+ description: |
+ List of registry hostnames to which nondistributable artifacts can be
+ pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`.
+
+ <p><br /></p>
+
+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled
+ > and this field is always `null`. This field will be removed in API v1.49.
+ type: "array"
+ items:
+ type: "string"
+ example: []
+ InsecureRegistryCIDRs:
+ description: |
+ List of IP ranges of insecure registries, using the CIDR syntax
+ ([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries
+ accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates
+ from unknown CAs) communication.
+
+ By default, local registries (`::1/128` and `127.0.0.0/8`) are configured as
+ insecure. All other registries are secure. Communicating with an
+ insecure registry is not possible if the daemon assumes that registry
+ is secure.
+
+ This configuration overrides that behavior, allowing insecure
+ communication with registries whose resolved IP address is within the
+ subnet described by the CIDR syntax.
+
+ Registries can also be marked insecure by hostname. Those registries
+ are listed under `IndexConfigs` and have their `Secure` field set to
+ `false`.
+
+ > **Warning**: Using this option can be useful when running a local
+ > registry, but introduces security vulnerabilities. This option
+ > should therefore ONLY be used for testing purposes. For increased
+ > security, users should add their CA to their system's list of trusted
+ > CAs instead of enabling this option.
+ type: "array"
+ items:
+ type: "string"
+ example: ["::1/128", "127.0.0.0/8"]
+ IndexConfigs:
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/IndexInfo"
+ example:
+ "127.0.0.1:5000":
+ "Name": "127.0.0.1:5000"
+ "Mirrors": []
+ "Secure": false
+ "Official": false
+ "[2001:db8:a0b:12f0::1]:80":
+ "Name": "[2001:db8:a0b:12f0::1]:80"
+ "Mirrors": []
+ "Secure": false
+ "Official": false
+ "docker.io":
+ Name: "docker.io"
+ Mirrors: ["https://hub-mirror.corp.example.com:5000/"]
+ Secure: true
+ Official: true
+ "registry.internal.corp.example.com:3000":
+ Name: "registry.internal.corp.example.com:3000"
+ Mirrors: []
+ Secure: false
+ Official: false
+ Mirrors:
+ description: |
+ List of registry URLs that act as a mirror for the official
+ (`docker.io`) registry.
+
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "https://hub-mirror.corp.example.com:5000/"
+ - "https://[2001:db8:a0b:12f0::1]/"
+
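+ # A daemon.json sketch (not part of the specification) of the settings that
+ # populate this object; hostnames mirror the examples above and are
+ # illustrative:
+ #
+ #   {
+ #     "insecure-registries": ["registry.internal.corp.example.com:3000"],
+ #     "registry-mirrors": ["https://hub-mirror.corp.example.com:5000/"]
+ #   }
+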
+ IndexInfo:
+ description:
+ IndexInfo contains information about a registry.
+ type: "object"
+ x-nullable: true
+ properties:
+ Name:
+ description: |
+ Name of the registry, such as "docker.io".
+ type: "string"
+ example: "docker.io"
+ Mirrors:
+ description: |
+ List of mirrors, expressed as URIs.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "https://hub-mirror.corp.example.com:5000/"
+ - "https://registry-2.docker.io/"
+ - "https://registry-3.docker.io/"
+ Secure:
+ description: |
+ Indicates if the registry is considered secure, that is, not part
+ of the list of insecure registries.
+
+ If `false`, the registry is insecure. Insecure registries accept
+ un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from
+ unknown CAs) communication.
+
+ > **Warning**: Insecure registries can be useful when running a local
+ > registry. However, because its use creates security vulnerabilities
+ > it should ONLY be enabled for testing purposes. For increased
+ > security, users should add their CA to their system's list of
+ > trusted CAs instead of enabling this option.
+ type: "boolean"
+ example: true
+ Official:
+ description: |
+ Indicates whether this is an official registry (i.e., Docker Hub / docker.io)
+ type: "boolean"
+ example: true
+
+ Runtime:
+ description: |
+ Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec)
+ runtime.
+
+ The runtime is invoked by the daemon via the `containerd` daemon. OCI
+ runtimes act as an interface to the Linux kernel namespaces, cgroups,
+ and SELinux.
+ type: "object"
+ properties:
+ path:
+ description: |
+ Name and, optionally, path of the OCI executable binary.
+
+ If the path is omitted, the daemon searches the host's `$PATH` for the
+ binary and uses the first result.
+ type: "string"
+ example: "/usr/local/bin/my-oci-runtime"
+ runtimeArgs:
+ description: |
+ List of command-line arguments to pass to the runtime when invoked.
+ type: "array"
+ x-nullable: true
+ items:
+ type: "string"
+ example: ["--debug", "--systemd-cgroup=false"]
+ status:
+ description: |
+ Information specific to the runtime.
+
+ While this API specification does not define data provided by runtimes,
+ the following well-known properties may be provided by runtimes:
+
+ `org.opencontainers.runtime-spec.features`: features structure as defined
+ in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md),
+ in a JSON string representation.
+
+ <p><br /></p>
+
+ > **Note**: The information returned in this field, including the
+ > formatting of values and labels, should not be considered stable,
+ > and may change without notice.
+ type: "object"
+ x-nullable: true
+ additionalProperties:
+ type: "string"
+ example:
+ "org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}"
+
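+ # A hedged daemon.json sketch (not part of the specification) registering the
+ # custom runtime from the example above:
+ #
+ #   {
+ #     "runtimes": {
+ #       "custom": {
+ #         "path": "/usr/local/bin/my-oci-runtime",
+ #         "runtimeArgs": ["--debug", "--systemd-cgroup=false"]
+ #       }
+ #     }
+ #   }
+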
+ Commit:
+ description: |
+ Commit holds the Git-commit (SHA1) that a binary was built from, as
+ reported in the version-string of external tools, such as `containerd`
+ or `runC`.
+ type: "object"
+ properties:
+ ID:
+ description: "Actual commit ID of external tool."
+ type: "string"
+ example: "cfb82a876ecc11b5ca0977d1733adbe58599088a"
+ Expected:
+ description: |
+ Commit ID of external tool expected by dockerd as set at build time.
+
+ **Deprecated**: This field is deprecated and will be omitted in API v1.49.
+ type: "string"
+ example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4"
+
+ SwarmInfo:
+ description: |
+ Represents generic information about the swarm.
+ type: "object"
+ properties:
+ NodeID:
+ description: "Unique identifier of for this node in the swarm."
+ type: "string"
+ default: ""
+ example: "k67qz4598weg5unwwffg6z1m1"
+ NodeAddr:
+ description: |
+ IP address at which this node can be reached by other nodes in the
+ swarm.
+ type: "string"
+ default: ""
+ example: "10.0.0.46"
+ LocalNodeState:
+ $ref: "#/definitions/LocalNodeState"
+ ControlAvailable:
+ type: "boolean"
+ default: false
+ example: true
+ Error:
+ type: "string"
+ default: ""
+ RemoteManagers:
+ description: |
+ List of IDs and addresses of other managers in the swarm.
+ type: "array"
+ default: null
+ x-nullable: true
+ items:
+ $ref: "#/definitions/PeerNode"
+ example:
+ - NodeID: "71izy0goik036k48jg985xnds"
+ Addr: "10.0.0.158:2377"
+ - NodeID: "79y6h1o4gv8n120drcprv5nmc"
+ Addr: "10.0.0.159:2377"
+ - NodeID: "k67qz4598weg5unwwffg6z1m1"
+ Addr: "10.0.0.46:2377"
+ Nodes:
+ description: "Total number of nodes in the swarm."
+ type: "integer"
+ x-nullable: true
+ example: 4
+ Managers:
+ description: "Total number of managers in the swarm."
+ type: "integer"
+ x-nullable: true
+ example: 3
+ Cluster:
+ $ref: "#/definitions/ClusterInfo"
+
+ LocalNodeState:
+ description: "Current local status of this node."
+ type: "string"
+ default: ""
+ enum:
+ - ""
+ - "inactive"
+ - "pending"
+ - "active"
+ - "error"
+ - "locked"
+ example: "active"
+
+ PeerNode:
+ description: "Represents a peer-node in the swarm"
+ type: "object"
+ properties:
+ NodeID:
+ description: "Unique identifier of for this node in the swarm."
+ type: "string"
+ Addr:
+ description: |
+ IP address and ports at which this node can be reached.
+ type: "string"
+
+ NetworkAttachmentConfig:
+ description: |
+ Specifies how a service should be attached to a particular network.
+ type: "object"
+ properties:
+ Target:
+ description: |
+ The target network for attachment. Must be a network name or ID.
+ type: "string"
+ Aliases:
+ description: |
+ Discoverable alternate names for the service on this network.
+ type: "array"
+ items:
+ type: "string"
+ DriverOpts:
+ description: |
+ Driver attachment options for the network target.
+ type: "object"
+ additionalProperties:
+ type: "string"
+
+ EventActor:
+ description: |
+ Actor describes something that generates events, like a container, network,
+ or a volume.
+ type: "object"
+ properties:
+ ID:
+ description: "The ID of the object emitting the event"
+ type: "string"
+ example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743"
+ Attributes:
+ description: |
+ Various key/value attributes of the object, depending on its type.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-label-value"
+ image: "alpine:latest"
+ name: "my-container"
+
+ EventMessage:
+ description: |
+ EventMessage represents the information an event contains.
+ type: "object"
+ title: "SystemEventsResponse"
+ properties:
+ Type:
+ description: "The type of object emitting the event"
+ type: "string"
+ enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"]
+ example: "container"
+ Action:
+ description: "The type of event"
+ type: "string"
+ example: "create"
+ Actor:
+ $ref: "#/definitions/EventActor"
+ scope:
+ description: |
+ Scope of the event. Engine events are `local` scope. Cluster (Swarm)
+ events are `swarm` scope.
+ type: "string"
+ enum: ["local", "swarm"]
+ time:
+ description: "Timestamp of event"
+ type: "integer"
+ format: "int64"
+ example: 1629574695
+ timeNano:
+ description: "Timestamp of event, with nanosecond accuracy"
+ type: "integer"
+ format: "int64"
+ example: 1629574695515050031
+
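+ # A minimal sketch (not part of the specification) of streaming these
+ # messages, filtered to container events; the filter is URL-encoded JSON
+ # ({"type":["container"]}):
+ #
+ #   curl --unix-socket /var/run/docker.sock \
+ #     "http://localhost/v1.47/events?filters=%7B%22type%22%3A%5B%22container%22%5D%7D"
+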
+ OCIDescriptor:
+ type: "object"
+ x-go-name: Descriptor
+ description: |
+ A descriptor struct containing digest, media type, and size, as defined in
+ the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md).
+ properties:
+ mediaType:
+ description: |
+ The media type of the object this schema refers to.
+ type: "string"
+ example: "application/vnd.oci.image.manifest.v1+json"
+ digest:
+ description: |
+ The digest of the targeted content.
+ type: "string"
+ example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96"
+ size:
+ description: |
+ The size in bytes of the blob.
+ type: "integer"
+ format: "int64"
+ example: 424
+ urls:
+ description: |-
+ List of URLs from which this object MAY be downloaded.
+ type: "array"
+ items:
+ type: "string"
+ format: "uri"
+ x-nullable: true
+ annotations:
+ description: |-
+ Arbitrary metadata relating to the targeted content.
+ type: "object"
+ x-nullable: true
+ additionalProperties:
+ type: "string"
+ example:
+ "com.docker.official-images.bashbrew.arch": "amd64"
+ "org.opencontainers.image.base.digest": "sha256:0d0ef5c914d3ea700147da1bd050c59edb8bb12ca312f3800b29d7c8087eabd8"
+ "org.opencontainers.image.base.name": "scratch"
+ "org.opencontainers.image.created": "2025-01-27T00:00:00Z"
+ "org.opencontainers.image.revision": "9fabb4bad5138435b01857e2fe9363e2dc5f6a79"
+ "org.opencontainers.image.source": "https://git.launchpad.net/cloud-images/+oci/ubuntu-base"
+ "org.opencontainers.image.url": "https://hub.docker.com/_/ubuntu"
+ "org.opencontainers.image.version": "24.04"
+ data:
+ type: string
+ x-nullable: true
+ description: |-
+ Data is an embedding of the targeted content. This is encoded as a base64
+ string when marshalled to JSON (automatically, by encoding/json). If
+ present, Data can be used directly to avoid fetching the targeted content.
+ example: null
+ platform:
+ $ref: "#/definitions/OCIPlatform"
+ artifactType:
+ description: |-
+ ArtifactType is the IANA media type of this artifact.
+ type: "string"
+ x-nullable: true
+ example: null
+
+ OCIPlatform:
+ type: "object"
+ x-go-name: Platform
+ x-nullable: true
+ description: |
+ Describes the platform which the image in the manifest runs on, as defined
+ in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md).
+ properties:
+ architecture:
+ description: |
+ The CPU architecture, for example `amd64` or `ppc64`.
+ type: "string"
+ example: "arm"
+ os:
+ description: |
+ The operating system, for example `linux` or `windows`.
+ type: "string"
+ example: "windows"
+ os.version:
+ description: |
+ Optional field specifying the operating system version, for example on
+ Windows `10.0.19041.1165`.
+ type: "string"
+ example: "10.0.19041.1165"
+ os.features:
+ description: |
+ Optional field specifying an array of strings, each listing a required
+ OS feature (for example on Windows `win32k`).
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "win32k"
+ variant:
+ description: |
+ Optional field specifying a variant of the CPU, for example `v7` to
+ specify ARMv7 when architecture is `arm`.
+ type: "string"
+ example: "v7"
+
+ DistributionInspect:
+ type: "object"
+ x-go-name: DistributionInspect
+ title: "DistributionInspectResponse"
+ required: [Descriptor, Platforms]
+ description: |
+ Describes the result obtained from contacting the registry to retrieve
+ image metadata.
+ properties:
+ Descriptor:
+ $ref: "#/definitions/OCIDescriptor"
+ Platforms:
+ type: "array"
+ description: |
+ An array containing all platforms supported by the image.
+ items:
+ $ref: "#/definitions/OCIPlatform"
+
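+ # Example (not part of the specification) of retrieving this object; the
+ # image reference is illustrative:
+ #
+ #   curl --unix-socket /var/run/docker.sock \
+ #     "http://localhost/v1.47/distribution/ubuntu:latest/json"
+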
+ ClusterVolume:
+ type: "object"
+ description: |
+ Options and information specific to, and only present on, Swarm CSI
+ cluster volumes.
+ properties:
+ ID:
+ type: "string"
+ description: |
+ The Swarm ID of this volume. Because cluster volumes are Swarm
+ objects, they have an ID, unlike non-cluster volumes. This ID can
+ be used to refer to the Volume instead of the name.
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ Spec:
+ $ref: "#/definitions/ClusterVolumeSpec"
+ Info:
+ type: "object"
+ description: |
+ Information about the global status of the volume.
+ properties:
+ CapacityBytes:
+ type: "integer"
+ format: "int64"
+ description: |
+ The capacity of the volume in bytes. A value of 0 indicates that
+ the capacity is unknown.
+ VolumeContext:
+ type: "object"
+ description: |
+ A map of strings to strings returned from the storage plugin when
+ the volume is created.
+ additionalProperties:
+ type: "string"
+ VolumeID:
+ type: "string"
+ description: |
+ The ID of the volume as returned by the CSI storage plugin. This
+ is distinct from the volume's ID as provided by Docker. This ID
+ is never used by the user when communicating with Docker to refer
+ to this volume. If the ID is blank, then the Volume has not been
+ successfully created in the plugin yet.
+ AccessibleTopology:
+ type: "array"
+ description: |
+ The topology this volume is actually accessible from.
+ items:
+ $ref: "#/definitions/Topology"
+ PublishStatus:
+ type: "array"
+ description: |
+ The status of the volume as it pertains to its publishing and use on
+ specific nodes
+ items:
+ type: "object"
+ properties:
+ NodeID:
+ type: "string"
+ description: |
+ The ID of the Swarm node the volume is published on.
+ State:
+ type: "string"
+ description: |
+ The published state of the volume.
+ * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed.
+ * `published` The volume is published successfully to the node.
+ * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so.
+ * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller.
+ enum:
+ - "pending-publish"
+ - "published"
+ - "pending-node-unpublish"
+ - "pending-controller-unpublish"
+ PublishContext:
+ type: "object"
+ description: |
+ A map of strings to strings returned by the CSI controller
+ plugin when a volume is published.
+ additionalProperties:
+ type: "string"
+
+ ClusterVolumeSpec:
+ type: "object"
+ description: |
+ Cluster-specific options used to create the volume.
+ properties:
+ Group:
+ type: "string"
+ description: |
+ Group defines the volume group of this volume. Volumes belonging to
+ the same group can be referred to by group name when creating
+ Services. Referring to a volume by group instructs Swarm to treat
+ volumes in that group interchangeably for the purpose of scheduling.
+ Volumes with an empty string for a group technically all belong to
+ the same, empty-string group.
+ AccessMode:
+ type: "object"
+ description: |
+ Defines how the volume is used by tasks.
+ properties:
+ Scope:
+ type: "string"
+ description: |
+ The set of nodes this volume can be used on at one time.
+ - `single` The volume may only be scheduled to one node at a time.
+ - `multi` The volume may be scheduled to any supported number of nodes at a time.
+ default: "single"
+ enum: ["single", "multi"]
+ x-nullable: false
+ Sharing:
+ type: "string"
+ description: |
+ The number and way that different tasks can use this volume
+ at one time.
+ - `none` The volume may only be used by one task at a time.
+ - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly.
+ - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write.
+ - `all` The volume may have any number of readers and writers.
+ default: "none"
+ enum: ["none", "readonly", "onewriter", "all"]
+ x-nullable: false
+ MountVolume:
+ type: "object"
+ description: |
+ Options for using this volume as a Mount-type volume.
+
+ Either MountVolume or BlockVolume, but not both, must be
+ present.
+ properties:
+ FsType:
+ type: "string"
+ description: |
+ Specifies the filesystem type for the mount volume.
+ Optional.
+ MountFlags:
+ type: "array"
+ description: |
+ Flags to pass when mounting the volume. Optional.
+ items:
+ type: "string"
+ BlockVolume:
+ type: "object"
+ description: |
+ Options for using this volume as a Block-type volume.
+ Intentionally empty.
+ Secrets:
+ type: "array"
+ description: |
+ Swarm Secrets that are passed to the CSI storage plugin when
+ operating on this volume.
+ items:
+ type: "object"
+ description: |
+ One cluster volume secret entry. Defines a key-value pair that
+ is passed to the plugin.
+ properties:
+ Key:
+ type: "string"
+ description: |
+ Key is the name of the key of the key-value pair passed to
+ the plugin.
+ Secret:
+ type: "string"
+ description: |
+ Secret is the swarm Secret object from which to read data.
+ This can be a Secret name or ID. The Secret data is
+ retrieved by swarm and used as the value of the key-value
+ pair passed to the plugin.
+ AccessibilityRequirements:
+ type: "object"
+ description: |
+ Requirements for the accessible topology of the volume. These
+ fields are optional. For an in-depth description of what these
+ fields mean, see the CSI specification.
+ properties:
+ Requisite:
+ type: "array"
+ description: |
+ A list of required topologies, at least one of which the
+ volume must be accessible from.
+ items:
+ $ref: "#/definitions/Topology"
+ Preferred:
+ type: "array"
+ description: |
+ A list of topologies that the volume should attempt to be
+ provisioned in.
+ items:
+ $ref: "#/definitions/Topology"
+ CapacityRange:
+ type: "object"
+ description: |
+ The desired capacity that the volume should be created with. If
+ empty, the plugin will decide the capacity.
+ properties:
+ RequiredBytes:
+ type: "integer"
+ format: "int64"
+ description: |
+ The volume must be at least this big. The value of 0
+ indicates an unspecified minimum.
+ LimitBytes:
+ type: "integer"
+ format: "int64"
+ description: |
+ The volume must not be bigger than this. The value of 0
+ indicates an unspecified maximum.
+ Availability:
+ type: "string"
+ description: |
+ The availability of the volume for use in tasks.
+ - `active` The volume is fully available for scheduling on the cluster
+ - `pause` No new workloads should use the volume, but existing workloads are not stopped.
+ - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started.
+ default: "active"
+ x-nullable: false
+ enum:
+ - "active"
+ - "pause"
+ - "drain"
+
+ Topology:
+ description: |
+ A map of topological domains to topological segments. For in-depth
+ details, see documentation for the Topology object in the CSI
+ specification.
+ type: "object"
+ additionalProperties:
+ type: "string"
+
+ ImageManifestSummary:
+ x-go-name: "ManifestSummary"
+ description: |
+ ImageManifestSummary represents a summary of an image manifest.
+ type: "object"
+ required: ["ID", "Descriptor", "Available", "Size", "Kind"]
+ properties:
+ ID:
+ description: |
+ ID is the content-addressable ID of an image and is the same as the
+ digest of the image manifest.
+ type: "string"
+ example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f"
+ Descriptor:
+ $ref: "#/definitions/OCIDescriptor"
+ Available:
+ description: Indicates whether all the child content (image config, layers) is fully available locally.
+ type: "boolean"
+ example: true
+ Size:
+ type: "object"
+ x-nullable: false
+ required: ["Content", "Total"]
+ properties:
+ Total:
+ type: "integer"
+ format: "int64"
+ example: 8213251
+ description: |
+ Total is the total size (in bytes) of all the locally present
+ data (both distributable and non-distributable) that's related to
+ this manifest and its children.
+ This is equal to the sum of the [Content] size and all the sizes in the
+ [Size] struct present in the Kind-specific data struct.
+ For example, for an image kind (Kind == "image")
+ this would include the size of the image content and unpacked
+ image snapshots ([Size.Content] + [ImageData.Size.Unpacked]).
+ Content:
+ description: |
+ Content is the size (in bytes) of all the locally present
+ content in the content store (e.g. image config, layers)
+ referenced by this manifest and its children.
+ This only includes blobs in the content store.
+ type: "integer"
+ format: "int64"
+ example: 3987495
+ Kind:
+ type: "string"
+ example: "image"
+ enum:
+ - "image"
+ - "attestation"
+ - "unknown"
+ description: |
+ The kind of the manifest.
+
+ kind | description
+ -------------|-----------------------------------------------------------
+ image | Image manifest that can be used to start a container.
+ attestation | Attestation manifest produced by the Buildkit builder for a specific image manifest.
+ ImageData:
+ description: |
+ The image data for the image manifest.
+ This field is only populated when Kind is "image".
+ type: "object"
+ x-nullable: true
+ x-omitempty: true
+ required: ["Platform", "Containers", "Size", "UnpackedSize"]
+ properties:
+ Platform:
+ $ref: "#/definitions/OCIPlatform"
+ description: |
+ OCI platform of the image. This will be the platform specified in the
+ manifest descriptor from the index/manifest list.
+ If it's not available, it will be obtained from the image config.
+ Containers:
+ description: |
+ The IDs of the containers that are using this image.
+ type: "array"
+ items:
+ type: "string"
+ example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"]
+ Size:
+ type: "object"
+ x-nullable: false
+ required: ["Unpacked"]
+ properties:
+ Unpacked:
+ type: "integer"
+ format: "int64"
+ example: 3987495
+ description: |
+ Unpacked is the size (in bytes) of the locally unpacked
+ (uncompressed) image content that's directly usable by the containers
+ running this image.
+ It's independent of the distributable content - e.g.
+ the image might still have unpacked data that's still used by
+ some container even when the distributable/compressed content is
+ already gone.
+ AttestationData:
+ description: |
+ The image data for the attestation manifest.
+ This field is only populated when Kind is "attestation".
+ type: "object"
+ x-nullable: true
+ x-omitempty: true
+ required: ["For"]
+ properties:
+ For:
+ description: |
+ The digest of the image manifest that this attestation is for.
+ type: "string"
+ example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f"
+
+paths:
+ /containers/json:
+ get:
+ summary: "List containers"
+ description: |
+ Returns a list of containers. For details on the format, see the
+ [inspect endpoint](#operation/ContainerInspect).
+
+ Note that it uses a different, smaller representation of a container
+ than inspecting a single container. For example, the list of linked
+ containers is not propagated.
+ operationId: "ContainerList"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "all"
+ in: "query"
+ description: |
+ Return all containers. By default, only running containers are shown.
+ type: "boolean"
+ default: false
+ - name: "limit"
+ in: "query"
+ description: |
+ Return this number of most recently created containers, including
+ non-running ones.
+ type: "integer"
+ - name: "size"
+ in: "query"
+ description: |
+ Return the size of container as fields `SizeRw` and `SizeRootFs`.
+ type: "boolean"
+ default: false
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the container list, encoded as JSON (a
+ `map[string][]string`). For example, `{"status": ["paused"]}` will
+ only return paused containers (an encoding sketch follows the list).
+
+ Available filters:
+
+ - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`)
+ - `before`=(`<container id>` or `<container name>`)
+ - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`)
+ - `exited=<int>` containers with exit code of `<int>`
+ - `health`=(`starting`|`healthy`|`unhealthy`|`none`)
+ - `id=<ID>` a container's ID
+ - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only)
+ - `is-task=`(`true`|`false`)
+ - `label=key` or `label="key=value"` of a container label
+ - `name=<name>` a container's name
+ - `network`=(`<network id>` or `<network name>`)
+ - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`)
+ - `since`=(`<container id>` or `<container name>`)
+ - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`)
+ - `volume`=(`<volume name>` or `<mount point destination>`)
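+
+ As a non-normative illustration, a minimal Go sketch that builds the
+ encoded query string; the endpoint and filter values are taken from
+ the example above:
+
+ ```go
+ package main
+
+ import (
+ 	"encoding/json"
+ 	"fmt"
+ 	"net/url"
+ )
+
+ func main() {
+ 	filters := map[string][]string{"status": {"paused"}}
+ 	b, _ := json.Marshal(filters) // a map of string keys to string slices always marshals
+ 	q := url.Values{}
+ 	q.Set("all", "true")
+ 	q.Set("filters", string(b))
+ 	// Prints:
+ 	// /containers/json?all=true&filters=%7B%22status%22%3A%5B%22paused%22%5D%7D
+ 	fmt.Println("/containers/json?" + q.Encode())
+ }
+ ```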
+ type: "string"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerSummary"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Container"]
+ /containers/create:
+ post:
+ summary: "Create a container"
+ operationId: "ContainerCreate"
+ consumes:
+ - "application/json"
+ - "application/octet-stream"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "name"
+ in: "query"
+ description: |
+ Assign the specified name to the container. Must match
+ `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`.
+ type: "string"
+ pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$"
+ - name: "platform"
+ in: "query"
+ description: |
+ Platform in the format `os[/arch[/variant]]` used for image lookup.
+
+ When specified, the daemon checks if the requested image is present
+ in the local image cache with the given OS and Architecture, and
+ otherwise returns a `404` status.
+
+ If the option is not set, the host's native OS and Architecture are
+ used to look up the image in the image cache. However, if no platform
+ is passed and the given image does exist in the local image cache,
+ but its OS or architecture does not match, the container is created
+ with the available image, and a warning is added to the `Warnings`
+ field in the response, for example:
+
+ WARNING: The requested image's platform (linux/arm64/v8) does not
+ match the detected host platform (linux/amd64) and no
+ specific platform was requested
+
+ type: "string"
+ default: ""
+ - name: "body"
+ in: "body"
+ description: "Container to create"
+ schema:
+ allOf:
+ - $ref: "#/definitions/ContainerConfig"
+ - type: "object"
+ properties:
+ HostConfig:
+ $ref: "#/definitions/HostConfig"
+ NetworkingConfig:
+ $ref: "#/definitions/NetworkingConfig"
+ example:
+ Hostname: ""
+ Domainname: ""
+ User: ""
+ AttachStdin: false
+ AttachStdout: true
+ AttachStderr: true
+ Tty: false
+ OpenStdin: false
+ StdinOnce: false
+ Env:
+ - "FOO=bar"
+ - "BAZ=quux"
+ Cmd:
+ - "date"
+ Entrypoint: ""
+ Image: "ubuntu"
+ Labels:
+ com.example.vendor: "Acme"
+ com.example.license: "GPL"
+ com.example.version: "1.0"
+ Volumes:
+ /volumes/data: {}
+ WorkingDir: ""
+ NetworkDisabled: false
+ MacAddress: "12:34:56:78:9a:bc"
+ ExposedPorts:
+ 22/tcp: {}
+ StopSignal: "SIGTERM"
+ StopTimeout: 10
+ HostConfig:
+ Binds:
+ - "/tmp:/tmp"
+ Links:
+ - "redis3:redis"
+ Memory: 0
+ MemorySwap: 0
+ MemoryReservation: 0
+ NanoCpus: 500000
+ CpuPercent: 80
+ CpuShares: 512
+ CpuPeriod: 100000
+ CpuRealtimePeriod: 1000000
+ CpuRealtimeRuntime: 10000
+ CpuQuota: 50000
+ CpusetCpus: "0,1"
+ CpusetMems: "0,1"
+ MaximumIOps: 0
+ MaximumIOBps: 0
+ BlkioWeight: 300
+ BlkioWeightDevice:
+ - {}
+ BlkioDeviceReadBps:
+ - {}
+ BlkioDeviceReadIOps:
+ - {}
+ BlkioDeviceWriteBps:
+ - {}
+ BlkioDeviceWriteIOps:
+ - {}
+ DeviceRequests:
+ - Driver: "nvidia"
+ Count: -1
+ DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"]
+ Capabilities: [["gpu", "nvidia", "compute"]]
+ Options:
+ property1: "string"
+ property2: "string"
+ MemorySwappiness: 60
+ OomKillDisable: false
+ OomScoreAdj: 500
+ PidMode: ""
+ PidsLimit: 0
+ PortBindings:
+ 22/tcp:
+ - HostPort: "11022"
+ PublishAllPorts: false
+ Privileged: false
+ ReadonlyRootfs: false
+ Dns:
+ - "8.8.8.8"
+ DnsOptions:
+ - ""
+ DnsSearch:
+ - ""
+ VolumesFrom:
+ - "parent"
+ - "other:ro"
+ CapAdd:
+ - "NET_ADMIN"
+ CapDrop:
+ - "MKNOD"
+ GroupAdd:
+ - "newgroup"
+ RestartPolicy:
+ Name: ""
+ MaximumRetryCount: 0
+ AutoRemove: true
+ NetworkMode: "bridge"
+ Devices: []
+ Ulimits:
+ - {}
+ LogConfig:
+ Type: "json-file"
+ Config: {}
+ SecurityOpt: []
+ StorageOpt: {}
+ CgroupParent: ""
+ VolumeDriver: ""
+ ShmSize: 67108864
+ NetworkingConfig:
+ EndpointsConfig:
+ isolated_nw:
+ IPAMConfig:
+ IPv4Address: "172.20.30.33"
+ IPv6Address: "2001:db8:abcd::3033"
+ LinkLocalIPs:
+ - "169.254.34.68"
+ - "fe80::3468"
+ Links:
+ - "container_1"
+ - "container_2"
+ Aliases:
+ - "server_x"
+ - "server_y"
+ database_nw: {}
+
+ required: true
+ responses:
+ 201:
+ description: "Container created successfully"
+ schema:
+ $ref: "#/definitions/ContainerCreateResponse"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such image: c2ada9df5af8"
+ 409:
+ description: "conflict"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Container"]
+ /containers/{id}/json:
+ get:
+ summary: "Inspect a container"
+ description: "Return low-level information about a container."
+ operationId: "ContainerInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/ContainerInspectResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "size"
+ in: "query"
+ type: "boolean"
+ default: false
+ description: "Return the size of container as fields `SizeRw` and `SizeRootFs`"
+ tags: ["Container"]
+ /containers/{id}/top:
+ get:
+ summary: "List processes running inside a container"
+ description: |
+ On Unix systems, this is done by running the `ps` command. This endpoint
+ is not supported on Windows.
+ operationId: "ContainerTop"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/ContainerTopResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "ps_args"
+ in: "query"
+ description: "The arguments to pass to `ps`. For example, `aux`"
+ type: "string"
+ default: "-ef"
+ tags: ["Container"]
+ /containers/{id}/logs:
+ get:
+ summary: "Get container logs"
+ description: |
+ Get `stdout` and `stderr` logs from a container.
+
+ Note: This endpoint works only for containers with the `json-file` or
+ `journald` logging driver.
+ produces:
+ - "application/vnd.docker.raw-stream"
+ - "application/vnd.docker.multiplexed-stream"
+ operationId: "ContainerLogs"
+ responses:
+ 200:
+ description: |
+ logs returned as a stream in response body.
+ For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).
+ Note that unlike the attach endpoint, the logs endpoint does not
+ upgrade the connection and does not set Content-Type.
+ schema:
+ type: "string"
+ format: "binary"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "follow"
+ in: "query"
+ description: "Keep connection after returning logs."
+ type: "boolean"
+ default: false
+ - name: "stdout"
+ in: "query"
+ description: "Return logs from `stdout`"
+ type: "boolean"
+ default: false
+ - name: "stderr"
+ in: "query"
+ description: "Return logs from `stderr`"
+ type: "boolean"
+ default: false
+ - name: "since"
+ in: "query"
+ description: "Only return logs since this time, as a UNIX timestamp"
+ type: "integer"
+ default: 0
+ - name: "until"
+ in: "query"
+ description: "Only return logs before this time, as a UNIX timestamp"
+ type: "integer"
+ default: 0
+ - name: "timestamps"
+ in: "query"
+ description: "Add timestamps to every log line"
+ type: "boolean"
+ default: false
+ - name: "tail"
+ in: "query"
+ description: |
+ Only return this number of log lines from the end of the logs.
+ Specify as an integer or `all` to output all log lines.
+ type: "string"
+ default: "all"
+ tags: ["Container"]
+ /containers/{id}/changes:
+ get:
+ summary: "Get changes on a container’s filesystem"
+ description: |
+ Returns which files in a container's filesystem have been added, deleted,
+ or modified. The `Kind` of modification can be one of the following
+ (a decoding sketch follows the list):
+
+ - `0`: Modified ("C")
+ - `1`: Added ("A")
+ - `2`: Deleted ("D")
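+
+ For illustration, a minimal Go sketch that decodes the response body
+ and prints each change with the letter shown above:
+
+ ```go
+ package example
+
+ import (
+ 	"encoding/json"
+ 	"fmt"
+ 	"io"
+ )
+
+ func printChanges(body io.Reader) error {
+ 	var changes []struct {
+ 		Path string
+ 		Kind int
+ 	}
+ 	if err := json.NewDecoder(body).Decode(&changes); err != nil {
+ 		return err
+ 	}
+ 	letters := [...]string{"C", "A", "D"} // Kind 0, 1, 2 as listed above
+ 	for _, c := range changes {
+ 		fmt.Printf("%s %s\n", letters[c.Kind], c.Path)
+ 	}
+ 	return nil
+ }
+ ```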
+ operationId: "ContainerChanges"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "The list of changes"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/FilesystemChange"
+ examples:
+ application/json:
+ - Path: "/dev"
+ Kind: 0
+ - Path: "/dev/kmsg"
+ Kind: 1
+ - Path: "/test"
+ Kind: 1
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ tags: ["Container"]
+ /containers/{id}/export:
+ get:
+ summary: "Export a container"
+ description: "Export the contents of a container as a tarball."
+ operationId: "ContainerExport"
+ produces:
+ - "application/octet-stream"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ tags: ["Container"]
+ /containers/{id}/stats:
+ get:
+ summary: "Get container stats based on resource usage"
+ description: |
+ This endpoint returns a live stream of a container’s resource usage
+ statistics.
+
+ The `precpu_stats` is the CPU statistic of the *previous* read, and is
+ used to calculate the CPU usage percentage. It is not an exact copy
+ of the `cpu_stats` field.
+
+ If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is
+ nil, then for compatibility with older daemons the length of the
+ corresponding `cpu_usage.percpu_usage` array should be used.
+
+ On a cgroup v2 host, the following fields are not set:
+ * `blkio_stats`: all fields other than `io_service_bytes_recursive`
+ * `cpu_stats`: `cpu_usage.percpu_usage`
+ * `memory_stats`: `max_usage` and `failcnt`
+ Also, `memory_stats.stats` fields are incompatible with cgroup v1.
+
+ To calculate the values shown by the `stats` command of the docker CLI
+ tool, the following formulas can be used (a Go sketch follows the list):
+ * used_memory = `memory_stats.usage - memory_stats.stats.cache`
+ * available_memory = `memory_stats.limit`
+ * Memory usage % = `(used_memory / available_memory) * 100.0`
+ * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage`
+ * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage`
+ * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus`
+ * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0`
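+
+ As a rough illustration, a minimal Go sketch of these formulas; the
+ structs below are hand-written for this example and declare only the
+ fields used here, not the full stats object:
+
+ ```go
+ package example
+
+ type cpuStats struct {
+ 	CPUUsage struct {
+ 		TotalUsage  uint64   `json:"total_usage"`
+ 		PercpuUsage []uint64 `json:"percpu_usage"`
+ 	} `json:"cpu_usage"`
+ 	SystemCPUUsage uint64 `json:"system_cpu_usage"`
+ 	OnlineCPUs     uint32 `json:"online_cpus"`
+ }
+
+ type stats struct {
+ 	MemoryStats struct {
+ 		Usage uint64            `json:"usage"`
+ 		Limit uint64            `json:"limit"`
+ 		Stats map[string]uint64 `json:"stats"`
+ 	} `json:"memory_stats"`
+ 	CPUStats    cpuStats `json:"cpu_stats"`
+ 	PreCPUStats cpuStats `json:"precpu_stats"`
+ }
+
+ func usagePercent(s stats) (memPct, cpuPct float64) {
+ 	// "cache" is a cgroup v1 field; on cgroup v2 the map lookup yields 0.
+ 	usedMemory := s.MemoryStats.Usage - s.MemoryStats.Stats["cache"]
+ 	memPct = float64(usedMemory) / float64(s.MemoryStats.Limit) * 100.0
+
+ 	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage - s.PreCPUStats.CPUUsage.TotalUsage)
+ 	systemDelta := float64(s.CPUStats.SystemCPUUsage - s.PreCPUStats.SystemCPUUsage)
+ 	numCPUs := float64(s.CPUStats.OnlineCPUs)
+ 	if numCPUs == 0 { // older daemons: fall back to the percpu_usage length
+ 		numCPUs = float64(len(s.CPUStats.CPUUsage.PercpuUsage))
+ 	}
+ 	if systemDelta > 0 {
+ 		cpuPct = cpuDelta / systemDelta * numCPUs * 100.0
+ 	}
+ 	return memPct, cpuPct
+ }
+ ```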
+ operationId: "ContainerStats"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/ContainerStatsResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "stream"
+ in: "query"
+ description: |
+ Stream the output. If false, the stats are output once and then
+ the connection is closed.
+ type: "boolean"
+ default: true
+ - name: "one-shot"
+ in: "query"
+ description: |
+ Only get a single stat instead of waiting for 2 cycles. Must be used
+ with `stream=false`.
+ type: "boolean"
+ default: false
+ tags: ["Container"]
+ /containers/{id}/resize:
+ post:
+ summary: "Resize a container TTY"
+ description: "Resize the TTY for a container."
+ operationId: "ContainerResize"
+ consumes:
+ - "application/octet-stream"
+ produces:
+ - "text/plain"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "cannot resize container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "h"
+ in: "query"
+ required: true
+ description: "Height of the TTY session in characters"
+ type: "integer"
+ - name: "w"
+ in: "query"
+ required: true
+ description: "Width of the TTY session in characters"
+ type: "integer"
+ tags: ["Container"]
+ /containers/{id}/start:
+ post:
+ summary: "Start a container"
+ operationId: "ContainerStart"
+ responses:
+ 204:
+ description: "no error"
+ 304:
+ description: "container already started"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "detachKeys"
+ in: "query"
+ description: |
+ Override the key sequence for detaching a container. Format is a
+ single character `[a-Z]` or `ctrl-<value>` where `<value>` is one
+ of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+ type: "string"
+ tags: ["Container"]
+ /containers/{id}/stop:
+ post:
+ summary: "Stop a container"
+ operationId: "ContainerStop"
+ responses:
+ 204:
+ description: "no error"
+ 304:
+ description: "container already stopped"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "signal"
+ in: "query"
+ description: |
+ Signal to send to the container as an integer or string (e.g. `SIGINT`).
+ type: "string"
+ - name: "t"
+ in: "query"
+ description: "Number of seconds to wait before killing the container"
+ type: "integer"
+ tags: ["Container"]
+ /containers/{id}/restart:
+ post:
+ summary: "Restart a container"
+ operationId: "ContainerRestart"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "signal"
+ in: "query"
+ description: |
+ Signal to send to the container as an integer or string (e.g. `SIGINT`).
+ type: "string"
+ - name: "t"
+ in: "query"
+ description: "Number of seconds to wait before killing the container"
+ type: "integer"
+ tags: ["Container"]
+ /containers/{id}/kill:
+ post:
+ summary: "Kill a container"
+ description: |
+ Send a POSIX signal to a container, defaulting to killing the
+ container.
+ operationId: "ContainerKill"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 409:
+ description: "container is not running"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "signal"
+ in: "query"
+ description: |
+ Signal to send to the container as an integer or string (e.g. `SIGINT`).
+ type: "string"
+ default: "SIGKILL"
+ tags: ["Container"]
+ /containers/{id}/update:
+ post:
+ summary: "Update a container"
+ description: |
+ Change various configuration options of a container without having to
+ recreate it.
+ operationId: "ContainerUpdate"
+ consumes: ["application/json"]
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "The container has been updated."
+ schema:
+ $ref: "#/definitions/ContainerUpdateResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "update"
+ in: "body"
+ required: true
+ schema:
+ allOf:
+ - $ref: "#/definitions/Resources"
+ - type: "object"
+ properties:
+ RestartPolicy:
+ $ref: "#/definitions/RestartPolicy"
+ example:
+ BlkioWeight: 300
+ CpuShares: 512
+ CpuPeriod: 100000
+ CpuQuota: 50000
+ CpuRealtimePeriod: 1000000
+ CpuRealtimeRuntime: 10000
+ CpusetCpus: "0,1"
+ CpusetMems: "0"
+ Memory: 314572800
+ MemorySwap: 514288000
+ MemoryReservation: 209715200
+ RestartPolicy:
+ MaximumRetryCount: 4
+ Name: "on-failure"
+ tags: ["Container"]
+ /containers/{id}/rename:
+ post:
+ summary: "Rename a container"
+ operationId: "ContainerRename"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 409:
+ description: "name already in use"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "name"
+ in: "query"
+ required: true
+ description: "New name for the container"
+ type: "string"
+ tags: ["Container"]
+ /containers/{id}/pause:
+ post:
+ summary: "Pause a container"
+ description: |
+ Use the freezer cgroup to suspend all processes in a container.
+
+ Traditionally, when suspending a process the `SIGSTOP` signal is used,
+ which is observable by the process being suspended. With the freezer
+ cgroup the process is unaware that it is being suspended (and
+ subsequently resumed) and has no way to observe either event.
+ operationId: "ContainerPause"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ tags: ["Container"]
+ /containers/{id}/unpause:
+ post:
+ summary: "Unpause a container"
+ description: "Resume a container which has been paused."
+ operationId: "ContainerUnpause"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ tags: ["Container"]
+ /containers/{id}/attach:
+ post:
+ summary: "Attach to a container"
+ description: |
+ Attach to a container to read its output or send it input. You can attach
+ to the same container multiple times and you can reattach to containers
+ that have been detached.
+
+ Either the `stream` or `logs` parameter must be `true` for this endpoint
+ to do anything.
+
+ See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/)
+ for more details.
+
+ ### Hijacking
+
+ This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`,
+ and `stderr` on the same socket.
+
+ This is the response from the daemon for an attach request:
+
+ ```
+ HTTP/1.1 200 OK
+ Content-Type: application/vnd.docker.raw-stream
+
+ [STREAM]
+ ```
+
+ After the headers and two newlines, the TCP connection can now be used
+ for raw, bidirectional communication between the client and server.
+
+ To hint potential proxies about connection hijacking, the Docker client
+ can also optionally send connection upgrade headers.
+
+ For example, the client sends this request to upgrade the connection:
+
+ ```
+ POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1
+ Upgrade: tcp
+ Connection: Upgrade
+ ```
+
+ The Docker daemon will respond with a `101 UPGRADED` response, and will
+ similarly follow with the raw stream:
+
+ ```
+ HTTP/1.1 101 UPGRADED
+ Content-Type: application/vnd.docker.raw-stream
+ Connection: Upgrade
+ Upgrade: tcp
+
+ [STREAM]
+ ```
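+
+ Putting the exchange together, a minimal Go sketch that performs the
+ upgrade over the default Unix socket and dumps the raw stream; the
+ socket path and container ID are placeholders from the example above:
+
+ ```go
+ package main
+
+ import (
+ 	"bufio"
+ 	"fmt"
+ 	"io"
+ 	"net"
+ 	"os"
+ )
+
+ func main() {
+ 	conn, err := net.Dial("unix", "/var/run/docker.sock")
+ 	if err != nil {
+ 		panic(err)
+ 	}
+ 	defer conn.Close()
+
+ 	// Send the attach request with the connection upgrade headers.
+ 	fmt.Fprint(conn, "POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1\r\n"+
+ 		"Host: localhost\r\nUpgrade: tcp\r\nConnection: Upgrade\r\n\r\n")
+
+ 	br := bufio.NewReader(conn)
+ 	for { // skip the response status line and headers
+ 		line, err := br.ReadString('\n')
+ 		if err != nil {
+ 			panic(err)
+ 		}
+ 		if line == "\r\n" { // blank line ends the headers
+ 			break
+ 		}
+ 	}
+ 	io.Copy(os.Stdout, br) // raw stream (multiplexed when Tty is disabled)
+ }
+ ```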
+
+ ### Stream format
+
+ When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate),
+ the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream
+ and the stream over the hijacked connection is multiplexed to separate out
+ `stdout` and `stderr`. The stream consists of a series of frames, each
+ containing a header and a payload.
+
+ The header identifies which stream the frame belongs to (`stdout` or
+ `stderr`). It also contains the size of the associated frame, encoded
+ as a `uint32` in the last four bytes.
+
+ It is encoded on the first eight bytes like this:
+
+ ```go
+ header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+ ```
+
+ `STREAM_TYPE` can be:
+
+ - 0: `stdin` (is written on `stdout`)
+ - 1: `stdout`
+ - 2: `stderr`
+
+ `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size
+ encoded as big endian.
+
+ Following the header is the payload: the specified number of bytes
+ belonging to the stream indicated by `STREAM_TYPE`.
+
+ The simplest way to implement this protocol is the following (a Go
+ sketch follows the steps):
+
+ 1. Read 8 bytes.
+ 2. Choose `stdout` or `stderr` depending on the first byte.
+ 3. Extract the frame size from the last four bytes.
+ 4. Read the extracted size and output it on the correct output.
+ 5. Goto 1.
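+
+ A minimal Go sketch of these steps, assuming `r` reads from the
+ hijacked connection:
+
+ ```go
+ package example
+
+ import (
+ 	"encoding/binary"
+ 	"io"
+ )
+
+ func demux(r io.Reader, stdout, stderr io.Writer) error {
+ 	var header [8]byte
+ 	for { // 1. Read 8 bytes.
+ 		if _, err := io.ReadFull(r, header[:]); err != nil {
+ 			if err == io.EOF {
+ 				return nil // clean end of stream
+ 			}
+ 			return err
+ 		}
+ 		dst := stdout // 2. Choose the output from the first byte (0 and 1 go to stdout).
+ 		if header[0] == 2 {
+ 			dst = stderr
+ 		}
+ 		// 3. Extract the frame size from the last four bytes (big endian).
+ 		size := int64(binary.BigEndian.Uint32(header[4:8]))
+ 		// 4. Copy the frame to the chosen output, then loop (5).
+ 		if _, err := io.CopyN(dst, r, size); err != nil {
+ 			return err
+ 		}
+ 	}
+ }
+ ```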
+
+ ### Stream format when using a TTY
+
+ When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate),
+ the stream is not multiplexed. The data exchanged over the hijacked
+ connection is simply the raw data from the process PTY and client's
+ `stdin`.
+
+ operationId: "ContainerAttach"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ - "application/vnd.docker.multiplexed-stream"
+ responses:
+ 101:
+ description: "no error, hints proxy about hijacking"
+ 200:
+ description: "no error, no upgrade header found"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "detachKeys"
+ in: "query"
+ description: |
+ Override the key sequence for detaching a container. Format is a single
+ character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`,
+ `@`, `^`, `[`, `,` or `_`.
+ type: "string"
+ - name: "logs"
+ in: "query"
+ description: |
+ Replay previous logs from the container.
+
+ This is useful when attaching to a container that has already
+ started, if you want to output everything since the container started.
+
+ If `stream` is also enabled, once all the previous output has been
+ returned, it will seamlessly transition into streaming current
+ output.
+ type: "boolean"
+ default: false
+ - name: "stream"
+ in: "query"
+ description: |
+ Stream attached streams from the time the request was made onwards.
+ type: "boolean"
+ default: false
+ - name: "stdin"
+ in: "query"
+ description: "Attach to `stdin`"
+ type: "boolean"
+ default: false
+ - name: "stdout"
+ in: "query"
+ description: "Attach to `stdout`"
+ type: "boolean"
+ default: false
+ - name: "stderr"
+ in: "query"
+ description: "Attach to `stderr`"
+ type: "boolean"
+ default: false
+ tags: ["Container"]
+ /containers/{id}/attach/ws:
+ get:
+ summary: "Attach to a container via a websocket"
+ operationId: "ContainerAttachWebsocket"
+ responses:
+ 101:
+ description: "no error, hints proxy about hijacking"
+ 200:
+ description: "no error, no upgrade header found"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "detachKeys"
+ in: "query"
+ description: |
+ Override the key sequence for detaching a container. Format is a single
+ character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`,
+ `@`, `^`, `[`, `,`, or `_`.
+ type: "string"
+ - name: "logs"
+ in: "query"
+ description: "Return logs"
+ type: "boolean"
+ default: false
+ - name: "stream"
+ in: "query"
+ description: "Return stream"
+ type: "boolean"
+ default: false
+ - name: "stdin"
+ in: "query"
+ description: "Attach to `stdin`"
+ type: "boolean"
+ default: false
+ - name: "stdout"
+ in: "query"
+ description: "Attach to `stdout`"
+ type: "boolean"
+ default: false
+ - name: "stderr"
+ in: "query"
+ description: "Attach to `stderr`"
+ type: "boolean"
+ default: false
+ tags: ["Container"]
+ /containers/{id}/wait:
+ post:
+ summary: "Wait for a container"
+ description: "Block until a container stops, then returns the exit code."
+ operationId: "ContainerWait"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "The container has exit."
+ schema:
+ $ref: "#/definitions/ContainerWaitResponse"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "condition"
+ in: "query"
+ description: |
+ Wait until a container state reaches the given condition.
+
+ Defaults to `not-running` if omitted or empty.
+ type: "string"
+ enum:
+ - "not-running"
+ - "next-exit"
+ - "removed"
+ default: "not-running"
+ tags: ["Container"]
+ /containers/{id}:
+ delete:
+ summary: "Remove a container"
+ operationId: "ContainerDelete"
+ responses:
+ 204:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 409:
+ description: "conflict"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: |
+ You cannot remove a running container: c2ada9df5af8. Stop the
+ container before attempting removal or force remove
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "v"
+ in: "query"
+ description: "Remove anonymous volumes associated with the container."
+ type: "boolean"
+ default: false
+ - name: "force"
+ in: "query"
+ description: "If the container is running, kill it before removing it."
+ type: "boolean"
+ default: false
+ - name: "link"
+ in: "query"
+ description: "Remove the specified link associated with the container."
+ type: "boolean"
+ default: false
+ tags: ["Container"]
+ /containers/{id}/archive:
+ head:
+ summary: "Get information about files in a container"
+ description: |
+ A response header `X-Docker-Container-Path-Stat` is returned, containing
+ a base64-encoded JSON object with some filesystem header information
+ about the path.
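+
+ A minimal Go sketch of decoding this header; the exact field set is
+ not specified here, so it decodes into a generic map, and standard
+ base64 is assumed:
+
+ ```go
+ package example
+
+ import (
+ 	"encoding/base64"
+ 	"encoding/json"
+ 	"net/http"
+ )
+
+ func pathStat(resp *http.Response) (map[string]any, error) {
+ 	raw := resp.Header.Get("X-Docker-Container-Path-Stat")
+ 	b, err := base64.StdEncoding.DecodeString(raw)
+ 	if err != nil {
+ 		return nil, err
+ 	}
+ 	var stat map[string]any
+ 	if err := json.Unmarshal(b, &stat); err != nil {
+ 		return nil, err
+ 	}
+ 	return stat, nil
+ }
+ ```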
+ operationId: "ContainerArchiveInfo"
+ responses:
+ 200:
+ description: "no error"
+ headers:
+ X-Docker-Container-Path-Stat:
+ type: "string"
+ description: |
+ A base64-encoded JSON object with some filesystem header
+ information about the path
+ 400:
+ description: "Bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "Container or path does not exist"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "path"
+ in: "query"
+ required: true
+ description: "Resource in the container’s filesystem to archive."
+ type: "string"
+ tags: ["Container"]
+ get:
+ summary: "Get an archive of a filesystem resource in a container"
+ description: "Get a tar archive of a resource in the filesystem of container id."
+ operationId: "ContainerArchive"
+ produces: ["application/x-tar"]
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "Bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "Container or path does not exist"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "path"
+ in: "query"
+ required: true
+ description: "Resource in the container’s filesystem to archive."
+ type: "string"
+ tags: ["Container"]
+ put:
+ summary: "Extract an archive of files or folders to a directory in a container"
+ description: |
+ Upload a tar archive to be extracted to a path in the filesystem of the
+ container identified by `id`. The `path` parameter is asserted to be a
+ directory. If it exists as a file, a `400` error is returned with the
+ message "not a directory".
+ operationId: "PutContainerArchive"
+ consumes: ["application/x-tar", "application/octet-stream"]
+ responses:
+ 200:
+ description: "The content was extracted successfully"
+ 400:
+ description: "Bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "not a directory"
+ 403:
+ description: "Permission denied, the volume or container rootfs is marked as read-only."
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "No such container or path does not exist inside the container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "path"
+ in: "query"
+ required: true
+ description: "Path to a directory in the container to extract the archive’s contents into. "
+ type: "string"
+ - name: "noOverwriteDirNonDir"
+ in: "query"
+ description: |
+ If `1`, `true`, or `True`, then it is an error if unpacking the
+ given content would cause an existing directory to be replaced with
+ a non-directory, or vice versa.
+ type: "string"
+ - name: "copyUIDGID"
+ in: "query"
+ description: |
+ If `1` or `true`, copy the UID/GID maps to the destination file or
+ directory.
+ type: "string"
+ - name: "inputStream"
+ in: "body"
+ required: true
+ description: |
+ The input stream must be a tar archive compressed with one of the
+ following algorithms: `identity` (no compression), `gzip`, `bzip2`,
+ or `xz`.
+ schema:
+ type: "string"
+ format: "binary"
+ tags: ["Container"]
+ /containers/prune:
+ post:
+ summary: "Delete stopped containers"
+ produces:
+ - "application/json"
+ operationId: "ContainerPrune"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
+
+ Available filters:
+ - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
+ - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels.
+ type: "string"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ title: "ContainerPruneResponse"
+ properties:
+ ContainersDeleted:
+ description: "Container IDs that were deleted"
+ type: "array"
+ items:
+ type: "string"
+ SpaceReclaimed:
+ description: "Disk space reclaimed in bytes"
+ type: "integer"
+ format: "int64"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Container"]
+ /images/json:
+ get:
+ summary: "List Images"
+ description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image."
+ operationId: "ImageList"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "Summary image data for the images matching the query"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/ImageSummary"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "all"
+ in: "query"
+ description: "Show all images. Only images from a final layer (no children) are shown by default."
+ type: "boolean"
+ default: false
+ - name: "filters"
+ in: "query"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to
+ process on the images list.
+
+ Available filters:
+
+ - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+ - `dangling=true`
+ - `label=key` or `label="key=value"` of an image label
+ - `reference`=(`<image-name>[:<tag>]`)
+ - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+ - `until=<timestamp>`
+ type: "string"
+ - name: "shared-size"
+ in: "query"
+ description: "Compute and show shared size as a `SharedSize` field on each image."
+ type: "boolean"
+ default: false
+ - name: "digests"
+ in: "query"
+ description: "Show digest information as a `RepoDigests` field on each image."
+ type: "boolean"
+ default: false
+ - name: "manifests"
+ in: "query"
+ description: "Include `Manifests` in the image summary."
+ type: "boolean"
+ default: false
+ tags: ["Image"]
+ /build:
+ post:
+ summary: "Build an image"
+ description: |
+ Build an image from a tar archive with a `Dockerfile` in it.
+
+ The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/).
+
+ The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output.
+
+ The build is canceled if the client drops the connection by quitting or being killed.
+ operationId: "ImageBuild"
+ consumes:
+ - "application/octet-stream"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "inputStream"
+ in: "body"
+ description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz."
+ schema:
+ type: "string"
+ format: "binary"
+ - name: "dockerfile"
+ in: "query"
+ description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`."
+ type: "string"
+ default: "Dockerfile"
+ - name: "t"
+ in: "query"
+ description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters."
+ type: "string"
+ - name: "extrahosts"
+ in: "query"
+ description: "Extra hosts to add to /etc/hosts"
+ type: "string"
+ - name: "remote"
+ in: "query"
+ description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball."
+ type: "string"
+ - name: "q"
+ in: "query"
+ description: "Suppress verbose build output."
+ type: "boolean"
+ default: false
+ - name: "nocache"
+ in: "query"
+ description: "Do not use the cache when building the image."
+ type: "boolean"
+ default: false
+ - name: "cachefrom"
+ in: "query"
+ description: "JSON array of images used for build cache resolution."
+ type: "string"
+ - name: "pull"
+ in: "query"
+ description: "Attempt to pull the image even if an older image exists locally."
+ type: "string"
+ - name: "rm"
+ in: "query"
+ description: "Remove intermediate containers after a successful build."
+ type: "boolean"
+ default: true
+ - name: "forcerm"
+ in: "query"
+ description: "Always remove intermediate containers, even upon failure."
+ type: "boolean"
+ default: false
+ - name: "memory"
+ in: "query"
+ description: "Set memory limit for build."
+ type: "integer"
+ - name: "memswap"
+ in: "query"
+ description: "Total memory (memory + swap). Set as `-1` to disable swap."
+ type: "integer"
+ - name: "cpushares"
+ in: "query"
+ description: "CPU shares (relative weight)."
+ type: "integer"
+ - name: "cpusetcpus"
+ in: "query"
+ description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)."
+ type: "string"
+ - name: "cpuperiod"
+ in: "query"
+ description: "The length of a CPU period in microseconds."
+ type: "integer"
+ - name: "cpuquota"
+ in: "query"
+ description: "Microseconds of CPU time that the container can get in a CPU period."
+ type: "integer"
+ - name: "buildargs"
+ in: "query"
+ description: >
+ JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker
+ uses the buildargs as the environment context for commands run via the `Dockerfile` RUN
+ instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for
+ passing secret values.
+
+
+ For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the
+ query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded.
+
+
+ [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)
+ type: "string"
+ - name: "shmsize"
+ in: "query"
+ description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB."
+ type: "integer"
+ - name: "squash"
+ in: "query"
+ description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*"
+ type: "boolean"
+ - name: "labels"
+ in: "query"
+ description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs."
+ type: "string"
+ - name: "networkmode"
+ in: "query"
+ description: |
+ Sets the networking mode for the run commands during build. Supported
+ standard values are: `bridge`, `host`, `none`, and `container:<name|id>`.
+ Any other value is taken as a custom network's name or ID to which this
+ container should connect.
+ type: "string"
+ - name: "Content-type"
+ in: "header"
+ type: "string"
+ enum:
+ - "application/x-tar"
+ default: "application/x-tar"
+ - name: "X-Registry-Config"
+ in: "header"
+ description: |
+ This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to.
+
+ The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example:
+
+ ```
+ {
+ "docker.example.com": {
+ "username": "janedoe",
+ "password": "hunter2"
+ },
+ "https://index.docker.io/v1/": {
+ "username": "mobydock",
+ "password": "conta1n3rize14"
+ }
+ }
+ ```
+
+ Only the registry domain name (and port, if not the default 443) is required. However, for legacy reasons, the Docker Hub registry must be specified with both an `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API.
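+
+ As a rough illustration, a minimal Go sketch that builds this header
+ value; the registry and credentials are the placeholders from the
+ example above, and URL-safe base64 is assumed here to match the
+ base64url convention used for `X-Registry-Auth`:
+
+ ```go
+ package example
+
+ import (
+ 	"encoding/base64"
+ 	"encoding/json"
+ )
+
+ func registryConfigHeader() (string, error) {
+ 	cfg := map[string]map[string]string{
+ 		"docker.example.com": {
+ 			"username": "janedoe", // placeholder credentials
+ 			"password": "hunter2",
+ 		},
+ 	}
+ 	b, err := json.Marshal(cfg)
+ 	if err != nil {
+ 		return "", err
+ 	}
+ 	return base64.URLEncoding.EncodeToString(b), nil
+ }
+ ```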
+ type: "string"
+ - name: "platform"
+ in: "query"
+ description: "Platform in the format os[/arch[/variant]]"
+ type: "string"
+ default: ""
+ - name: "target"
+ in: "query"
+ description: "Target build stage"
+ type: "string"
+ default: ""
+ - name: "outputs"
+ in: "query"
+ description: "BuildKit output configuration"
+ type: "string"
+ default: ""
+ - name: "version"
+ in: "query"
+ type: "string"
+ default: "1"
+ enum: ["1", "2"]
+ description: |
+ Version of the builder backend to use.
+
+ - `1` is the first generation classic (deprecated) builder in the Docker daemon (default)
+ - `2` is [BuildKit](https://github.com/moby/buildkit)
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "Bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Image"]
+ /build/prune:
+ post:
+ summary: "Delete builder cache"
+ produces:
+ - "application/json"
+ operationId: "BuildPrune"
+ parameters:
+ - name: "keep-storage"
+ in: "query"
+ description: |
+ Amount of disk space in bytes to keep for cache
+
+ > **Deprecated**: This parameter is deprecated and has been renamed to "reserved-space".
+ > It is kept for backward compatibility and will be removed in API v1.49.
+ type: "integer"
+ format: "int64"
+ - name: "reserved-space"
+ in: "query"
+ description: "Amount of disk space in bytes to keep for cache"
+ type: "integer"
+ format: "int64"
+ - name: "max-used-space"
+ in: "query"
+ description: "Maximum amount of disk space allowed to keep for cache"
+ type: "integer"
+ format: "int64"
+ - name: "min-free-space"
+ in: "query"
+ description: "Target amount of free disk space after pruning"
+ type: "integer"
+ format: "int64"
+ - name: "all"
+ in: "query"
+ type: "boolean"
+ description: "Remove all types of build cache"
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to
+ process on the list of build cache objects.
+
+ Available filters:
+
+ - `until=<timestamp>` remove cache older than `<timestamp>`. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon's local time.
+ - `id=<id>`
+ - `parent=<id>`
+ - `type=<string>`
+ - `description=<string>`
+ - `inuse`
+ - `shared`
+ - `private`
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ title: "BuildPruneResponse"
+ properties:
+ CachesDeleted:
+ type: "array"
+ items:
+ description: "ID of build cache object"
+ type: "string"
+ SpaceReclaimed:
+ description: "Disk space reclaimed in bytes"
+ type: "integer"
+ format: "int64"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Image"]
+ /images/create:
+ post:
+ summary: "Create an image"
+ description: "Pull or import an image."
+ operationId: "ImageCreate"
+ consumes:
+ - "text/plain"
+ - "application/octet-stream"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "repository does not exist or no read access"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "fromImage"
+ in: "query"
+ description: |
+ Name of the image to pull. If the name includes a tag or digest, specific behavior applies:
+
+ - If only `fromImage` includes a tag, that tag is used.
+ - If both `fromImage` and `tag` are provided, `tag` takes precedence.
+ - If `fromImage` includes a digest, the image is pulled by digest, and `tag` is ignored.
+ - If neither a tag nor digest is specified, all tags are pulled.
+ type: "string"
+ - name: "fromSrc"
+ in: "query"
+ description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image."
+ type: "string"
+ - name: "repo"
+ in: "query"
+ description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image."
+ type: "string"
+ - name: "tag"
+ in: "query"
+ description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled."
+ type: "string"
+ - name: "message"
+ in: "query"
+ description: "Set commit message for imported image."
+ type: "string"
+ - name: "inputImage"
+ in: "body"
+ description: "Image content if the value `-` has been specified in fromSrc query parameter"
+ schema:
+ type: "string"
+ required: false
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: |
+ A base64url-encoded auth configuration.
+
+ Refer to the [authentication section](#section/Authentication) for
+ details.
+ type: "string"
+ - name: "changes"
+ in: "query"
+ description: |
+ Apply `Dockerfile` instructions to the image that is created,
+ for example: `changes=ENV DEBUG=true`.
+ Note that `ENV DEBUG=true` should be URI component encoded.
+
+ Supported `Dockerfile` instructions:
+ `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR`
+ type: "array"
+ items:
+ type: "string"
+ - name: "platform"
+ in: "query"
+ description: |
+ Platform in the format os[/arch[/variant]].
+
+ When used in combination with the `fromImage` option, the daemon checks
+ if the given image is present in the local image cache with the given
+ OS and Architecture, and otherwise attempts to pull the image. If the
+ option is not set, the host's native OS and Architecture are used.
+ If the given image does not exist in the local image cache, the daemon
+ attempts to pull the image with the host's native OS and Architecture.
+ If the given image does exist in the local image cache, but its OS or
+ architecture does not match, a warning is produced.
+
+ When used with the `fromSrc` option to import an image from an archive,
+ this option sets the platform information for the imported image. If
+ the option is not set, the host's native OS and Architecture are used
+ for the imported image.
+ type: "string"
+ default: ""
+ tags: ["Image"]
+ /images/{name}/json:
+ get:
+ summary: "Inspect an image"
+ description: "Return low-level information about an image."
+ operationId: "ImageInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ $ref: "#/definitions/ImageInspect"
+ 404:
+ description: "No such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such image: someimage (tag: latest)"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or id"
+ type: "string"
+ required: true
+ - name: "manifests"
+ in: "query"
+ description: "Include Manifests in the image summary."
+ type: "boolean"
+ default: false
+ required: false
+ tags: ["Image"]
+ /images/{name}/history:
+ get:
+ summary: "Get the history of an image"
+ description: "Return parent layers of an image."
+ operationId: "ImageHistory"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "List of image layers"
+ schema:
+ type: "array"
+ items:
+ type: "object"
+ x-go-name: HistoryResponseItem
+ title: "HistoryResponseItem"
+ description: "individual image layer information in response to ImageHistory operation"
+ required: [Id, Created, CreatedBy, Tags, Size, Comment]
+ properties:
+ Id:
+ type: "string"
+ x-nullable: false
+ Created:
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ CreatedBy:
+ type: "string"
+ x-nullable: false
+ Tags:
+ type: "array"
+ items:
+ type: "string"
+ Size:
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ Comment:
+ type: "string"
+ x-nullable: false
+ examples:
+ application/json:
+ - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710"
+ Created: 1398108230
+ CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /"
+ Tags:
+ - "ubuntu:lucid"
+ - "ubuntu:10.04"
+ Size: 182964289
+ Comment: ""
+ - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8"
+ Created: 1398108222
+ CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi <admwiggin@gmail.com> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/"
+ Tags: []
+ Size: 0
+ Comment: ""
+ - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"
+ Created: 1371157430
+ CreatedBy: ""
+ Tags:
+ - "scratch12:latest"
+ - "scratch:latest"
+ Size: 0
+ Comment: "Imported from -"
+ 404:
+ description: "No such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or ID"
+ type: "string"
+ required: true
+ - name: "platform"
+ type: "string"
+ in: "query"
+ description: |
+ JSON-encoded OCI platform to select the platform-variant.
+ If omitted, it defaults to any locally available platform,
+ prioritizing the daemon's host platform.
+
+ If the daemon provides a multi-platform image store, this selects
+ the platform-variant to show the history for. If the image is
+ a single-platform image, or if the multi-platform image does not
+ provide a variant matching the given platform, an error is returned.
+
+ Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
+ tags: ["Image"]
+ /images/{name}/push:
+ post:
+ summary: "Push an image"
+ description: |
+ Push an image to a registry.
+
+ If you wish to push an image onto a private registry, that image must
+ already have a tag which references the registry. For example,
+ `registry.example.com/myimage:latest`.
+
+ The push is cancelled if the HTTP connection is closed.
+ operationId: "ImagePush"
+ consumes:
+ - "application/octet-stream"
+ responses:
+ 200:
+ description: "No error"
+ 404:
+ description: "No such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: |
+ Name of the image to push. For example, `registry.example.com/myimage`.
+ The image must be present in the local image store with the same name.
+
+ The name should be provided without tag; if a tag is provided, it
+ is ignored. For example, `registry.example.com/myimage:latest` is
+ considered equivalent to `registry.example.com/myimage`.
+
+ Use the `tag` parameter to specify the tag to push.
+ type: "string"
+ required: true
+ - name: "tag"
+ in: "query"
+ description: |
+ Tag of the image to push. For example, `latest`. If no tag is provided,
+ all tags of the given image that are present in the local image store
+ are pushed.
+ type: "string"
+ - name: "platform"
+ type: "string"
+ in: "query"
+ description: |
+ JSON-encoded OCI platform to select the platform-variant to push.
+ If not provided, an attempt is made to push all available variants.
+
+ If the daemon provides a multi-platform image store, this selects
+ the platform-variant to push to the registry. If the image is
+ a single-platform image, or if the multi-platform image does not
+ provide a variant matching the given platform, an error is returned.
+
+ Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: |
+ A base64url-encoded auth configuration.
+
+ Refer to the [authentication section](#section/Authentication) for
+ details.
+ type: "string"
+ required: true
+ tags: ["Image"]
+ /images/{name}/tag:
+ post:
+ summary: "Tag an image"
+ description: "Tag an image so that it becomes part of a repository."
+ operationId: "ImageTag"
+ responses:
+ 201:
+ description: "No error"
+ 400:
+ description: "Bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "No such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 409:
+ description: "Conflict"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or ID to tag."
+ type: "string"
+ required: true
+ - name: "repo"
+ in: "query"
+ description: "The repository to tag in. For example, `someuser/someimage`."
+ type: "string"
+ - name: "tag"
+ in: "query"
+ description: "The name of the new tag."
+ type: "string"
+ tags: ["Image"]
+ /images/{name}:
+ delete:
+ summary: "Remove an image"
+ description: |
+ Remove an image, along with any untagged parent images that were
+ referenced by that image.
+
+ Images can't be removed if they have descendant images, are being
+ used by a running container or are being used by a build.
+ operationId: "ImageDelete"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "The image was deleted successfully"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/ImageDeleteResponseItem"
+ examples:
+ application/json:
+ - Untagged: "3e2f21a89f"
+ - Deleted: "3e2f21a89f"
+ - Deleted: "53b4f83ac9"
+ 404:
+ description: "No such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 409:
+ description: "Conflict"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or ID"
+ type: "string"
+ required: true
+ - name: "force"
+ in: "query"
+ description: "Remove the image even if it is being used by stopped containers or has other tags"
+ type: "boolean"
+ default: false
+ - name: "noprune"
+ in: "query"
+ description: "Do not delete untagged parent images"
+ type: "boolean"
+ default: false
+ tags: ["Image"]
+ /images/search:
+ get:
+ summary: "Search images"
+ description: "Search for an image on Docker Hub."
+ operationId: "ImageSearch"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "array"
+ items:
+ type: "object"
+ title: "ImageSearchResponseItem"
+ properties:
+ description:
+ type: "string"
+ is_official:
+ type: "boolean"
+ is_automated:
+ description: |
+ Whether this repository has automated builds enabled.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is deprecated and will always be "false".
+ type: "boolean"
+ example: false
+ name:
+ type: "string"
+ star_count:
+ type: "integer"
+ examples:
+ application/json:
+ - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!"
+ is_official: true
+ is_automated: false
+ name: "alpine"
+ star_count: 10093
+ - description: "Busybox base image."
+ is_official: true
+ is_automated: false
+ name: "Busybox base image."
+ star_count: 3037
+ - description: "The PostgreSQL object-relational database system provides reliability and data integrity."
+ is_official: true
+ is_automated: false
+ name: "postgres"
+ star_count: 12408
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "term"
+ in: "query"
+ description: "Term to search"
+ type: "string"
+ required: true
+ - name: "limit"
+ in: "query"
+ description: "Maximum number of results to return"
+ type: "integer"
+ - name: "filters"
+ in: "query"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters:
+
+ - `is-official=(true|false)`
+ - `stars=<number>` Matches images that have at least `number` stars.
+ type: "string"
+ tags: ["Image"]
+ /images/prune:
+ post:
+ summary: "Delete unused images"
+ produces:
+ - "application/json"
+ operationId: "ImagePrune"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters:
+
+ - `dangling=<boolean>` When set to `true` (or `1`), prune only
+ unused *and* untagged images. When set to `false`
+ (or `0`), all unused images are pruned.
+ - `until=<timestamp>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
+ - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels.
+ type: "string"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ title: "ImagePruneResponse"
+ properties:
+ ImagesDeleted:
+ description: "Images that were deleted"
+ type: "array"
+ items:
+ $ref: "#/definitions/ImageDeleteResponseItem"
+ SpaceReclaimed:
+ description: "Disk space reclaimed in bytes"
+ type: "integer"
+ format: "int64"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Image"]
+ /auth:
+ post:
+ summary: "Check auth configuration"
+ description: |
+ Validate credentials for a registry and, if available, get an identity
+ token for accessing the registry without password.
+ operationId: "SystemAuth"
+ consumes: ["application/json"]
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "An identity token was generated successfully."
+ schema:
+ type: "object"
+ title: "SystemAuthResponse"
+ required: [Status]
+ properties:
+ Status:
+ description: "The status of the authentication"
+ type: "string"
+ x-nullable: false
+ IdentityToken:
+ description: "An opaque token used to authenticate a user after a successful login"
+ type: "string"
+ x-nullable: false
+ examples:
+ application/json:
+ Status: "Login Succeeded"
+ IdentityToken: "9cbaf023786cd7..."
+ 204:
+ description: "No error"
+ 401:
+ description: "Auth error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "authConfig"
+ in: "body"
+ description: "Authentication to check"
+ schema:
+ $ref: "#/definitions/AuthConfig"
+ tags: ["System"]
+ /info:
+ get:
+ summary: "Get system information"
+ operationId: "SystemInfo"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ $ref: "#/definitions/SystemInfo"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["System"]
+ /version:
+ get:
+ summary: "Get version"
+ description: "Returns the version of Docker that is running and various information about the system that Docker is running on."
+ operationId: "SystemVersion"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/SystemVersion"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["System"]
+ /_ping:
+ get:
+ summary: "Ping"
+ description: "This is a dummy endpoint you can use to test if the server is accessible."
+ operationId: "SystemPing"
+ produces: ["text/plain"]
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "string"
+ example: "OK"
+ headers:
+ Api-Version:
+ type: "string"
+ description: "Max API Version the server supports"
+ Builder-Version:
+ type: "string"
+ description: |
+ Default version of docker image builder
+
+ The default on Linux is version "2" (BuildKit), but the daemon
+ can be configured to recommend version "1" (classic Builder).
+ Windows does not yet support BuildKit for native Windows images,
+ and uses "1" (classic builder) as a default.
+
+ This value is a recommendation as advertised by the daemon, and
+ it is up to the client to choose which builder to use.
+ default: "2"
+ Docker-Experimental:
+ type: "boolean"
+ description: "If the server is running with experimental mode enabled"
+ Swarm:
+ type: "string"
+ enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"]
+ description: |
+ Contains information about Swarm status of the daemon,
+ and if the daemon is acting as a manager or worker node.
+ default: "inactive"
+ Cache-Control:
+ type: "string"
+ default: "no-cache, no-store, must-revalidate"
+ Pragma:
+ type: "string"
+ default: "no-cache"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ headers:
+ Cache-Control:
+ type: "string"
+ default: "no-cache, no-store, must-revalidate"
+ Pragma:
+ type: "string"
+ default: "no-cache"
+ tags: ["System"]
+ head:
+ summary: "Ping"
+ description: "This is a dummy endpoint you can use to test if the server is accessible."
+ operationId: "SystemPingHead"
+ produces: ["text/plain"]
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "string"
+ example: "(empty)"
+ headers:
+ Api-Version:
+ type: "string"
+ description: "Max API Version the server supports"
+ Builder-Version:
+ type: "string"
+ description: "Default version of docker image builder"
+ Docker-Experimental:
+ type: "boolean"
+ description: "If the server is running with experimental mode enabled"
+ Swarm:
+ type: "string"
+ enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"]
+ description: |
+ Contains information about Swarm status of the daemon,
+ and if the daemon is acting as a manager or worker node.
+ default: "inactive"
+ Cache-Control:
+ type: "string"
+ default: "no-cache, no-store, must-revalidate"
+ Pragma:
+ type: "string"
+ default: "no-cache"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["System"]
+ /commit:
+ post:
+ summary: "Create a new image from a container"
+ operationId: "ImageCommit"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/IDResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "containerConfig"
+ in: "body"
+ description: "The container configuration"
+ schema:
+ $ref: "#/definitions/ContainerConfig"
+ - name: "container"
+ in: "query"
+ description: "The ID or name of the container to commit"
+ type: "string"
+ - name: "repo"
+ in: "query"
+ description: "Repository name for the created image"
+ type: "string"
+ - name: "tag"
+ in: "query"
+ description: "Tag name for the create image"
+ type: "string"
+ - name: "comment"
+ in: "query"
+ description: "Commit message"
+ type: "string"
+ - name: "author"
+ in: "query"
+ description: "Author of the image (e.g., `John Hannibal Smith <hannibal@a-team.com>`)"
+ type: "string"
+ - name: "pause"
+ in: "query"
+ description: "Whether to pause the container before committing"
+ type: "boolean"
+ default: true
+ - name: "changes"
+ in: "query"
+ description: "`Dockerfile` instructions to apply while committing"
+ type: "string"
+ tags: ["Image"]
+ /events:
+ get:
+ summary: "Monitor events"
+ description: |
+ Stream real-time events from the server.
+
+ Various objects within Docker report events when something happens to them.
+
+ Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune`
+
+ Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune`
+
+ Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune`
+
+ Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune`
+
+ The Docker daemon reports these events: `reload`
+
+ Services report these events: `create`, `update`, and `remove`
+
+ Nodes report these events: `create`, `update`, and `remove`
+
+ Secrets report these events: `create`, `update`, and `remove`
+
+ Configs report these events: `create`, `update`, and `remove`
+
+ The Builder reports `prune` events
+
+ operationId: "SystemEvents"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/EventMessage"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "since"
+ in: "query"
+ description: "Show events created since this timestamp then stream new events."
+ type: "string"
+ - name: "until"
+ in: "query"
+ description: "Show events created until this timestamp then stop streaming."
+ type: "string"
+ - name: "filters"
+ in: "query"
+ description: |
+ A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters:
+
+ - `config=<string>` config name or ID
+ - `container=<string>` container name or ID
+ - `daemon=<string>` daemon name or ID
+ - `event=<string>` event type
+ - `image=<string>` image name or ID
+ - `label=<string>` image or container label
+ - `network=<string>` network name or ID
+ - `node=<string>` node ID
+ - `plugin=<string>` plugin name or ID
+ - `scope=<string>` local or swarm
+ - `secret=<string>` secret name or ID
+ - `service=<string>` service name or ID
+ - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config`
+ - `volume=<string>` volume name
+ type: "string"
+ tags: ["System"]
+ /system/df:
+ get:
+ summary: "Get data usage information"
+ operationId: "SystemDataUsage"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ title: "SystemDataUsageResponse"
+ properties:
+ LayersSize:
+ type: "integer"
+ format: "int64"
+ Images:
+ type: "array"
+ items:
+ $ref: "#/definitions/ImageSummary"
+ Containers:
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerSummary"
+ Volumes:
+ type: "array"
+ items:
+ $ref: "#/definitions/Volume"
+ BuildCache:
+ type: "array"
+ items:
+ $ref: "#/definitions/BuildCache"
+ example:
+ LayersSize: 1092588
+ Images:
+ -
+ Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749"
+ ParentId: ""
+ RepoTags:
+ - "busybox:latest"
+ RepoDigests:
+ - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6"
+ Created: 1466724217
+ Size: 1092588
+ SharedSize: 0
+ Labels: {}
+ Containers: 1
+ Containers:
+ -
+ Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148"
+ Names:
+ - "/top"
+ Image: "busybox"
+ ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749"
+ Command: "top"
+ Created: 1472592424
+ Ports: []
+ SizeRootFs: 1092588
+ Labels: {}
+ State: "exited"
+ Status: "Exited (0) 56 minutes ago"
+ HostConfig:
+ NetworkMode: "default"
+ NetworkSettings:
+ Networks:
+ bridge:
+ IPAMConfig: null
+ Links: null
+ Aliases: null
+ NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92"
+ EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a"
+ Gateway: "172.18.0.1"
+ IPAddress: "172.18.0.2"
+ IPPrefixLen: 16
+ IPv6Gateway: ""
+ GlobalIPv6Address: ""
+ GlobalIPv6PrefixLen: 0
+ MacAddress: "02:42:ac:12:00:02"
+ Mounts: []
+ Volumes:
+ -
+ Name: "my-volume"
+ Driver: "local"
+ Mountpoint: "/var/lib/docker/volumes/my-volume/_data"
+ Labels: null
+ Scope: "local"
+ Options: null
+ UsageData:
+ Size: 10920104
+ RefCount: 2
+ BuildCache:
+ -
+ ID: "hw53o5aio51xtltp5xjp8v7fx"
+ Parents: []
+ Type: "regular"
+ Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0"
+ InUse: false
+ Shared: true
+ Size: 0
+ CreatedAt: "2021-06-28T13:31:01.474619385Z"
+ LastUsedAt: "2021-07-07T22:02:32.738075951Z"
+ UsageCount: 26
+ -
+ ID: "ndlpt0hhvkqcdfkputsk4cq9c"
+ Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"]
+ Type: "regular"
+ Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache"
+ InUse: false
+ Shared: true
+ Size: 51
+ CreatedAt: "2021-06-28T13:31:03.002625487Z"
+ LastUsedAt: "2021-07-07T22:02:32.773909517Z"
+ UsageCount: 26
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "type"
+ in: "query"
+ description: |
+ Object types for which to compute and return data.
+ type: "array"
+ collectionFormat: multi
+ items:
+ type: "string"
+ enum: ["container", "image", "volume", "build-cache"]
+ tags: ["System"]
+ /images/{name}/get:
+ get:
+ summary: "Export an image"
+ description: |
+ Get a tarball containing all images and metadata for a repository.
+
+ If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as no image names are referenced.
+
+ ### Image tarball format
+
+ An image tarball contains one directory per image layer (named using its long ID), each containing these files:
+
+ - `VERSION`: currently `1.0` - the file format version
+ - `json`: detailed layer information, similar to `docker inspect layer_id`
+ - `layer.tar`: A tarfile containing the filesystem changes in this layer
+
+ The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions.
+
+ If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs.
+
+ ```json
+ {
+ "hello-world": {
+ "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"
+ }
+ }
+ ```
+ operationId: "ImageGet"
+ produces:
+ - "application/x-tar"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "string"
+ format: "binary"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or ID"
+ type: "string"
+ required: true
+ - name: "platform"
+ type: "string"
+ in: "query"
+ description: |
+ JSON encoded OCI platform describing a platform which will be used
+ to select a platform-specific image to be saved if the image is
+ multi-platform.
+ If not provided, the full multi-platform image will be saved.
+
+ Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
+ /images/get:
+ get:
+ summary: "Export several images"
+ description: |
+ Get a tarball containing all images and metadata for several image
+ repositories.
+
+ For each value of the `names` parameter: if it is a specific name and
+ tag (e.g. `ubuntu:latest`), then only that image (and its parents) are
+ returned; if it is an image ID, similarly only that image (and its parents)
+ are returned and there would be no names referenced in the 'repositories'
+ file for this image ID.
+
+ For details on the format, see the [export image endpoint](#operation/ImageGet).
+ operationId: "ImageGetAll"
+ produces:
+ - "application/x-tar"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "string"
+ format: "binary"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "names"
+ in: "query"
+ description: "Image names to filter by"
+ type: "array"
+ items:
+ type: "string"
+ tags: ["Image"]
+ /images/load:
+ post:
+ summary: "Import images"
+ description: |
+ Load a set of images and tags into a repository.
+
+ For details on the format, see the [export image endpoint](#operation/ImageGet).
+ operationId: "ImageLoad"
+ consumes:
+ - "application/x-tar"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "imagesTarball"
+ in: "body"
+ description: "Tar archive containing images"
+ schema:
+ type: "string"
+ format: "binary"
+ - name: "quiet"
+ in: "query"
+ description: "Suppress progress details during load."
+ type: "boolean"
+ default: false
+ - name: "platform"
+ type: "string"
+ in: "query"
+ description: |
+ JSON encoded OCI platform describing a platform which will be used
+ to select a platform-specific image to be loaded if the image is
+ multi-platform.
+ If not provided, the full multi-platform image will be loaded.
+
+ Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
+ tags: ["Image"]
+ /containers/{id}/exec:
+ post:
+ summary: "Create an exec instance"
+ description: "Run a command inside a running container."
+ operationId: "ContainerExec"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/IDResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 409:
+ description: "container is paused"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "execConfig"
+ in: "body"
+ description: "Exec configuration"
+ schema:
+ type: "object"
+ title: "ExecConfig"
+ properties:
+ AttachStdin:
+ type: "boolean"
+ description: "Attach to `stdin` of the exec command."
+ AttachStdout:
+ type: "boolean"
+ description: "Attach to `stdout` of the exec command."
+ AttachStderr:
+ type: "boolean"
+ description: "Attach to `stderr` of the exec command."
+ ConsoleSize:
+ type: "array"
+ description: "Initial console size, as an `[height, width]` array."
+ x-nullable: true
+ minItems: 2
+ maxItems: 2
+ items:
+ type: "integer"
+ minimum: 0
+ example: [80, 64]
+ DetachKeys:
+ type: "string"
+ description: |
+ Override the key sequence for detaching a container. Format is
+ a single character `[a-Z]` or `ctrl-<value>` where `<value>`
+ is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+ Tty:
+ type: "boolean"
+ description: "Allocate a pseudo-TTY."
+ Env:
+ description: |
+ A list of environment variables in the form `["VAR=value", ...]`.
+ type: "array"
+ items:
+ type: "string"
+ Cmd:
+ type: "array"
+ description: "Command to run, as a string or array of strings."
+ items:
+ type: "string"
+ Privileged:
+ type: "boolean"
+ description: "Runs the exec process with extended privileges."
+ default: false
+ User:
+ type: "string"
+ description: |
+ The user, and optionally, group to run the exec process inside
+ the container. Format is one of: `user`, `user:group`, `uid`,
+ or `uid:gid`.
+ WorkingDir:
+ type: "string"
+ description: |
+ The working directory for the exec process inside the container.
+ example:
+ AttachStdin: false
+ AttachStdout: true
+ AttachStderr: true
+ DetachKeys: "ctrl-p,ctrl-q"
+ Tty: false
+ Cmd:
+ - "date"
+ Env:
+ - "FOO=bar"
+ - "BAZ=quux"
+ required: true
+ - name: "id"
+ in: "path"
+ description: "ID or name of container"
+ type: "string"
+ required: true
+ tags: ["Exec"]
+ /exec/{id}/start:
+ post:
+ summary: "Start an exec instance"
+ description: |
+ Starts a previously set up exec instance. If detach is true, this endpoint
+ returns immediately after starting the command. Otherwise, it sets up an
+ interactive session with the command.
+ operationId: "ExecStart"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ - "application/vnd.docker.multiplexed-stream"
+ responses:
+ 200:
+ description: "No error"
+ 404:
+ description: "No such exec instance"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 409:
+ description: "Container is stopped or paused"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "execStartConfig"
+ in: "body"
+ schema:
+ type: "object"
+ title: "ExecStartConfig"
+ properties:
+ Detach:
+ type: "boolean"
+ description: "Detach from the command."
+ example: false
+ Tty:
+ type: "boolean"
+ description: "Allocate a pseudo-TTY."
+ example: true
+ ConsoleSize:
+ type: "array"
+ description: "Initial console size, as an `[height, width]` array."
+ x-nullable: true
+ minItems: 2
+ maxItems: 2
+ items:
+ type: "integer"
+ minimum: 0
+ example: [80, 64]
+ - name: "id"
+ in: "path"
+ description: "Exec instance ID"
+ required: true
+ type: "string"
+ tags: ["Exec"]
+ /exec/{id}/resize:
+ post:
+ summary: "Resize an exec instance"
+ description: |
+ Resize the TTY session used by an exec instance. This endpoint only works
+ if `tty` was specified as part of creating and starting the exec instance.
+ operationId: "ExecResize"
+ responses:
+ 200:
+ description: "No error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "No such exec instance"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "Exec instance ID"
+ required: true
+ type: "string"
+ - name: "h"
+ in: "query"
+ required: true
+ description: "Height of the TTY session in characters"
+ type: "integer"
+ - name: "w"
+ in: "query"
+ required: true
+ description: "Width of the TTY session in characters"
+ type: "integer"
+ tags: ["Exec"]
+ /exec/{id}/json:
+ get:
+ summary: "Inspect an exec instance"
+ description: "Return low-level information about an exec instance."
+ operationId: "ExecInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ title: "ExecInspectResponse"
+ properties:
+ CanRemove:
+ type: "boolean"
+ DetachKeys:
+ type: "string"
+ ID:
+ type: "string"
+ Running:
+ type: "boolean"
+ ExitCode:
+ type: "integer"
+ ProcessConfig:
+ $ref: "#/definitions/ProcessConfig"
+ OpenStdin:
+ type: "boolean"
+ OpenStderr:
+ type: "boolean"
+ OpenStdout:
+ type: "boolean"
+ ContainerID:
+ type: "string"
+ Pid:
+ type: "integer"
+ description: "The system process ID for the exec process."
+ examples:
+ application/json:
+ CanRemove: false
+ ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126"
+ DetachKeys: ""
+ ExitCode: 2
+ ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b"
+ OpenStderr: true
+ OpenStdin: true
+ OpenStdout: true
+ ProcessConfig:
+ arguments:
+ - "-c"
+ - "exit 2"
+ entrypoint: "sh"
+ privileged: false
+ tty: true
+ user: "1000"
+ Running: false
+ Pid: 42000
+ 404:
+ description: "No such exec instance"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "Exec instance ID"
+ required: true
+ type: "string"
+ tags: ["Exec"]
+
+ /volumes:
+ get:
+ summary: "List volumes"
+ operationId: "VolumeList"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "Summary volume data that matches the query"
+ schema:
+ $ref: "#/definitions/VolumeListResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ JSON encoded value of the filters (a `map[string][]string`) to
+ process on the volumes list. Available filters:
+
+ - `dangling=<boolean>` When set to `true` (or `1`), returns all
+ volumes that are not in use by a container. When set to `false`
+ (or `0`), only volumes that are in use by one or more
+ containers are returned.
+ - `driver=<volume-driver-name>` Matches volumes based on their driver.
+ - `label=<key>` or `label=<key>:<value>` Matches volumes based on
+ the presence of a `label` alone or a `label` and a value.
+ - `name=<volume-name>` Matches all or part of a volume name.
+ type: "string"
+ format: "json"
+ tags: ["Volume"]
+
+ /volumes/create:
+ post:
+ summary: "Create a volume"
+ operationId: "VolumeCreate"
+ consumes: ["application/json"]
+ produces: ["application/json"]
+ responses:
+ 201:
+ description: "The volume was created successfully"
+ schema:
+ $ref: "#/definitions/Volume"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "volumeConfig"
+ in: "body"
+ required: true
+ description: "Volume configuration"
+ schema:
+ $ref: "#/definitions/VolumeCreateOptions"
+ tags: ["Volume"]
+
+ /volumes/{name}:
+ get:
+ summary: "Inspect a volume"
+ operationId: "VolumeInspect"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ $ref: "#/definitions/Volume"
+ 404:
+ description: "No such volume"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ required: true
+ description: "Volume name or ID"
+ type: "string"
+ tags: ["Volume"]
+
+ put:
+ summary: "Update a volume. Valid only for Swarm cluster volumes"
+ operationId: "VolumeUpdate"
+ consumes: ["application/json"]
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such volume"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "The name or ID of the volume"
+ type: "string"
+ required: true
+ - name: "body"
+ in: "body"
+ schema:
+ # though the schema for this request is an object that contains only a
+ # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object
+ # means that if, later on, we support things like changing the
+ # labels, we can do so without duplicating that information to the
+ # ClusterVolumeSpec.
+ type: "object"
+ description: "Volume configuration"
+ properties:
+ Spec:
+ $ref: "#/definitions/ClusterVolumeSpec"
+ description: |
+ The spec of the volume to update. Currently, only Availability may
+ change. All other fields must remain unchanged.
+ - name: "version"
+ in: "query"
+ description: |
+ The version number of the volume being updated. This is required to
+ avoid conflicting writes. Found in the volume's `ClusterVolume`
+ field.
+ type: "integer"
+ format: "int64"
+ required: true
+ tags: ["Volume"]
+
+ delete:
+ summary: "Remove a volume"
+ description: "Instruct the driver to remove the volume."
+ operationId: "VolumeDelete"
+ responses:
+ 204:
+ description: "The volume was removed"
+ 404:
+ description: "No such volume or volume driver"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 409:
+ description: "Volume is in use and cannot be removed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ required: true
+ description: "Volume name or ID"
+ type: "string"
+ - name: "force"
+ in: "query"
+ description: "Force the removal of the volume"
+ type: "boolean"
+ default: false
+ tags: ["Volume"]
+
+ /volumes/prune:
+ post:
+ summary: "Delete unused volumes"
+ produces:
+ - "application/json"
+ operationId: "VolumePrune"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
+
+ Available filters:
+ - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels.
+ - `all` (`all=true`) Consider all (local) volumes for pruning and not just anonymous volumes.
+ type: "string"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ title: "VolumePruneResponse"
+ properties:
+ VolumesDeleted:
+ description: "Volumes that were deleted"
+ type: "array"
+ items:
+ type: "string"
+ SpaceReclaimed:
+ description: "Disk space reclaimed in bytes"
+ type: "integer"
+ format: "int64"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Volume"]
+ /networks:
+ get:
+ summary: "List networks"
+ description: |
+ Returns a list of networks. For details on the format, see the
+ [network inspect endpoint](#operation/NetworkInspect).
+
+ Note that it uses a different, smaller representation of a network than
+ inspecting a single network. For example, the list of containers attached
+ to the network is not propagated in API versions 1.28 and up.
+ operationId: "NetworkList"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Network"
+ examples:
+ application/json:
+ - Name: "bridge"
+ Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566"
+ Created: "2016-10-19T06:21:00.416543526Z"
+ Scope: "local"
+ Driver: "bridge"
+ EnableIPv4: true
+ EnableIPv6: false
+ Internal: false
+ Attachable: false
+ Ingress: false
+ IPAM:
+ Driver: "default"
+ Config:
+ -
+ Subnet: "172.17.0.0/16"
+ Options:
+ com.docker.network.bridge.default_bridge: "true"
+ com.docker.network.bridge.enable_icc: "true"
+ com.docker.network.bridge.enable_ip_masquerade: "true"
+ com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
+ com.docker.network.bridge.name: "docker0"
+ com.docker.network.driver.mtu: "1500"
+ - Name: "none"
+ Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794"
+ Created: "0001-01-01T00:00:00Z"
+ Scope: "local"
+ Driver: "null"
+ EnableIPv4: false
+ EnableIPv6: false
+ Internal: false
+ Attachable: false
+ Ingress: false
+ IPAM:
+ Driver: "default"
+ Config: []
+ Containers: {}
+ Options: {}
+ - Name: "host"
+ Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e"
+ Created: "0001-01-01T00:00:00Z"
+ Scope: "local"
+ Driver: "host"
+ EnableIPv4: false
+ EnableIPv6: false
+ Internal: false
+ Attachable: false
+ Ingress: false
+ IPAM:
+ Driver: "default"
+ Config: []
+ Containers: {}
+ Options: {}
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ JSON encoded value of the filters (a `map[string][]string`) to process
+ on the networks list.
+
+ Available filters:
+
+ - `dangling=<boolean>` When set to `true` (or `1`), returns all
+ networks that are not in use by a container. When set to `false`
+ (or `0`), only networks that are in use by one or more
+ containers are returned.
+ - `driver=<driver-name>` Matches a network's driver.
+ - `id=<network-id>` Matches all or part of a network ID.
+ - `label=<key>` or `label=<key>=<value>` Matches networks based on the presence of a label, or a label and a value.
+ - `name=<network-name>` Matches all or part of a network name.
+ - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`).
+ - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks.
+ type: "string"
+ tags: ["Network"]
+
+ /networks/{id}:
+ get:
+ summary: "Inspect a network"
+ operationId: "NetworkInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ $ref: "#/definitions/Network"
+ 404:
+ description: "Network not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "Network ID or name"
+ required: true
+ type: "string"
+ - name: "verbose"
+ in: "query"
+ description: "Detailed inspect output for troubleshooting"
+ type: "boolean"
+ default: false
+ - name: "scope"
+ in: "query"
+ description: "Filter the network by scope (swarm, global, or local)"
+ type: "string"
+ tags: ["Network"]
+
+ delete:
+ summary: "Remove a network"
+ operationId: "NetworkDelete"
+ responses:
+ 204:
+ description: "No error"
+ 403:
+ description: "operation not supported for pre-defined networks"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such network"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "Network ID or name"
+ required: true
+ type: "string"
+ tags: ["Network"]
+
+ /networks/create:
+ post:
+ summary: "Create a network"
+ operationId: "NetworkCreate"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "Network created successfully"
+ schema:
+ $ref: "#/definitions/NetworkCreateResponse"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 403:
+ description: |
+ Forbidden operation. This happens when trying to create a network named after a pre-defined network,
+ or when trying to create an overlay network on a daemon which is not part of a Swarm cluster.
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "plugin not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "networkConfig"
+ in: "body"
+ description: "Network configuration"
+ required: true
+ schema:
+ type: "object"
+ title: "NetworkCreateRequest"
+ required: ["Name"]
+ properties:
+ Name:
+ description: "The network's name."
+ type: "string"
+ example: "my_network"
+ Driver:
+ description: "Name of the network driver plugin to use."
+ type: "string"
+ default: "bridge"
+ example: "bridge"
+ Scope:
+ description: |
+ The level at which the network exists (e.g. `swarm` for cluster-wide
+ or `local` for machine level).
+ type: "string"
+ Internal:
+ description: "Restrict external access to the network."
+ type: "boolean"
+ Attachable:
+ description: |
+ Whether a globally scoped network is manually attachable by regular
+ containers from workers in swarm mode.
+ type: "boolean"
+ example: true
+ Ingress:
+ description: |
+ Whether the network is an ingress network, which provides the
+ routing-mesh in swarm mode.
+ type: "boolean"
+ example: false
+ ConfigOnly:
+ description: |
+ Creates a config-only network. Config-only networks are placeholder
+ networks for network configurations to be used by other networks.
+ Config-only networks cannot be used directly to run containers
+ or services.
+ type: "boolean"
+ default: false
+ example: false
+ ConfigFrom:
+ description: |
+ Specifies the source which will provide the configuration for
+ this network. The specified network must be an existing
+ config-only network; see ConfigOnly.
+ $ref: "#/definitions/ConfigReference"
+ IPAM:
+ description: "Optional custom IP scheme for the network."
+ $ref: "#/definitions/IPAM"
+ EnableIPv4:
+ description: "Enable IPv4 on the network."
+ type: "boolean"
+ example: true
+ EnableIPv6:
+ description: "Enable IPv6 on the network."
+ type: "boolean"
+ example: true
+ Options:
+ description: "Network specific options to be used by the drivers."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ com.docker.network.bridge.default_bridge: "true"
+ com.docker.network.bridge.enable_icc: "true"
+ com.docker.network.bridge.enable_ip_masquerade: "true"
+ com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
+ com.docker.network.bridge.name: "docker0"
+ com.docker.network.driver.mtu: "1500"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ tags: ["Network"]
+
+ /networks/{id}/connect:
+ post:
+ summary: "Connect a container to a network"
+ description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set. A network cannot be re-attached to a running container"
+ operationId: "NetworkConnect"
+ consumes:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 403:
+ description: "Operation forbidden"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "Network or container not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "Network ID or name"
+ required: true
+ type: "string"
+ - name: "container"
+ in: "body"
+ required: true
+ schema:
+ type: "object"
+ title: "NetworkConnectRequest"
+ properties:
+ Container:
+ type: "string"
+ description: "The ID or name of the container to connect to the network."
+ EndpointConfig:
+ $ref: "#/definitions/EndpointSettings"
+ example:
+ Container: "3613f73ba0e4"
+ EndpointConfig:
+ IPAMConfig:
+ IPv4Address: "172.24.56.89"
+ IPv6Address: "2001:db8::5689"
+ MacAddress: "02:42:ac:12:05:02"
+ Priority: 100
+ tags: ["Network"]
+
+ /networks/{id}/disconnect:
+ post:
+ summary: "Disconnect a container from a network"
+ operationId: "NetworkDisconnect"
+ consumes:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ 403:
+ description: "Operation not supported for swarm scoped networks"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "Network or container not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "Network ID or name"
+ required: true
+ type: "string"
+ - name: "container"
+ in: "body"
+ required: true
+ schema:
+ type: "object"
+ title: "NetworkDisconnectRequest"
+ properties:
+ Container:
+ type: "string"
+ description: |
+ The ID or name of the container to disconnect from the network.
+ Force:
+ type: "boolean"
+ description: |
+ Force the container to disconnect from the network.
+ tags: ["Network"]
+ /networks/prune:
+ post:
+ summary: "Delete unused networks"
+ produces:
+ - "application/json"
+ operationId: "NetworkPrune"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
+
+ Available filters:
+ - `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
+ - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels.
+ type: "string"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ title: "NetworkPruneResponse"
+ properties:
+ NetworksDeleted:
+ description: "Networks that were deleted"
+ type: "array"
+ items:
+ type: "string"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Network"]
+ /plugins:
+ get:
+ summary: "List plugins"
+ operationId: "PluginList"
+ description: "Returns information about installed plugins."
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Plugin"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to
+ process on the plugin list.
+
+ Available filters:
+
+ - `capability=<capability name>`
+ - `enable=<true>|<false>`
+ tags: ["Plugin"]
+
+ /plugins/privileges:
+ get:
+ summary: "Get plugin privileges"
+ operationId: "GetPluginPrivileges"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginPrivilege"
+ example:
+ - Name: "network"
+ Description: ""
+ Value:
+ - "host"
+ - Name: "mount"
+ Description: ""
+ Value:
+ - "/data"
+ - Name: "device"
+ Description: ""
+ Value:
+ - "/dev/cpu_dma_latency"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "remote"
+ in: "query"
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
+ required: true
+ type: "string"
+ tags:
+ - "Plugin"
+
+ /plugins/pull:
+ post:
+ summary: "Install a plugin"
+ operationId: "PluginPull"
+ description: |
+ Pulls and installs a plugin. After the plugin is installed, it can be
+ enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable).
+ produces:
+ - "application/json"
+ responses:
+ 204:
+ description: "no error"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "remote"
+ in: "query"
+ description: |
+ Remote reference for plugin to install.
+
+ The `:latest` tag is optional, and is used as the default if omitted.
+ required: true
+ type: "string"
+ - name: "name"
+ in: "query"
+ description: |
+ Local name for the pulled plugin.
+
+ The `:latest` tag is optional, and is used as the default if omitted.
+ required: false
+ type: "string"
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: |
+ A base64url-encoded auth configuration to use when pulling a plugin
+ from a registry.
+
+ Refer to the [authentication section](#section/Authentication) for
+ details.
+ type: "string"
+ - name: "body"
+ in: "body"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginPrivilege"
+ example:
+ - Name: "network"
+ Description: ""
+ Value:
+ - "host"
+ - Name: "mount"
+ Description: ""
+ Value:
+ - "/data"
+ - Name: "device"
+ Description: ""
+ Value:
+ - "/dev/cpu_dma_latency"
+ tags: ["Plugin"]
+ /plugins/{name}/json:
+ get:
+ summary: "Inspect a plugin"
+ operationId: "PluginInspect"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Plugin"
+ 404:
+ description: "plugin is not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
+ required: true
+ type: "string"
+ tags: ["Plugin"]
+ /plugins/{name}:
+ delete:
+ summary: "Remove a plugin"
+ operationId: "PluginDelete"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Plugin"
+ 404:
+ description: "plugin is not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
+ required: true
+ type: "string"
+ - name: "force"
+ in: "query"
+ description: |
+ Disable the plugin before removing. This may result in issues if the
+ plugin is in use by a container.
+ type: "boolean"
+ default: false
+ tags: ["Plugin"]
+ /plugins/{name}/enable:
+ post:
+ summary: "Enable a plugin"
+ operationId: "PluginEnable"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "plugin is not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
+ required: true
+ type: "string"
+ - name: "timeout"
+ in: "query"
+ description: "Set the HTTP client timeout (in seconds)"
+ type: "integer"
+ default: 0
+ tags: ["Plugin"]
+ /plugins/{name}/disable:
+ post:
+ summary: "Disable a plugin"
+ operationId: "PluginDisable"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "plugin is not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
+ required: true
+ type: "string"
+ - name: "force"
+ in: "query"
+ description: |
+ Force disable a plugin even if still in use.
+ required: false
+ type: "boolean"
+ tags: ["Plugin"]
+ /plugins/{name}/upgrade:
+ post:
+ summary: "Upgrade a plugin"
+ operationId: "PluginUpgrade"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "plugin not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
+ required: true
+ type: "string"
+ - name: "remote"
+ in: "query"
+ description: |
+ Remote reference to upgrade to.
+
+ The `:latest` tag is optional, and is used as the default if omitted.
+ required: true
+ type: "string"
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: |
+ A base64url-encoded auth configuration to use when pulling a plugin
+ from a registry.
+
+ Refer to the [authentication section](#section/Authentication) for
+ details.
+ type: "string"
+ - name: "body"
+ in: "body"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginPrivilege"
+ example:
+ - Name: "network"
+ Description: ""
+ Value:
+ - "host"
+ - Name: "mount"
+ Description: ""
+ Value:
+ - "/data"
+ - Name: "device"
+ Description: ""
+ Value:
+ - "/dev/cpu_dma_latency"
+ tags: ["Plugin"]
+ /plugins/create:
+ post:
+ summary: "Create a plugin"
+ operationId: "PluginCreate"
+ consumes:
+ - "application/x-tar"
+ responses:
+ 204:
+ description: "no error"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "query"
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
+ required: true
+ type: "string"
+ - name: "tarContext"
+ in: "body"
+ description: "Path to tar containing plugin rootfs and manifest"
+ schema:
+ type: "string"
+ format: "binary"
+ tags: ["Plugin"]
+ /plugins/{name}/push:
+ post:
+ summary: "Push a plugin"
+ operationId: "PluginPush"
+ description: |
+ Push a plugin to the registry.
+ parameters:
+ - name: "name"
+ in: "path"
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
+ required: true
+ type: "string"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "plugin not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Plugin"]
+ /plugins/{name}/set:
+ post:
+ summary: "Configure a plugin"
+ operationId: "PluginSet"
+ consumes:
+ - "application/json"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
+ required: true
+ type: "string"
+ - name: "body"
+ in: "body"
+ schema:
+ type: "array"
+ items:
+ type: "string"
+ example: ["DEBUG=1"]
+ responses:
+ 204:
+ description: "No error"
+ 404:
+ description: "Plugin not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Plugin"]
+ /nodes:
+ get:
+ summary: "List nodes"
+ operationId: "NodeList"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Node"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the nodes list, encoded as JSON (a `map[string][]string`).
+
+ Available filters:
+ - `id=<node id>`
+ - `label=<engine label>`
+ - `membership=(accepted|pending)`
+ - `name=<node name>`
+ - `node.label=<node label>`
+ - `role=(manager|worker)`
+ type: "string"
+ tags: ["Node"]
+ /nodes/{id}:
+ get:
+ summary: "Inspect a node"
+ operationId: "NodeInspect"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Node"
+ 404:
+ description: "no such node"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the node"
+ type: "string"
+ required: true
+ tags: ["Node"]
+ delete:
+ summary: "Delete a node"
+ operationId: "NodeDelete"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "no such node"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the node"
+ type: "string"
+ required: true
+ - name: "force"
+ in: "query"
+ description: "Force remove a node from the swarm"
+ default: false
+ type: "boolean"
+ tags: ["Node"]
+ /nodes/{id}/update:
+ post:
+ summary: "Update a node"
+ operationId: "NodeUpdate"
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such node"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID of the node"
+ type: "string"
+ required: true
+ - name: "body"
+ in: "body"
+ schema:
+ $ref: "#/definitions/NodeSpec"
+ - name: "version"
+ in: "query"
+ description: |
+ The version number of the node object being updated. This is required
+ to avoid conflicting writes.
+ type: "integer"
+ format: "int64"
+ required: true
+ tags: ["Node"]
+ /swarm:
+ get:
+ summary: "Inspect swarm"
+ operationId: "SwarmInspect"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Swarm"
+ 404:
+ description: "no such swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Swarm"]
+ /swarm/init:
+ post:
+ summary: "Initialize a new swarm"
+ operationId: "SwarmInit"
+ produces:
+ - "application/json"
+ - "text/plain"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ description: "The node ID"
+ type: "string"
+ example: "7v2t30z9blmxuhnyo6s4cpenp"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is already part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ type: "object"
+ title: "SwarmInitRequest"
+ properties:
+ ListenAddr:
+ description: |
+ Listen address used for inter-manager communication, as well
+ as determining the networking interface used for the VXLAN
+ Tunnel Endpoint (VTEP). This can either be an address/port
+ combination in the form `192.168.1.1:4567`, or an interface
+ followed by a port number, like `eth0:4567`. If the port number
+ is omitted, the default swarm listening port is used.
+ type: "string"
+ AdvertiseAddr:
+ description: |
+ Externally reachable address advertised to other nodes. This
+ can either be an address/port combination in the form
+ `192.168.1.1:4567`, or an interface followed by a port number,
+ like `eth0:4567`. If the port number is omitted, the port
+ number from the listen address is used. If `AdvertiseAddr` is
+ not specified, it will be automatically detected when possible.
+ type: "string"
+ DataPathAddr:
+ description: |
+ Address or interface to use for data path traffic (format:
+ `<ip|interface>`), for example, `192.168.1.1`, or an interface,
+ like `eth0`. If `DataPathAddr` is unspecified, the same address
+ as `AdvertiseAddr` is used.
+
+ The `DataPathAddr` specifies the address that global scope
+ network drivers will publish towards other nodes in order to
+ reach the containers running on this node. Using this parameter
+ it is possible to separate the container data traffic from the
+ management traffic of the cluster.
+ type: "string"
+ DataPathPort:
+ description: |
+ DataPathPort specifies the data path port number for data traffic.
+ Acceptable port range is 1024 to 49151.
+ If no port is set, or it is set to 0, the default port (4789) is used.
+ type: "integer"
+ format: "uint32"
+ DefaultAddrPool:
+ description: |
+ Default Address Pool specifies default subnet pools for global
+ scope networks.
+ type: "array"
+ items:
+ type: "string"
+ example: ["10.10.0.0/16", "20.20.0.0/16"]
+ ForceNewCluster:
+ description: "Force creation of a new swarm."
+ type: "boolean"
+ SubnetSize:
+ description: |
+ SubnetSize specifies the subnet size of the networks created
+ from the default subnet pool.
+ type: "integer"
+ format: "uint32"
+ Spec:
+ $ref: "#/definitions/SwarmSpec"
+ example:
+ ListenAddr: "0.0.0.0:2377"
+ AdvertiseAddr: "192.168.1.1:2377"
+ DataPathPort: 4789
+ DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"]
+ SubnetSize: 24
+ ForceNewCluster: false
+ Spec:
+ Orchestration: {}
+ Raft: {}
+ Dispatcher: {}
+ CAConfig: {}
+ EncryptionConfig:
+ AutoLockManagers: false
+ tags: ["Swarm"]
+ /swarm/join:
+ post:
+ summary: "Join an existing swarm"
+ operationId: "SwarmJoin"
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is already part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ type: "object"
+ title: "SwarmJoinRequest"
+ properties:
+ ListenAddr:
+ description: |
+ Listen address used for inter-manager communication if the node
+ gets promoted to manager, as well as determining the networking
+ interface used for the VXLAN Tunnel Endpoint (VTEP).
+ type: "string"
+ AdvertiseAddr:
+ description: |
+ Externally reachable address advertised to other nodes. This
+ can either be an address/port combination in the form
+ `192.168.1.1:4567`, or an interface followed by a port number,
+ like `eth0:4567`. If the port number is omitted, the port
+ number from the listen address is used. If `AdvertiseAddr` is
+ not specified, it will be automatically detected when possible.
+ type: "string"
+ DataPathAddr:
+ description: |
+ Address or interface to use for data path traffic (format:
+ `<ip|interface>`), for example, `192.168.1.1`, or an interface,
+ like `eth0`. If `DataPathAddr` is unspecified, the same address
+ as `AdvertiseAddr` is used.
+
+ The `DataPathAddr` specifies the address that global scope
+ network drivers will publish towards other nodes in order to
+ reach the containers running on this node. Using this parameter
+ it is possible to separate the container data traffic from the
+ management traffic of the cluster.
+
+ type: "string"
+ RemoteAddrs:
+ description: |
+ Addresses of manager nodes already participating in the swarm.
+ type: "array"
+ items:
+ type: "string"
+ JoinToken:
+ description: "Secret token for joining this swarm."
+ type: "string"
+ example:
+ ListenAddr: "0.0.0.0:2377"
+ AdvertiseAddr: "192.168.1.1:2377"
+ DataPathAddr: "192.168.1.1"
+ RemoteAddrs:
+ - "node1:2377"
+ JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
+ tags: ["Swarm"]
+ /swarm/leave:
+ post:
+ summary: "Leave a swarm"
+ operationId: "SwarmLeave"
+ responses:
+ 200:
+ description: "no error"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "force"
+ description: |
+ Force leave swarm, even if this is the last manager or if leaving
+ will break the cluster.
+ in: "query"
+ type: "boolean"
+ default: false
+ tags: ["Swarm"]
+ /swarm/update:
+ post:
+ summary: "Update a swarm"
+ operationId: "SwarmUpdate"
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ $ref: "#/definitions/SwarmSpec"
+ - name: "version"
+ in: "query"
+ description: |
+ The version number of the swarm object being updated. This is
+ required to avoid conflicting writes.
+ type: "integer"
+ format: "int64"
+ required: true
+ - name: "rotateWorkerToken"
+ in: "query"
+ description: "Rotate the worker join token."
+ type: "boolean"
+ default: false
+ - name: "rotateManagerToken"
+ in: "query"
+ description: "Rotate the manager join token."
+ type: "boolean"
+ default: false
+ - name: "rotateManagerUnlockKey"
+ in: "query"
+ description: "Rotate the manager unlock key."
+ type: "boolean"
+ default: false
+ tags: ["Swarm"]
+ /swarm/unlockkey:
+ get:
+ summary: "Get the unlock key"
+ operationId: "SwarmUnlockkey"
+ consumes:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ title: "UnlockKeyResponse"
+ properties:
+ UnlockKey:
+ description: "The swarm's unlock key."
+ type: "string"
+ example:
+ UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Swarm"]
+ /swarm/unlock:
+ post:
+ summary: "Unlock a locked manager"
+ operationId: "SwarmUnlock"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ type: "object"
+ title: "SwarmUnlockRequest"
+ properties:
+ UnlockKey:
+ description: "The swarm's unlock key."
+ type: "string"
+ example:
+ UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8"
+ responses:
+ 200:
+ description: "no error"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Swarm"]
+ /services:
+ get:
+ summary: "List services"
+ operationId: "ServiceList"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Service"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to
+ process on the services list.
+
+ Available filters:
+
+ - `id=<service id>`
+ - `label=<service label>`
+ - `mode=["replicated"|"global"]`
+ - `name=<service name>`
+ - name: "status"
+ in: "query"
+ type: "boolean"
+ description: |
+ Include service status, with count of running and desired tasks.
+ tags: ["Service"]
+ /services/create:
+ post:
+ summary: "Create a service"
+ operationId: "ServiceCreate"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/ServiceCreateResponse"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 403:
+ description: "network is not eligible for services"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 409:
+ description: "name conflicts with an existing service"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ allOf:
+ - $ref: "#/definitions/ServiceSpec"
+ - type: "object"
+ example:
+ Name: "web"
+ TaskTemplate:
+ ContainerSpec:
+ Image: "nginx:alpine"
+ Mounts:
+ -
+ ReadOnly: true
+ Source: "web-data"
+ Target: "/usr/share/nginx/html"
+ Type: "volume"
+ VolumeOptions:
+ DriverConfig: {}
+ Labels:
+ com.example.something: "something-value"
+ Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"]
+ User: "33"
+ DNSConfig:
+ Nameservers: ["8.8.8.8"]
+ Search: ["example.org"]
+ Options: ["timeout:3"]
+ Secrets:
+ -
+ File:
+ Name: "www.example.org.key"
+ UID: "33"
+ GID: "33"
+ Mode: 384
+ SecretID: "fpjqlhnwb19zds35k8wn80lq9"
+ SecretName: "example_org_domain_key"
+ OomScoreAdj: 0
+ LogDriver:
+ Name: "json-file"
+ Options:
+ max-file: "3"
+ max-size: "10M"
+ Placement: {}
+ Resources:
+ Limits:
+ MemoryBytes: 104857600
+ Reservations: {}
+ RestartPolicy:
+ Condition: "on-failure"
+ Delay: 10000000000
+ MaxAttempts: 10
+ Mode:
+ Replicated:
+ Replicas: 4
+ UpdateConfig:
+ Parallelism: 2
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ RollbackConfig:
+ Parallelism: 1
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ EndpointSpec:
+ Ports:
+ -
+ Protocol: "tcp"
+ PublishedPort: 8080
+ TargetPort: 80
+ Labels:
+ foo: "bar"
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: |
+ A base64url-encoded auth configuration for pulling from private
+ registries.
+
+ Refer to the [authentication section](#section/Authentication) for
+ details.
+ type: "string"
+ tags: ["Service"]
+ /services/{id}:
+ get:
+ summary: "Inspect a service"
+ operationId: "ServiceInspect"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Service"
+ 404:
+ description: "no such service"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "ID or name of service."
+ required: true
+ type: "string"
+ - name: "insertDefaults"
+ in: "query"
+ description: "Fill empty fields with default values."
+ type: "boolean"
+ default: false
+ tags: ["Service"]
+ delete:
+ summary: "Delete a service"
+ operationId: "ServiceDelete"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "no such service"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "ID or name of service."
+ required: true
+ type: "string"
+ tags: ["Service"]
+ /services/{id}/update:
+ post:
+ summary: "Update a service"
+ operationId: "ServiceUpdate"
+ consumes: ["application/json"]
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/ServiceUpdateResponse"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such service"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "ID or name of service."
+ required: true
+ type: "string"
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ allOf:
+ - $ref: "#/definitions/ServiceSpec"
+ - type: "object"
+ example:
+ Name: "top"
+ TaskTemplate:
+ ContainerSpec:
+ Image: "busybox"
+ Args:
+ - "top"
+ OomScoreAdj: 0
+ Resources:
+ Limits: {}
+ Reservations: {}
+ RestartPolicy:
+ Condition: "any"
+ MaxAttempts: 0
+ Placement: {}
+ ForceUpdate: 0
+ Mode:
+ Replicated:
+ Replicas: 1
+ UpdateConfig:
+ Parallelism: 2
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ RollbackConfig:
+ Parallelism: 1
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ EndpointSpec:
+ Mode: "vip"
+
+ - name: "version"
+ in: "query"
+ description: |
+ The version number of the service object being updated. This is
+ required to avoid conflicting writes.
+ This version number should be the value as currently set on the
+ service *before* the update. You can find the current version by
+ calling `GET /services/{id}`.
+ required: true
+ type: "integer"
+ - name: "registryAuthFrom"
+ in: "query"
+ description: |
+ If the `X-Registry-Auth` header is not specified, this parameter
+ indicates where to find registry authorization credentials.
+ type: "string"
+ enum: ["spec", "previous-spec"]
+ default: "spec"
+ - name: "rollback"
+ in: "query"
+ description: |
+ Set this parameter to `previous` to cause a server-side rollback
+ to the previous service spec. The supplied spec will be ignored in
+ this case.
+ type: "string"
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: |
+ A base64url-encoded auth configuration for pulling from private
+ registries.
+
+ Refer to the [authentication section](#section/Authentication) for
+ details.
+ type: "string"
+
+ tags: ["Service"]
+ /services/{id}/logs:
+ get:
+ summary: "Get service logs"
+ description: |
+ Get `stdout` and `stderr` logs from a service. See also
+ [`/containers/{id}/logs`](#operation/ContainerLogs).
+
+ **Note**: This endpoint works only for services with the `local`,
+ `json-file` or `journald` logging drivers.
+ produces:
+ - "application/vnd.docker.raw-stream"
+ - "application/vnd.docker.multiplexed-stream"
+ operationId: "ServiceLogs"
+ responses:
+ 200:
+ description: "logs returned as a stream in response body"
+ schema:
+ type: "string"
+ format: "binary"
+ 404:
+ description: "no such service"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such service: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the service"
+ type: "string"
+ - name: "details"
+ in: "query"
+ description: "Show service context and extra details provided to logs."
+ type: "boolean"
+ default: false
+ - name: "follow"
+ in: "query"
+ description: "Keep connection after returning logs."
+ type: "boolean"
+ default: false
+ - name: "stdout"
+ in: "query"
+ description: "Return logs from `stdout`"
+ type: "boolean"
+ default: false
+ - name: "stderr"
+ in: "query"
+ description: "Return logs from `stderr`"
+ type: "boolean"
+ default: false
+ - name: "since"
+ in: "query"
+ description: "Only return logs since this time, as a UNIX timestamp"
+ type: "integer"
+ default: 0
+ - name: "timestamps"
+ in: "query"
+ description: "Add timestamps to every log line"
+ type: "boolean"
+ default: false
+ - name: "tail"
+ in: "query"
+ description: |
+ Only return this number of log lines from the end of the logs.
+ Specify as an integer or `all` to output all log lines.
+ type: "string"
+ default: "all"
+ tags: ["Service"]
+ /tasks:
+ get:
+ summary: "List tasks"
+ operationId: "TaskList"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Task"
+ example:
+ - ID: "0kzzo1i0y4jz6027t0k7aezc7"
+ Version:
+ Index: 71
+ CreatedAt: "2016-06-07T21:07:31.171892745Z"
+ UpdatedAt: "2016-06-07T21:07:31.376370513Z"
+ Spec:
+ ContainerSpec:
+ Image: "redis"
+ Resources:
+ Limits: {}
+ Reservations: {}
+ RestartPolicy:
+ Condition: "any"
+ MaxAttempts: 0
+ Placement: {}
+ ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
+ Slot: 1
+ NodeID: "60gvrl6tm78dmak4yl7srz94v"
+ Status:
+ Timestamp: "2016-06-07T21:07:31.290032978Z"
+ State: "running"
+ Message: "started"
+ ContainerStatus:
+ ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035"
+ PID: 677
+ DesiredState: "running"
+ NetworksAttachments:
+ - Network:
+ ID: "4qvuz4ko70xaltuqbt8956gd1"
+ Version:
+ Index: 18
+ CreatedAt: "2016-06-07T20:31:11.912919752Z"
+ UpdatedAt: "2016-06-07T21:07:29.955277358Z"
+ Spec:
+ Name: "ingress"
+ Labels:
+ com.docker.swarm.internal: "true"
+ DriverConfiguration: {}
+ IPAMOptions:
+ Driver: {}
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ DriverState:
+ Name: "overlay"
+ Options:
+ com.docker.network.driver.overlay.vxlanid_list: "256"
+ IPAMOptions:
+ Driver:
+ Name: "default"
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ Addresses:
+ - "10.255.0.10/16"
+ - ID: "1yljwbmlr8er2waf8orvqpwms"
+ Version:
+ Index: 30
+ CreatedAt: "2016-06-07T21:07:30.019104782Z"
+ UpdatedAt: "2016-06-07T21:07:30.231958098Z"
+ Name: "hopeful_cori"
+ Spec:
+ ContainerSpec:
+ Image: "redis"
+ Resources:
+ Limits: {}
+ Reservations: {}
+ RestartPolicy:
+ Condition: "any"
+ MaxAttempts: 0
+ Placement: {}
+ ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
+ Slot: 1
+ NodeID: "60gvrl6tm78dmak4yl7srz94v"
+ Status:
+ Timestamp: "2016-06-07T21:07:30.202183143Z"
+ State: "shutdown"
+ Message: "shutdown"
+ ContainerStatus:
+ ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213"
+ DesiredState: "shutdown"
+ NetworksAttachments:
+ - Network:
+ ID: "4qvuz4ko70xaltuqbt8956gd1"
+ Version:
+ Index: 18
+ CreatedAt: "2016-06-07T20:31:11.912919752Z"
+ UpdatedAt: "2016-06-07T21:07:29.955277358Z"
+ Spec:
+ Name: "ingress"
+ Labels:
+ com.docker.swarm.internal: "true"
+ DriverConfiguration: {}
+ IPAMOptions:
+ Driver: {}
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ DriverState:
+ Name: "overlay"
+ Options:
+ com.docker.network.driver.overlay.vxlanid_list: "256"
+ IPAMOptions:
+ Driver:
+ Name: "default"
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ Addresses:
+ - "10.255.0.5/16"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to
+ process on the tasks list.
+
+ Available filters:
+
+ - `desired-state=(running | shutdown | accepted)`
+ - `id=<task id>`
+ - `label=key` or `label="key=value"`
+ - `name=<task name>`
+ - `node=<node id or name>`
+ - `service=<service name>`
+ tags: ["Task"]
+ /tasks/{id}:
+ get:
+ summary: "Inspect a task"
+ operationId: "TaskInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Task"
+ 404:
+ description: "no such task"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "ID of the task"
+ required: true
+ type: "string"
+ tags: ["Task"]
+ /tasks/{id}/logs:
+ get:
+ summary: "Get task logs"
+ description: |
+ Get `stdout` and `stderr` logs from a task.
+ See also [`/containers/{id}/logs`](#operation/ContainerLogs).
+
+ **Note**: This endpoint works only for services with the `local`,
+ `json-file` or `journald` logging drivers.
+ operationId: "TaskLogs"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ - "application/vnd.docker.multiplexed-stream"
+ responses:
+ 200:
+ description: "logs returned as a stream in response body"
+ schema:
+ type: "string"
+ format: "binary"
+ 404:
+ description: "no such task"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such task: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID of the task"
+ type: "string"
+ - name: "details"
+ in: "query"
+ description: "Show task context and extra details provided to logs."
+ type: "boolean"
+ default: false
+ - name: "follow"
+ in: "query"
+ description: "Keep connection after returning logs."
+ type: "boolean"
+ default: false
+ - name: "stdout"
+ in: "query"
+ description: "Return logs from `stdout`"
+ type: "boolean"
+ default: false
+ - name: "stderr"
+ in: "query"
+ description: "Return logs from `stderr`"
+ type: "boolean"
+ default: false
+ - name: "since"
+ in: "query"
+ description: "Only return logs since this time, as a UNIX timestamp"
+ type: "integer"
+ default: 0
+ - name: "timestamps"
+ in: "query"
+ description: "Add timestamps to every log line"
+ type: "boolean"
+ default: false
+ - name: "tail"
+ in: "query"
+ description: |
+ Only return this number of log lines from the end of the logs.
+ Specify as an integer or `all` to output all log lines.
+ type: "string"
+ default: "all"
+ tags: ["Task"]
+ /secrets:
+ get:
+ summary: "List secrets"
+ operationId: "SecretList"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Secret"
+ example:
+ - ID: "blt1owaxmitz71s9v5zh81zun"
+ Version:
+ Index: 85
+ CreatedAt: "2017-07-20T13:55:28.678958722Z"
+ UpdatedAt: "2017-07-20T13:55:28.678958722Z"
+ Spec:
+ Name: "mysql-passwd"
+ Labels:
+ some.label: "some.value"
+ Driver:
+ Name: "secret-bucket"
+ Options:
+ OptionA: "value for driver option A"
+ OptionB: "value for driver option B"
+ - ID: "ktnbjxoalbkvbvedmg1urrz8h"
+ Version:
+ Index: 11
+ CreatedAt: "2016-11-05T01:20:17.327670065Z"
+ UpdatedAt: "2016-11-05T01:20:17.327670065Z"
+ Spec:
+ Name: "app-dev.crt"
+ Labels:
+ foo: "bar"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to
+ process on the secrets list.
+
+ Available filters:
+
+ - `id=<secret id>`
+ - `label=<key>` or `label=<key>=value`
+ - `name=<secret name>`
+ - `names=<secret name>`
+ tags: ["Secret"]
+ /secrets/create:
+ post:
+ summary: "Create a secret"
+ operationId: "SecretCreate"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/IDResponse"
+ 409:
+ description: "name conflicts with an existing object"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "body"
+ in: "body"
+ schema:
+ allOf:
+ - $ref: "#/definitions/SecretSpec"
+ - type: "object"
+ example:
+ Name: "app-key.crt"
+ Labels:
+ foo: "bar"
+ Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg=="
+ Driver:
+ Name: "secret-bucket"
+ Options:
+ OptionA: "value for driver option A"
+ OptionB: "value for driver option B"
+ tags: ["Secret"]
+ /secrets/{id}:
+ get:
+ summary: "Inspect a secret"
+ operationId: "SecretInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Secret"
+ examples:
+ application/json:
+ ID: "ktnbjxoalbkvbvedmg1urrz8h"
+ Version:
+ Index: 11
+ CreatedAt: "2016-11-05T01:20:17.327670065Z"
+ UpdatedAt: "2016-11-05T01:20:17.327670065Z"
+ Spec:
+ Name: "app-dev.crt"
+ Labels:
+ foo: "bar"
+ Driver:
+ Name: "secret-bucket"
+ Options:
+ OptionA: "value for driver option A"
+ OptionB: "value for driver option B"
+
+ 404:
+ description: "secret not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ type: "string"
+ description: "ID of the secret"
+ tags: ["Secret"]
+ delete:
+ summary: "Delete a secret"
+ operationId: "SecretDelete"
+ produces:
+ - "application/json"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "secret not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ type: "string"
+ description: "ID of the secret"
+ tags: ["Secret"]
+ /secrets/{id}/update:
+ post:
+ summary: "Update a Secret"
+ operationId: "SecretUpdate"
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such secret"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the secret"
+ type: "string"
+ required: true
+ - name: "body"
+ in: "body"
+ schema:
+ $ref: "#/definitions/SecretSpec"
+ description: |
+ The spec of the secret to update. Currently, only the Labels field
+ can be updated. All other fields must remain unchanged from the
+ [SecretInspect endpoint](#operation/SecretInspect) response values.
+ - name: "version"
+ in: "query"
+ description: |
+ The version number of the secret object being updated. This is
+ required to avoid conflicting writes.
+ type: "integer"
+ format: "int64"
+ required: true
+ tags: ["Secret"]
+ /configs:
+ get:
+ summary: "List configs"
+ operationId: "ConfigList"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Config"
+ example:
+ - ID: "ktnbjxoalbkvbvedmg1urrz8h"
+ Version:
+ Index: 11
+ CreatedAt: "2016-11-05T01:20:17.327670065Z"
+ UpdatedAt: "2016-11-05T01:20:17.327670065Z"
+ Spec:
+ Name: "server.conf"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to
+ process on the configs list.
+
+ Available filters:
+
+ - `id=<config id>`
+ - `label=<key>` or `label=<key>=value`
+ - `name=<config name>`
+ - `names=<config name>`
+ tags: ["Config"]
+ /configs/create:
+ post:
+ summary: "Create a config"
+ operationId: "ConfigCreate"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/IDResponse"
+ 409:
+ description: "name conflicts with an existing object"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "body"
+ in: "body"
+ schema:
+ allOf:
+ - $ref: "#/definitions/ConfigSpec"
+ - type: "object"
+ example:
+ Name: "server.conf"
+ Labels:
+ foo: "bar"
+ Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg=="
+ tags: ["Config"]
+ /configs/{id}:
+ get:
+ summary: "Inspect a config"
+ operationId: "ConfigInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Config"
+ examples:
+ application/json:
+ ID: "ktnbjxoalbkvbvedmg1urrz8h"
+ Version:
+ Index: 11
+ CreatedAt: "2016-11-05T01:20:17.327670065Z"
+ UpdatedAt: "2016-11-05T01:20:17.327670065Z"
+ Spec:
+ Name: "app-dev.crt"
+ 404:
+ description: "config not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ type: "string"
+ description: "ID of the config"
+ tags: ["Config"]
+ delete:
+ summary: "Delete a config"
+ operationId: "ConfigDelete"
+ produces:
+ - "application/json"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "config not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ type: "string"
+ description: "ID of the config"
+ tags: ["Config"]
+ /configs/{id}/update:
+ post:
+ summary: "Update a Config"
+ operationId: "ConfigUpdate"
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such config"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the config"
+ type: "string"
+ required: true
+ - name: "body"
+ in: "body"
+ schema:
+ $ref: "#/definitions/ConfigSpec"
+ description: |
+ The spec of the config to update. Currently, only the Labels field
+ can be updated. All other fields must remain unchanged from the
+ [ConfigInspect endpoint](#operation/ConfigInspect) response values.
+ - name: "version"
+ in: "query"
+ description: |
+ The version number of the config object being updated. This is
+ required to avoid conflicting writes.
+ type: "integer"
+ format: "int64"
+ required: true
+ tags: ["Config"]
+ /distribution/{name}/json:
+ get:
+ summary: "Get image information from the registry"
+ description: |
+ Return image digest and platform information by contacting the registry.
+ operationId: "DistributionInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "descriptor and platform information"
+ schema:
+ $ref: "#/definitions/DistributionInspect"
+ 401:
+ description: "Failed authentication or no image found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such image: someimage (tag: latest)"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or id"
+ type: "string"
+ required: true
+ tags: ["Distribution"]
+ /session:
+ post:
+ summary: "Initialize interactive session"
+ description: |
+ Start a new interactive session with a server. A session allows the
+ server to call back to the client for advanced capabilities.
+
+ ### Hijacking
+
+ This endpoint hijacks the HTTP connection to an HTTP/2 transport that
+ allows the client to expose gRPC services on that connection.
+
+ For example, the client sends this request to upgrade the connection:
+
+ ```
+ POST /session HTTP/1.1
+ Upgrade: h2c
+ Connection: Upgrade
+ ```
+
+ The Docker daemon responds with a `101 UPGRADED` response followed by
+ the raw stream:
+
+ ```
+ HTTP/1.1 101 UPGRADED
+ Connection: Upgrade
+ Upgrade: h2c
+ ```
+ operationId: "Session"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ responses:
+ 101:
+ description: "no error, hijacking successful"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Session"]
diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
new file mode 100644
index 0000000..bf3463b
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
@@ -0,0 +1,23 @@
+package blkiodev // import "github.com/docker/docker/api/types/blkiodev"
+
+import "fmt"
+
+// WeightDevice is a structure that holds a device:weight pair
+type WeightDevice struct {
+ Path string
+ Weight uint16
+}
+
+func (w *WeightDevice) String() string {
+ return fmt.Sprintf("%s:%d", w.Path, w.Weight)
+}
+
+// ThrottleDevice is a structure that holds a device:rate_per_second pair
+type ThrottleDevice struct {
+ Path string
+ Rate uint64
+}
+
+func (t *ThrottleDevice) String() string {
+ return fmt.Sprintf("%s:%d", t.Path, t.Rate)
+}
diff --git a/vendor/github.com/docker/docker/api/types/checkpoint/list.go b/vendor/github.com/docker/docker/api/types/checkpoint/list.go
new file mode 100644
index 0000000..94a9c0a
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/checkpoint/list.go
@@ -0,0 +1,7 @@
+package checkpoint
+
+// Summary represents the details of a checkpoint when listing checkpoints.
+type Summary struct {
+ // Name is the name of the checkpoint.
+ Name string
+}
diff --git a/vendor/github.com/docker/docker/api/types/checkpoint/options.go b/vendor/github.com/docker/docker/api/types/checkpoint/options.go
new file mode 100644
index 0000000..9477458
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/checkpoint/options.go
@@ -0,0 +1,19 @@
+package checkpoint
+
+// CreateOptions holds parameters to create a checkpoint from a container.
+type CreateOptions struct {
+ CheckpointID string
+ CheckpointDir string
+ Exit bool
+}
+
+// ListOptions holds parameters to list checkpoints for a container.
+type ListOptions struct {
+ CheckpointDir string
+}
+
+// DeleteOptions holds parameters to delete a checkpoint from a container.
+type DeleteOptions struct {
+ CheckpointID string
+ CheckpointDir string
+}
diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go
new file mode 100644
index 0000000..dce8260
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/client.go
@@ -0,0 +1,256 @@
+package types // import "github.com/docker/docker/api/types"
+
+import (
+ "bufio"
+ "context"
+ "io"
+ "net"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/registry"
+)
+
+// NewHijackedResponse initializes a [HijackedResponse] type.
+func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse {
+ return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType}
+}
+
+// HijackedResponse holds connection information for a hijacked request.
+type HijackedResponse struct {
+ mediaType string
+ Conn net.Conn
+ Reader *bufio.Reader
+}
+
+// Close closes the hijacked connection and reader.
+func (h *HijackedResponse) Close() {
+ h.Conn.Close()
+}
+
+// MediaType lets the client know whether the HijackedResponse holds a raw or
+// multiplexed stream. It returns false if the HTTP Content-Type is not
+// relevant, in which case the container must be inspected instead.
+func (h *HijackedResponse) MediaType() (string, bool) {
+ if h.mediaType == "" {
+ return "", false
+ }
+ return h.mediaType, true
+}
+
+// CloseWriter is an interface implemented by connections that can close
+// their write side to prevent further writes.
+type CloseWriter interface {
+ CloseWrite() error
+}
+
+// CloseWrite closes a readWriter for writing.
+func (h *HijackedResponse) CloseWrite() error {
+ if conn, ok := h.Conn.(CloseWriter); ok {
+ return conn.CloseWrite()
+ }
+ return nil
+}
+
+// ImageBuildOptions holds the information
+// necessary to build images.
+type ImageBuildOptions struct {
+ Tags []string
+ SuppressOutput bool
+ RemoteContext string
+ NoCache bool
+ Remove bool
+ ForceRemove bool
+ PullParent bool
+ Isolation container.Isolation
+ CPUSetCPUs string
+ CPUSetMems string
+ CPUShares int64
+ CPUQuota int64
+ CPUPeriod int64
+ Memory int64
+ MemorySwap int64
+ CgroupParent string
+ NetworkMode string
+ ShmSize int64
+ Dockerfile string
+ Ulimits []*container.Ulimit
+ // BuildArgs needs to be a *string instead of just a string so that
+ // we can tell the difference between "" (empty string) and no value
+ // at all (nil). See the parsing of buildArgs in
+ // api/server/router/build/build_routes.go for even more info.
+ BuildArgs map[string]*string
+ AuthConfigs map[string]registry.AuthConfig
+ Context io.Reader
+ Labels map[string]string
+ // Squash the resulting image's layers into the parent. This preserves
+ // the original image and creates a new one from the parent with all the
+ // changes applied to a single layer.
+ Squash bool
+ // CacheFrom specifies images that are used for matching cache. Images
+ // specified here do not need to have a valid parent chain to match cache.
+ CacheFrom []string
+ SecurityOpt []string
+ ExtraHosts []string // List of extra hosts
+ Target string
+ SessionID string
+ Platform string
+ // Version specifies the version of the underlying builder to use
+ Version BuilderVersion
+ // BuildID is an optional identifier that can be passed together with the
+ // build request. The same identifier can be used to gracefully cancel the
+ // build with the cancel request.
+ BuildID string
+ // Outputs defines configurations for exporting build results. Only supported
+ // in BuildKit mode
+ Outputs []ImageBuildOutput
+}
+
+// ImageBuildOutput defines configuration for exporting a build result
+type ImageBuildOutput struct {
+ Type string
+ Attrs map[string]string
+}
+
+// BuilderVersion sets the version of the underlying builder to use
+type BuilderVersion string
+
+const (
+ // BuilderV1 is the first generation builder in docker daemon
+ BuilderV1 BuilderVersion = "1"
+ // BuilderBuildKit is builder based on moby/buildkit project
+ BuilderBuildKit BuilderVersion = "2"
+)
+
+// ImageBuildResponse holds information
+// returned by a server after building
+// an image.
+type ImageBuildResponse struct {
+ Body io.ReadCloser
+ OSType string
+}
+
+// NodeListOptions holds parameters to list nodes with.
+type NodeListOptions struct {
+ Filters filters.Args
+}
+
+// NodeRemoveOptions holds parameters to remove nodes with.
+type NodeRemoveOptions struct {
+ Force bool
+}
+
+// ServiceCreateOptions contains the options to use when creating a service.
+type ServiceCreateOptions struct {
+ // EncodedRegistryAuth is the encoded registry authorization credentials to
+ // use when creating the service.
+ //
+ // This field follows the format of the X-Registry-Auth header.
+ EncodedRegistryAuth string
+
+ // QueryRegistry indicates whether creating the service requires
+ // contacting a registry. A registry may be contacted to retrieve
+ // the image digest and manifest, which in turn can be used to update
+ // platform or other information about the service.
+ QueryRegistry bool
+}
+
+// Values for RegistryAuthFrom in ServiceUpdateOptions
+const (
+ RegistryAuthFromSpec = "spec"
+ RegistryAuthFromPreviousSpec = "previous-spec"
+)
+
+// ServiceUpdateOptions contains the options to be used for updating services.
+type ServiceUpdateOptions struct {
+ // EncodedRegistryAuth is the encoded registry authorization credentials to
+ // use when updating the service.
+ //
+ // This field follows the format of the X-Registry-Auth header.
+ EncodedRegistryAuth string
+
+ // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
+ // into this field. While it does open API users up to racy writes, most
+ // users may not need that level of consistency in practice.
+
+ // RegistryAuthFrom specifies where to find the registry authorization
+ // credentials if they are not given in EncodedRegistryAuth. Valid
+ // values are "spec" and "previous-spec".
+ RegistryAuthFrom string
+
+ // Rollback indicates whether a server-side rollback should be
+ // performed. When this is set, the provided spec will be ignored.
+ // The valid values are "previous" and "none". An empty value is the
+ // same as "none".
+ Rollback string
+
+ // QueryRegistry indicates whether the service update requires
+ // contacting a registry. A registry may be contacted to retrieve
+ // the image digest and manifest, which in turn can be used to update
+ // platform or other information about the service.
+ QueryRegistry bool
+}
+
+// ServiceListOptions holds parameters to list services with.
+type ServiceListOptions struct {
+ Filters filters.Args
+
+ // Status indicates whether the server should include the service task
+ // count of running and desired tasks.
+ Status bool
+}
+
+// ServiceInspectOptions holds parameters related to the "service inspect"
+// operation.
+type ServiceInspectOptions struct {
+ InsertDefaults bool
+}
+
+// TaskListOptions holds parameters to list tasks with.
+type TaskListOptions struct {
+ Filters filters.Args
+}
+
+// PluginRemoveOptions holds parameters to remove plugins.
+type PluginRemoveOptions struct {
+ Force bool
+}
+
+// PluginEnableOptions holds parameters to enable plugins.
+type PluginEnableOptions struct {
+ Timeout int
+}
+
+// PluginDisableOptions holds parameters to disable plugins.
+type PluginDisableOptions struct {
+ Force bool
+}
+
+// PluginInstallOptions holds parameters to install a plugin.
+type PluginInstallOptions struct {
+ Disabled bool
+ AcceptAllPermissions bool
+ RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+ RemoteRef string // RemoteRef is the plugin name on the registry
+
+ // PrivilegeFunc is a function that clients can supply to retry operations
+ // after getting an authorization error. This function returns the registry
+ // authentication header value in base64 encoded format, or an error if the
+ // privilege request fails.
+ //
+ // For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig].
+ PrivilegeFunc func(context.Context) (string, error)
+ AcceptPermissionsFunc func(context.Context, PluginPrivileges) (bool, error)
+ Args []string
+}
+
+// SwarmUnlockKeyResponse contains the response for Engine API:
+// GET /swarm/unlockkey
+type SwarmUnlockKeyResponse struct {
+ // UnlockKey is the unlock key in ASCII-armored format.
+ UnlockKey string
+}
+
+// PluginCreateOptions holds all options for plugin create.
+type PluginCreateOptions struct {
+ RepoName string
+}
diff --git a/vendor/github.com/docker/docker/api/types/common/id_response.go b/vendor/github.com/docker/docker/api/types/common/id_response.go
new file mode 100644
index 0000000..22e8c60
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/common/id_response.go
@@ -0,0 +1,13 @@
+package common
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// IDResponse Response to an API call that returns just an Id
+// swagger:model IDResponse
+type IDResponse struct {
+
+ // The id of the newly created object.
+ // Required: true
+ ID string `json:"Id"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/change_type.go b/vendor/github.com/docker/docker/api/types/container/change_type.go
new file mode 100644
index 0000000..fe8d6d3
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/change_type.go
@@ -0,0 +1,15 @@
+package container
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ChangeType Kind of change
+//
+// Can be one of:
+//
+// - `0`: Modified ("C")
+// - `1`: Added ("A")
+// - `2`: Deleted ("D")
+//
+// swagger:model ChangeType
+type ChangeType uint8
diff --git a/vendor/github.com/docker/docker/api/types/container/change_types.go b/vendor/github.com/docker/docker/api/types/container/change_types.go
new file mode 100644
index 0000000..3a3a838
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/change_types.go
@@ -0,0 +1,23 @@
+package container
+
+const (
+ // ChangeModify represents the modify operation.
+ ChangeModify ChangeType = 0
+ // ChangeAdd represents the add operation.
+ ChangeAdd ChangeType = 1
+ // ChangeDelete represents the delete operation.
+ ChangeDelete ChangeType = 2
+)
+
+func (ct ChangeType) String() string {
+ switch ct {
+ case ChangeModify:
+ return "C"
+ case ChangeAdd:
+ return "A"
+ case ChangeDelete:
+ return "D"
+ default:
+ return ""
+ }
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/commit.go b/vendor/github.com/docker/docker/api/types/container/commit.go
new file mode 100644
index 0000000..6fd1b0e
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/commit.go
@@ -0,0 +1,7 @@
+package container
+
+import "github.com/docker/docker/api/types/common"
+
+// CommitResponse response for the commit API call, containing the ID of the
+// image that was produced.
+type CommitResponse = common.IDResponse
diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go
new file mode 100644
index 0000000..d6b03e8
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/config.go
@@ -0,0 +1,73 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/strslice"
+ "github.com/docker/go-connections/nat"
+ dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
+)
+
+// MinimumDuration puts a minimum on user configured duration.
+// This is to prevent API errors caused by time-unit mistakes. For example,
+// an API caller may set 3 as a healthcheck interval intending 3 seconds,
+// but Docker would interpret it as 3 nanoseconds.
+const MinimumDuration = 1 * time.Millisecond
+
+// StopOptions holds the options to stop or restart a container.
+type StopOptions struct {
+ // Signal (optional) is the signal to send to the container to (gracefully)
+ // stop it before forcibly terminating the container with SIGKILL after the
+ // timeout expires. If no value is set, the default (SIGTERM) is used.
+ Signal string `json:",omitempty"`
+
+ // Timeout (optional) is the timeout (in seconds) to wait for the container
+ // to stop gracefully before forcibly terminating it with SIGKILL.
+ //
+ // - Use nil to use the default timeout (10 seconds).
+ // - Use '-1' to wait indefinitely.
+ // - Use '0' to not wait for the container to exit gracefully, and
+ // immediately proceed to forcibly terminating the container.
+ // - Other positive values are used as timeout (in seconds).
+ Timeout *int `json:",omitempty"`
+}
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+type HealthConfig = dockerspec.HealthcheckConfig
+
+// Config contains the configuration data about a container.
+// It should hold only portable information about the container.
+// Here, "portable" means "independent from the host we are running on".
+// Non-portable information *should* appear in HostConfig.
+// All fields added to this struct must be marked `omitempty` to keep getting
+// predictable hashes from the old `v1Compatibility` configuration.
+type Config struct {
+ Hostname string // Hostname
+ Domainname string // Domainname
+ User string // User that will run the command(s) inside the container; also supports user:group
+ AttachStdin bool // Attach the standard input, makes possible user interaction
+ AttachStdout bool // Attach the standard output
+ AttachStderr bool // Attach the standard error
+ ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports
+ Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
+ OpenStdin bool // Open stdin
+ StdinOnce bool // If true, close stdin after the first attached client disconnects.
+ Env []string // List of environment variables to set in the container
+ Cmd strslice.StrSlice // Command to run when starting the container
+ Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
+ ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific).
+ Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
+ Volumes map[string]struct{} // List of volumes (mounts) used for the container
+ WorkingDir string // Current directory (PWD) in which the command will be launched
+ Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
+ NetworkDisabled bool `json:",omitempty"` // Is network disabled
+ // Mac Address of the container.
+ //
+ // Deprecated: this field is deprecated since API v1.44. Use EndpointSettings.MacAddress instead.
+ MacAddress string `json:",omitempty"`
+ OnBuild []string // ONBUILD metadata that was defined in the image's Dockerfile
+ Labels map[string]string // List of labels set to this container
+ StopSignal string `json:",omitempty"` // Signal to stop a container
+ StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
+ Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
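`StopOptions` above is what the client's `ContainerStop` call sends as the request body; a sketch with a placeholder container name:

```go
package main

import (
	"context"
	"log"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Give the container 30 seconds to exit after SIGTERM before the
	// daemon falls back to SIGKILL; a nil Timeout would mean the default.
	timeout := 30
	err = cli.ContainerStop(context.Background(), "my-container", container.StopOptions{
		Signal:  "SIGTERM",
		Timeout: &timeout,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```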
diff --git a/vendor/github.com/docker/docker/api/types/container/container.go b/vendor/github.com/docker/docker/api/types/container/container.go
new file mode 100644
index 0000000..65fabbf
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container.go
@@ -0,0 +1,188 @@
+package container
+
+import (
+ "io"
+ "os"
+ "time"
+
+ "github.com/docker/docker/api/types/mount"
+ "github.com/docker/docker/api/types/storage"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ContainerUpdateOKBody OK response to ContainerUpdate operation
+//
+// Deprecated: use [UpdateResponse]. This alias will be removed in the next release.
+type ContainerUpdateOKBody = UpdateResponse
+
+// ContainerTopOKBody OK response to ContainerTop operation
+//
+// Deprecated: use [TopResponse]. This alias will be removed in the next release.
+type ContainerTopOKBody = TopResponse
+
+// PruneReport contains the response for Engine API:
+// POST "/containers/prune"
+type PruneReport struct {
+ ContainersDeleted []string
+ SpaceReclaimed uint64
+}
+
+// PathStat is used to encode the header from
+// GET "/containers/{name:.*}/archive"
+// "Name" is the file or directory name.
+type PathStat struct {
+ Name string `json:"name"`
+ Size int64 `json:"size"`
+ Mode os.FileMode `json:"mode"`
+ Mtime time.Time `json:"mtime"`
+ LinkTarget string `json:"linkTarget"`
+}
+
+// CopyToContainerOptions holds information
+// about files to copy into a container
+type CopyToContainerOptions struct {
+ AllowOverwriteDirWithFile bool
+ CopyUIDGID bool
+}
+
+// StatsResponseReader wraps an io.ReadCloser to read (a stream of) stats
+// for a container, as produced by the GET "/stats" endpoint.
+//
+// The OSType field is set to the server's platform to allow
+// platform-specific handling of the response.
+//
+// TODO(thaJeztah): remove this wrapper, and make OSType part of [StatsResponse].
+type StatsResponseReader struct {
+ Body io.ReadCloser `json:"body"`
+ OSType string `json:"ostype"`
+}
+
+// MountPoint represents a mount point configuration inside the container.
+// This is used for reporting the mountpoints in use by a container.
+type MountPoint struct {
+ // Type is the type of mount, see `Type<foo>` definitions in
+ // github.com/docker/docker/api/types/mount.Type
+ Type mount.Type `json:",omitempty"`
+
+ // Name is the name reference to the underlying data defined by `Source`
+ // e.g., the volume name.
+ Name string `json:",omitempty"`
+
+ // Source is the source location of the mount.
+ //
+ // For volumes, this contains the storage location of the volume (within
+ // `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains
+ // the source (host) part of the bind-mount. For `tmpfs` mount points, this
+ // field is empty.
+ Source string
+
+ // Destination is the path relative to the container root (`/`) where the
+ // Source is mounted inside the container.
+ Destination string
+
+ // Driver is the volume driver used to create the volume (if it is a volume).
+ Driver string `json:",omitempty"`
+
+ // Mode is a comma separated list of options supplied by the user when
+ // creating the bind/volume mount.
+ //
+ // The default is platform-specific (`"z"` on Linux, empty on Windows).
+ Mode string
+
+ // RW indicates whether the mount is mounted writable (read-write).
+ RW bool
+
+ // Propagation describes how mounts are propagated from the host into the
+ // mount point, and vice-versa. Refer to the Linux kernel documentation
+ // for details:
+ // https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
+ //
+ // This field is not used on Windows.
+ Propagation mount.Propagation
+}
+
+// State stores container's running state
+// it's part of ContainerJSONBase and returned by "inspect" command
+type State struct {
+ Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
+ Running bool
+ Paused bool
+ Restarting bool
+ OOMKilled bool
+ Dead bool
+ Pid int
+ ExitCode int
+ Error string
+ StartedAt string
+ FinishedAt string
+ Health *Health `json:",omitempty"`
+}
+
+// Summary contains response of Engine API:
+// GET "/containers/json"
+type Summary struct {
+ ID string `json:"Id"`
+ Names []string
+ Image string
+ ImageID string
+ ImageManifestDescriptor *ocispec.Descriptor `json:"ImageManifestDescriptor,omitempty"`
+ Command string
+ Created int64
+ Ports []Port
+ SizeRw int64 `json:",omitempty"`
+ SizeRootFs int64 `json:",omitempty"`
+ Labels map[string]string
+ State string
+ Status string
+ HostConfig struct {
+ NetworkMode string `json:",omitempty"`
+ Annotations map[string]string `json:",omitempty"`
+ }
+ NetworkSettings *NetworkSettingsSummary
+ Mounts []MountPoint
+}
+
+// ContainerJSONBase contains response of Engine API GET "/containers/{name:.*}/json"
+// for API version 1.18 and older.
+//
+// TODO(thaJeztah): combine ContainerJSONBase and InspectResponse into a single struct.
+// The split between ContainerJSONBase and InspectResponse
+// was done in commit 6deaa58ba5f051039643cedceee97c8695e2af74 (https://github.com/moby/moby/pull/13675).
+// ContainerJSONBase contained all fields for API < 1.19, and InspectResponse
+// held fields that were added in API 1.19 and up. Given that the minimum
+// supported API version is now 1.24, we no longer use the separate type.
+type ContainerJSONBase struct {
+ ID string `json:"Id"`
+ Created string
+ Path string
+ Args []string
+ State *State
+ Image string
+ ResolvConfPath string
+ HostnamePath string
+ HostsPath string
+ LogPath string
+ Name string
+ RestartCount int
+ Driver string
+ Platform string
+ MountLabel string
+ ProcessLabel string
+ AppArmorProfile string
+ ExecIDs []string
+ HostConfig *HostConfig
+ GraphDriver storage.DriverData
+ SizeRw *int64 `json:",omitempty"`
+ SizeRootFs *int64 `json:",omitempty"`
+}
+
+// InspectResponse is the response for the GET "/containers/{name:.*}/json"
+// endpoint.
+type InspectResponse struct {
+ *ContainerJSONBase
+ Mounts []MountPoint
+ Config *Config
+ NetworkSettings *NetworkSettings
+ // ImageManifestDescriptor is the descriptor of a platform-specific manifest of the image used to create the container.
+ ImageManifestDescriptor *ocispec.Descriptor `json:"ImageManifestDescriptor,omitempty"`
+}
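The inspect types above compose through an embedded *ContainerJSONBase, which encoding/json flattens and allocates on demand. A minimal decoding sketch, assuming a hypothetical, heavily truncated response body:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Hypothetical, truncated GET /containers/{id}/json payload.
	payload := []byte(`{"Id":"abc123","Name":"/web","State":{"Status":"running","Running":true}}`)

	var resp container.InspectResponse
	if err := json.Unmarshal(payload, &resp); err != nil {
		panic(err)
	}
	// ID and Name are promoted from the embedded *ContainerJSONBase.
	fmt.Println(resp.ID, resp.Name, resp.State.Status) // abc123 /web running
}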
diff --git a/vendor/github.com/docker/docker/api/types/container/create_request.go b/vendor/github.com/docker/docker/api/types/container/create_request.go
new file mode 100644
index 0000000..e98dd6a
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/create_request.go
@@ -0,0 +1,13 @@
+package container
+
+import "github.com/docker/docker/api/types/network"
+
+// CreateRequest is the request message sent to the server for container
+// create calls. It is a config wrapper that holds the container [Config]
+// (portable) and the corresponding [HostConfig] (non-portable) and
+// [network.NetworkingConfig].
+type CreateRequest struct {
+ *Config
+ HostConfig *HostConfig `json:"HostConfig,omitempty"`
+ NetworkingConfig *network.NetworkingConfig `json:"NetworkingConfig,omitempty"`
+}
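A short sketch of how CreateRequest flattens the embedded Config into the POST /containers/create body; the image and command below are placeholders:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/strslice"
)

func main() {
	req := container.CreateRequest{
		Config: &container.Config{
			Image: "alpine:latest", // placeholder image
			Cmd:   strslice.StrSlice{"echo", "hello"},
		},
		HostConfig: &container.HostConfig{AutoRemove: true},
	}
	body, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	// The embedded *Config is flattened into the top level of the JSON body;
	// zero-valued Config fields without omitempty are included as well.
	fmt.Println(string(body))
}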
diff --git a/vendor/github.com/docker/docker/api/types/container/create_response.go b/vendor/github.com/docker/docker/api/types/container/create_response.go
new file mode 100644
index 0000000..aa0e7f7
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/create_response.go
@@ -0,0 +1,19 @@
+package container
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// CreateResponse ContainerCreateResponse
+//
+// OK response to ContainerCreate operation
+// swagger:model CreateResponse
+type CreateResponse struct {
+
+ // The ID of the created container
+ // Required: true
+ ID string `json:"Id"`
+
+ // Warnings encountered when creating the container
+ // Required: true
+ Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/errors.go b/vendor/github.com/docker/docker/api/types/container/errors.go
new file mode 100644
index 0000000..32c9780
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/errors.go
@@ -0,0 +1,9 @@
+package container
+
+type errInvalidParameter struct{ error }
+
+func (e *errInvalidParameter) InvalidParameter() {}
+
+func (e *errInvalidParameter) Unwrap() error {
+ return e.error
+}
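The wrapper type stays unexported, but callers can still detect it through its marker method. A sketch, using ValidateRestartPolicy from hostconfig.go further down in this diff:

package main

import (
	"errors"
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	err := container.ValidateRestartPolicy(container.RestartPolicy{
		Name:              container.RestartPolicyAlways,
		MaximumRetryCount: 3, // invalid: only "on-failure" accepts a retry count
	})

	// errors.As matches the unexported wrapper via its marker method.
	var invalid interface{ InvalidParameter() }
	fmt.Println(errors.As(err, &invalid)) // true
	fmt.Println(err)
}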
diff --git a/vendor/github.com/docker/docker/api/types/container/exec.go b/vendor/github.com/docker/docker/api/types/container/exec.go
new file mode 100644
index 0000000..f4b2237
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/exec.go
@@ -0,0 +1,51 @@
+package container
+
+import "github.com/docker/docker/api/types/common"
+
+// ExecCreateResponse is the response for a successful exec-create request.
+// It holds the ID of the exec that was created.
+//
+// TODO(thaJeztah): make this a distinct type.
+type ExecCreateResponse = common.IDResponse
+
+// ExecOptions is a small subset of the Config struct that holds the
+// configuration for the exec feature of Docker.
+type ExecOptions struct {
+ User string // User that will run the command
+ Privileged bool // Is the container in privileged mode
+ Tty bool // Attach standard streams to a tty.
+ ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width]
+	AttachStdin  bool     // Attach the standard input, which enables user interaction
+ AttachStderr bool // Attach the standard error
+ AttachStdout bool // Attach the standard output
+ Detach bool // Execute in detach mode
+ DetachKeys string // Escape keys for detach
+ Env []string // Environment variables
+ WorkingDir string // Working directory
+ Cmd []string // Execution commands and args
+}
+
+// ExecStartOptions is a temporary struct used by execStart.
+// Its Config fields are part of ExecConfig in the runconfig package.
+type ExecStartOptions struct {
+ // ExecStart will first check if it's detached
+ Detach bool
+ // Check if there's a tty
+ Tty bool
+ // Terminal size [height, width], unused if Tty == false
+ ConsoleSize *[2]uint `json:",omitempty"`
+}
+
+// ExecAttachOptions is a temp struct used by execAttach.
+//
+// TODO(thaJeztah): make this a separate type; ContainerExecAttach does not use the Detach option, and cannot run detached.
+type ExecAttachOptions = ExecStartOptions
+
+// ExecInspect holds information returned by exec inspect.
+type ExecInspect struct {
+ ExecID string `json:"ID"`
+ ContainerID string
+ Running bool
+ ExitCode int
+ Pid int
+}
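A sketch of the wire shape these options produce; an exec-create request body is just the JSON encoding of ExecOptions (the user and command below are placeholders):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Options for a non-interactive `ps aux` inside a running container.
	opts := container.ExecOptions{
		User:         "root",
		AttachStdout: true,
		AttachStderr: true,
		Env:          []string{"TERM=xterm"},
		WorkingDir:   "/",
		Cmd:          []string{"ps", "aux"},
	}
	body, err := json.Marshal(opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // JSON body for POST /containers/{id}/exec
}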
diff --git a/vendor/github.com/docker/docker/api/types/container/filesystem_change.go b/vendor/github.com/docker/docker/api/types/container/filesystem_change.go
new file mode 100644
index 0000000..9e9c2ad
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/filesystem_change.go
@@ -0,0 +1,19 @@
+package container
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// FilesystemChange Change in the container's filesystem.
+//
+// swagger:model FilesystemChange
+type FilesystemChange struct {
+
+ // kind
+ // Required: true
+ Kind ChangeType `json:"Kind"`
+
+ // Path to file or directory that has changed.
+ //
+ // Required: true
+ Path string `json:"Path"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/health.go b/vendor/github.com/docker/docker/api/types/container/health.go
new file mode 100644
index 0000000..9366374
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/health.go
@@ -0,0 +1,26 @@
+package container
+
+import "time"
+
+// Health states
+const (
+ NoHealthcheck = "none" // Indicates there is no healthcheck
+ Starting = "starting" // Starting indicates that the container is not yet ready
+ Healthy = "healthy" // Healthy indicates that the container is running correctly
+ Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
+)
+
+// Health stores information about the container's healthcheck results
+type Health struct {
+ Status string // Status is one of [Starting], [Healthy] or [Unhealthy].
+ FailingStreak int // FailingStreak is the number of consecutive failures
+ Log []*HealthcheckResult // Log contains the last few results (oldest first)
+}
+
+// HealthcheckResult stores information about a single run of a healthcheck probe
+type HealthcheckResult struct {
+ Start time.Time // Start is the time this check started
+ End time.Time // End is the time this check ended
+ ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
+ Output string // Output from last check
+}
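A small sketch of consuming these health states; healthSummary is a hypothetical helper, not part of the package:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

// healthSummary renders a one-line summary of a container's health,
// using the Status constants defined above. A nil Health means the
// container has no healthcheck configured.
func healthSummary(h *container.Health) string {
	if h == nil {
		return container.NoHealthcheck
	}
	switch h.Status {
	case container.Healthy:
		return "healthy"
	case container.Unhealthy:
		return fmt.Sprintf("unhealthy (%d consecutive failures)", h.FailingStreak)
	case container.Starting:
		return "starting"
	default:
		return h.Status
	}
}

func main() {
	fmt.Println(healthSummary(&container.Health{Status: container.Unhealthy, FailingStreak: 3}))
}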
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go
new file mode 100644
index 0000000..8319830
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go
@@ -0,0 +1,501 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/docker/docker/api/types/blkiodev"
+ "github.com/docker/docker/api/types/mount"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/strslice"
+ "github.com/docker/go-connections/nat"
+ "github.com/docker/go-units"
+)
+
+// CgroupnsMode represents the cgroup namespace mode of the container
+type CgroupnsMode string
+
+// cgroup namespace modes for containers
+const (
+ CgroupnsModeEmpty CgroupnsMode = ""
+ CgroupnsModePrivate CgroupnsMode = "private"
+ CgroupnsModeHost CgroupnsMode = "host"
+)
+
+// IsPrivate indicates whether the container uses its own private cgroup namespace
+func (c CgroupnsMode) IsPrivate() bool {
+ return c == CgroupnsModePrivate
+}
+
+// IsHost indicates whether the container shares the host's cgroup namespace
+func (c CgroupnsMode) IsHost() bool {
+ return c == CgroupnsModeHost
+}
+
+// IsEmpty indicates whether the container cgroup namespace mode is unset
+func (c CgroupnsMode) IsEmpty() bool {
+ return c == CgroupnsModeEmpty
+}
+
+// Valid indicates whether the cgroup namespace mode is valid
+func (c CgroupnsMode) Valid() bool {
+ return c.IsEmpty() || c.IsPrivate() || c.IsHost()
+}
+
+// Isolation represents the isolation technology of a container. The supported
+// values are platform-specific.
+type Isolation string
+
+// Isolation modes for containers
+const (
+ IsolationEmpty Isolation = "" // IsolationEmpty is unspecified (same behavior as default)
+ IsolationDefault Isolation = "default" // IsolationDefault is the default isolation mode on current daemon
+ IsolationProcess Isolation = "process" // IsolationProcess is process isolation mode
+ IsolationHyperV Isolation = "hyperv" // IsolationHyperV is HyperV isolation mode
+)
+
+// IsDefault indicates the default isolation technology of a container. On Linux this
+// is the native driver. On Windows, this is a Windows Server Container.
+func (i Isolation) IsDefault() bool {
+ // TODO consider making isolation-mode strict (case-sensitive)
+ v := Isolation(strings.ToLower(string(i)))
+ return v == IsolationDefault || v == IsolationEmpty
+}
+
+// IsHyperV indicates the use of a Hyper-V partition for isolation
+func (i Isolation) IsHyperV() bool {
+ // TODO consider making isolation-mode strict (case-sensitive)
+ return Isolation(strings.ToLower(string(i))) == IsolationHyperV
+}
+
+// IsProcess indicates the use of process isolation
+func (i Isolation) IsProcess() bool {
+ // TODO consider making isolation-mode strict (case-sensitive)
+ return Isolation(strings.ToLower(string(i))) == IsolationProcess
+}
+
+// IpcMode represents the container ipc stack.
+type IpcMode string
+
+// IpcMode constants
+const (
+ IPCModeNone IpcMode = "none"
+ IPCModeHost IpcMode = "host"
+ IPCModeContainer IpcMode = "container"
+ IPCModePrivate IpcMode = "private"
+ IPCModeShareable IpcMode = "shareable"
+)
+
+// IsPrivate indicates whether the container uses its own private ipc namespace, which cannot be shared.
+func (n IpcMode) IsPrivate() bool {
+ return n == IPCModePrivate
+}
+
+// IsHost indicates whether the container shares the host's ipc namespace.
+func (n IpcMode) IsHost() bool {
+ return n == IPCModeHost
+}
+
+// IsShareable indicates whether the container's ipc namespace can be shared with another container.
+func (n IpcMode) IsShareable() bool {
+ return n == IPCModeShareable
+}
+
+// IsContainer indicates whether the container uses another container's ipc namespace.
+func (n IpcMode) IsContainer() bool {
+ _, ok := containerID(string(n))
+ return ok
+}
+
+// IsNone indicates whether container IpcMode is set to "none".
+func (n IpcMode) IsNone() bool {
+ return n == IPCModeNone
+}
+
+// IsEmpty indicates whether container IpcMode is empty
+func (n IpcMode) IsEmpty() bool {
+ return n == ""
+}
+
+// Valid indicates whether the ipc mode is valid.
+func (n IpcMode) Valid() bool {
+ // TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid.
+ return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer()
+}
+
+// Container returns the name of the container whose ipc namespace is going to be used.
+func (n IpcMode) Container() (idOrName string) {
+ idOrName, _ = containerID(string(n))
+ return idOrName
+}
+
+// NetworkMode represents the container network stack.
+type NetworkMode string
+
+// IsNone indicates whether the container isn't using a network stack.
+func (n NetworkMode) IsNone() bool {
+ return n == network.NetworkNone
+}
+
+// IsDefault indicates whether the container uses the default network stack.
+func (n NetworkMode) IsDefault() bool {
+ return n == network.NetworkDefault
+}
+
+// IsPrivate indicates whether the container uses its private network stack.
+func (n NetworkMode) IsPrivate() bool {
+ return !(n.IsHost() || n.IsContainer())
+}
+
+// IsContainer indicates whether the container uses another container's network stack.
+func (n NetworkMode) IsContainer() bool {
+ _, ok := containerID(string(n))
+ return ok
+}
+
+// ConnectedContainer returns the id of the container whose network this container is connected to.
+func (n NetworkMode) ConnectedContainer() (idOrName string) {
+ idOrName, _ = containerID(string(n))
+ return idOrName
+}
+
+// UserDefined indicates user-created network
+func (n NetworkMode) UserDefined() string {
+ if n.IsUserDefined() {
+ return string(n)
+ }
+ return ""
+}
+
+// UsernsMode represents userns mode in the container.
+type UsernsMode string
+
+// IsHost indicates whether the container uses the host's userns.
+func (n UsernsMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsPrivate indicates whether the container uses a private userns.
+func (n UsernsMode) IsPrivate() bool {
+ return !n.IsHost()
+}
+
+// Valid indicates whether the userns is valid.
+func (n UsernsMode) Valid() bool {
+ return n == "" || n.IsHost()
+}
+
+// CgroupSpec represents the cgroup to use for the container.
+type CgroupSpec string
+
+// IsContainer indicates whether the container is using another container's cgroup.
+func (c CgroupSpec) IsContainer() bool {
+ _, ok := containerID(string(c))
+ return ok
+}
+
+// Valid indicates whether the cgroup spec is valid.
+func (c CgroupSpec) Valid() bool {
+ // TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid.
+ return c == "" || c.IsContainer()
+}
+
+// Container returns the ID or name of the container whose cgroup will be used.
+func (c CgroupSpec) Container() (idOrName string) {
+ idOrName, _ = containerID(string(c))
+ return idOrName
+}
+
+// UTSMode represents the UTS namespace of the container.
+type UTSMode string
+
+// IsPrivate indicates whether the container uses its private UTS namespace.
+func (n UTSMode) IsPrivate() bool {
+ return !n.IsHost()
+}
+
+// IsHost indicates whether the container uses the host's UTS namespace.
+func (n UTSMode) IsHost() bool {
+ return n == "host"
+}
+
+// Valid indicates whether the UTS namespace is valid.
+func (n UTSMode) Valid() bool {
+ return n == "" || n.IsHost()
+}
+
+// PidMode represents the pid namespace of the container.
+type PidMode string
+
+// IsPrivate indicates whether the container uses its own new pid namespace.
+func (n PidMode) IsPrivate() bool {
+ return !(n.IsHost() || n.IsContainer())
+}
+
+// IsHost indicates whether the container uses the host's pid namespace.
+func (n PidMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsContainer indicates whether the container uses a container's pid namespace.
+func (n PidMode) IsContainer() bool {
+ _, ok := containerID(string(n))
+ return ok
+}
+
+// Valid indicates whether the pid namespace is valid.
+func (n PidMode) Valid() bool {
+ return n == "" || n.IsHost() || validContainer(string(n))
+}
+
+// Container returns the name of the container whose pid namespace is going to be used.
+func (n PidMode) Container() (idOrName string) {
+ idOrName, _ = containerID(string(n))
+ return idOrName
+}
+
+// DeviceRequest represents a request for devices from a device driver.
+// Used by GPU device drivers.
+type DeviceRequest struct {
+ Driver string // Name of device driver
+ Count int // Number of devices to request (-1 = All)
+ DeviceIDs []string // List of device IDs as recognizable by the device driver
+ Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu")
+ Options map[string]string // Options to pass onto the device driver
+}
+
+// DeviceMapping represents the device mapping between the host and the container.
+type DeviceMapping struct {
+ PathOnHost string
+ PathInContainer string
+ CgroupPermissions string
+}
+
+// RestartPolicy represents the restart policies of the container.
+type RestartPolicy struct {
+ Name RestartPolicyMode
+ MaximumRetryCount int
+}
+
+// RestartPolicyMode represents the name of a container's restart policy.
+type RestartPolicyMode string
+
+const (
+ RestartPolicyDisabled RestartPolicyMode = "no"
+ RestartPolicyAlways RestartPolicyMode = "always"
+ RestartPolicyOnFailure RestartPolicyMode = "on-failure"
+ RestartPolicyUnlessStopped RestartPolicyMode = "unless-stopped"
+)
+
+// IsNone indicates whether the container has the "no" restart policy.
+// This means the container will not automatically restart when exiting.
+func (rp *RestartPolicy) IsNone() bool {
+ return rp.Name == RestartPolicyDisabled || rp.Name == ""
+}
+
+// IsAlways indicates whether the container has the "always" restart policy.
+// This means the container will automatically restart regardless of the exit status.
+func (rp *RestartPolicy) IsAlways() bool {
+ return rp.Name == RestartPolicyAlways
+}
+
+// IsOnFailure indicates whether the container has the "on-failure" restart policy.
+// This means the container will automatically restart if it exits with a non-zero exit status.
+func (rp *RestartPolicy) IsOnFailure() bool {
+ return rp.Name == RestartPolicyOnFailure
+}
+
+// IsUnlessStopped indicates whether the container has the
+// "unless-stopped" restart policy. This means the container will
+// automatically restart unless the user has put it into a stopped state.
+func (rp *RestartPolicy) IsUnlessStopped() bool {
+ return rp.Name == RestartPolicyUnlessStopped
+}
+
+// IsSame compares two RestartPolicy values to see if they are the same.
+func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
+ return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
+}
+
+// ValidateRestartPolicy validates the given RestartPolicy.
+func ValidateRestartPolicy(policy RestartPolicy) error {
+ switch policy.Name {
+ case RestartPolicyAlways, RestartPolicyUnlessStopped, RestartPolicyDisabled:
+ if policy.MaximumRetryCount != 0 {
+ msg := "invalid restart policy: maximum retry count can only be used with 'on-failure'"
+ if policy.MaximumRetryCount < 0 {
+ msg += " and cannot be negative"
+ }
+ return &errInvalidParameter{errors.New(msg)}
+ }
+ return nil
+ case RestartPolicyOnFailure:
+ if policy.MaximumRetryCount < 0 {
+ return &errInvalidParameter{errors.New("invalid restart policy: maximum retry count cannot be negative")}
+ }
+ return nil
+ case "":
+ // Versions before v25.0.0 created an empty restart-policy "name" as
+ // default. Allow an empty name with "any" MaximumRetryCount for
+ // backward-compatibility.
+ return nil
+ default:
+ return &errInvalidParameter{fmt.Errorf("invalid restart policy: unknown policy '%s'; use one of '%s', '%s', '%s', or '%s'", policy.Name, RestartPolicyDisabled, RestartPolicyAlways, RestartPolicyOnFailure, RestartPolicyUnlessStopped)}
+ }
+}
+
+// LogMode is a type to define the available modes for logging
+// These modes affect how logs are handled when log messages start piling up.
+type LogMode string
+
+// Available logging modes
+const (
+ LogModeUnset LogMode = ""
+ LogModeBlocking LogMode = "blocking"
+ LogModeNonBlock LogMode = "non-blocking"
+)
+
+// LogConfig represents the logging configuration of the container.
+type LogConfig struct {
+ Type string
+ Config map[string]string
+}
+
+// Ulimit is an alias for [units.Ulimit], which may be moving to a different
+// location or become a local type. This alias is to help transitioning.
+//
+// Users are recommended to use this alias instead of using [units.Ulimit] directly.
+type Ulimit = units.Ulimit
+
+// Resources contains container's resources (cgroups config, ulimits...)
+type Resources struct {
+ // Applicable to all platforms
+ CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
+ Memory int64 // Memory limit (in bytes)
+	NanoCPUs  int64 `json:"NanoCpus"`  // CPU quota in units of 10^-9 CPUs.
+
+ // Applicable to UNIX platforms
+ CgroupParent string // Parent cgroup.
+ BlkioWeight uint16 // Block IO weight (relative weight vs. other containers)
+ BlkioWeightDevice []*blkiodev.WeightDevice
+ BlkioDeviceReadBps []*blkiodev.ThrottleDevice
+ BlkioDeviceWriteBps []*blkiodev.ThrottleDevice
+ BlkioDeviceReadIOps []*blkiodev.ThrottleDevice
+ BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
+ CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
+ CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
+ CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period
+ CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime
+	CpusetCpus           string          // CPUs in which to allow execution (e.g. `0-2`, `0,1`)
+	CpusetMems           string          // Memory nodes (MEMs) in which to allow execution (e.g. `0-2`, `0,1`)
+ Devices []DeviceMapping // List of devices to map inside the container
+	DeviceCgroupRules    []string        // List of rules to be added to the device cgroup
+ DeviceRequests []DeviceRequest // List of device requests for device drivers
+
+ // KernelMemory specifies the kernel memory limit (in bytes) for the container.
+ // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes.
+ KernelMemory int64 `json:",omitempty"`
+ KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes)
+ MemoryReservation int64 // Memory soft limit (in bytes)
+ MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
+ MemorySwappiness *int64 // Tuning container memory swappiness behaviour
+ OomKillDisable *bool // Whether to disable OOM Killer or not
+ PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change.
+ Ulimits []*Ulimit // List of ulimits to be set in the container
+
+ // Applicable to Windows
+ CPUCount int64 `json:"CpuCount"` // CPU count
+ CPUPercent int64 `json:"CpuPercent"` // CPU percent
+ IOMaximumIOps uint64 // Maximum IOps for the container system drive
+ IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
+}
+
+// UpdateConfig holds the mutable attributes of a Container.
+// Those attributes can be updated at runtime.
+type UpdateConfig struct {
+ // Contains container's resources (cgroups, ulimits)
+ Resources
+ RestartPolicy RestartPolicy
+}
+
+// HostConfig the non-portable Config structure of a container.
+// Here, "non-portable" means "dependent on the host we are running on".
+// Portable information *should* appear in Config.
+type HostConfig struct {
+ // Applicable to all platforms
+ Binds []string // List of volume bindings for this container
+ ContainerIDFile string // File (path) where the containerId is written
+ LogConfig LogConfig // Configuration of the logs for this container
+ NetworkMode NetworkMode // Network mode to use for the container
+ PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
+ RestartPolicy RestartPolicy // Restart policy to be used for the container
+ AutoRemove bool // Automatically remove container when it exits
+ VolumeDriver string // Name of the volume driver used to mount volumes
+	VolumesFrom     []string          // List of volumes to take from other containers
+ ConsoleSize [2]uint // Initial console size (height,width)
+ Annotations map[string]string `json:",omitempty"` // Arbitrary non-identifying metadata attached to container and provided to the runtime
+
+ // Applicable to UNIX platforms
+ CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
+ CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
+ CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container
+	DNS             []string          `json:"Dns"`        // List of DNS servers to look up
+	DNSOptions      []string          `json:"DnsOptions"` // List of DNS options to use
+	DNSSearch       []string          `json:"DnsSearch"`  // List of DNS search domains to use
+ ExtraHosts []string // List of extra hosts
+ GroupAdd []string // List of additional groups that the container process will run as
+ IpcMode IpcMode // IPC namespace to use for the container
+ Cgroup CgroupSpec // Cgroup to use for the container
+ Links []string // List of links (in the name:alias form)
+ OomScoreAdj int // Container preference for OOM-killing
+ PidMode PidMode // PID namespace to use for the container
+ Privileged bool // Is the container in privileged mode
+	PublishAllPorts bool              // Should docker publish all exposed ports for the container
+	ReadonlyRootfs  bool              // Is the container's root filesystem mounted read-only
+ SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
+ StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container.
+ Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
+ UTSMode UTSMode // UTS namespace to use for the container
+ UsernsMode UsernsMode // The user namespace to use for the container
+ ShmSize int64 // Total shm memory usage
+ Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
+ Runtime string `json:",omitempty"` // Runtime to use with this container
+
+ // Applicable to Windows
+ Isolation Isolation // Isolation technology of the container (e.g. default, hyperv)
+
+ // Contains container's resources (cgroups, ulimits)
+ Resources
+
+ // Mounts specs used by the container
+ Mounts []mount.Mount `json:",omitempty"`
+
+ // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths)
+ MaskedPaths []string
+
+ // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths)
+ ReadonlyPaths []string
+
+ // Run a custom init inside the container, if null, use the daemon's configured settings
+ Init *bool `json:",omitempty"`
+}
+
+// containerID splits "container:<ID|name>" values. It returns the container
+// ID or name, and whether an ID/name was found. It returns an empty string and
+// false if the value does not have a "container:" prefix. Further validation
+// of the returned value, including checking whether it is empty, should be
+// handled by the caller.
+func containerID(val string) (idOrName string, ok bool) {
+ k, v, hasSep := strings.Cut(val, ":")
+ if !hasSep || k != "container" {
+ return "", false
+ }
+ return v, true
+}
+
+// validContainer checks if the given value is a "container:" mode with
+// a non-empty name/ID.
+func validContainer(val string) bool {
+ id, ok := containerID(val)
+ return ok && id != ""
+}
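The "container:<id|name>" convention parsed by the unexported containerID helper is shared by IpcMode, PidMode, CgroupSpec, and NetworkMode. A sketch exercising it through the exported methods:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Share the pid namespace of a container named "db" (placeholder name).
	pid := container.PidMode("container:db")
	fmt.Println(pid.IsContainer(), pid.Container()) // true db

	ipc := container.IpcMode("host")
	fmt.Println(ipc.IsHost(), ipc.Valid()) // true true

	// An empty name after the prefix still parses as container-mode, but is
	// rejected by validation paths that require a non-empty name/ID.
	bad := container.PidMode("container:")
	fmt.Println(bad.IsContainer(), bad.Valid()) // true false
}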
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
new file mode 100644
index 0000000..cdee49e
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
@@ -0,0 +1,45 @@
+//go:build !windows
+
+package container // import "github.com/docker/docker/api/types/container"
+
+import "github.com/docker/docker/api/types/network"
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+ return i.IsDefault()
+}
+
+// IsBridge indicates whether the container uses the bridge network stack.
+func (n NetworkMode) IsBridge() bool {
+ return n == network.NetworkBridge
+}
+
+// IsHost indicates whether container uses the host network stack.
+func (n NetworkMode) IsHost() bool {
+ return n == network.NetworkHost
+}
+
+// IsUserDefined indicates user-created network
+func (n NetworkMode) IsUserDefined() bool {
+ return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+ switch {
+ case n.IsDefault():
+ return network.NetworkDefault
+ case n.IsBridge():
+ return network.NetworkBridge
+ case n.IsHost():
+ return network.NetworkHost
+ case n.IsNone():
+ return network.NetworkNone
+ case n.IsContainer():
+ return "container"
+ case n.IsUserDefined():
+ return n.UserDefined()
+ default:
+ return ""
+ }
+}
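A sketch of how NetworkName resolves the different modes on a non-Windows build (this file is compiled under the !windows constraint); "mynet" and "db" are placeholder names:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	modes := []container.NetworkMode{"default", "bridge", "host", "none", "container:db", "mynet"}
	for _, m := range modes {
		fmt.Printf("%-14s -> %q\n", string(m), m.NetworkName())
	}
	// default -> "default", bridge -> "bridge", host -> "host",
	// none -> "none", container:db -> "container", mynet -> "mynet"
}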
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
new file mode 100644
index 0000000..f085455
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
@@ -0,0 +1,47 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+import "github.com/docker/docker/api/types/network"
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+ return i.IsDefault() || i.IsHyperV() || i.IsProcess()
+}
+
+// IsBridge indicates whether container uses the bridge network stack
+// in windows it is given the name NAT
+func (n NetworkMode) IsBridge() bool {
+ return n == network.NetworkNat
+}
+
+// IsHost indicates whether the container uses the host network stack.
+// It returns false, as this is not supported on Windows.
+func (n NetworkMode) IsHost() bool {
+ return false
+}
+
+// IsUserDefined indicates user-created network
+func (n NetworkMode) IsUserDefined() bool {
+ return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer()
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+ switch {
+ case n.IsDefault():
+ return network.NetworkDefault
+ case n.IsBridge():
+ return network.NetworkNat
+ case n.IsHost():
+ // Windows currently doesn't support host network-mode, so
+		// this would currently never happen.
+ return network.NetworkHost
+ case n.IsNone():
+ return network.NetworkNone
+ case n.IsContainer():
+ return "container"
+ case n.IsUserDefined():
+ return n.UserDefined()
+ default:
+ return ""
+ }
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/network_settings.go b/vendor/github.com/docker/docker/api/types/container/network_settings.go
new file mode 100644
index 0000000..afec0e5
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/network_settings.go
@@ -0,0 +1,56 @@
+package container
+
+import (
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/go-connections/nat"
+)
+
+// NetworkSettings exposes the network settings in the api
+type NetworkSettings struct {
+ NetworkSettingsBase
+ DefaultNetworkSettings
+ Networks map[string]*network.EndpointSettings
+}
+
+// NetworkSettingsBase holds networking state for a container when inspecting it.
+type NetworkSettingsBase struct {
+ Bridge string // Bridge contains the name of the default bridge interface iff it was set through the daemon --bridge flag.
+ SandboxID string // SandboxID uniquely represents a container's network stack
+ SandboxKey string // SandboxKey identifies the sandbox
+ Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port
+
+ // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
+ //
+ // Deprecated: This field is never set and will be removed in a future release.
+ HairpinMode bool
+ // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
+ //
+ // Deprecated: This field is never set and will be removed in a future release.
+ LinkLocalIPv6Address string
+ // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
+ //
+ // Deprecated: This field is never set and will be removed in a future release.
+ LinkLocalIPv6PrefixLen int
+ SecondaryIPAddresses []network.Address // Deprecated: This field is never set and will be removed in a future release.
+ SecondaryIPv6Addresses []network.Address // Deprecated: This field is never set and will be removed in a future release.
+}
+
+// DefaultNetworkSettings holds network information
+// during the 2 release deprecation period.
+// It will be removed in Docker 1.11.
+type DefaultNetworkSettings struct {
+ EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox
+ Gateway string // Gateway holds the gateway address for the network
+ GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address
+ GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
+ IPAddress string // IPAddress holds the IPv4 address for the network
+ IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address
+ IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6
+ MacAddress string // MacAddress holds the MAC address for the network
+}
+
+// NetworkSettingsSummary provides a summary of the container's networks
+// in /containers/json
+type NetworkSettingsSummary struct {
+ Networks map[string]*network.EndpointSettings
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/options.go b/vendor/github.com/docker/docker/api/types/container/options.go
new file mode 100644
index 0000000..7a23005
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/options.go
@@ -0,0 +1,67 @@
+package container
+
+import "github.com/docker/docker/api/types/filters"
+
+// ResizeOptions holds parameters to resize a TTY.
+// It can be used to resize container TTYs and
+// exec process TTYs too.
+type ResizeOptions struct {
+ Height uint
+ Width uint
+}
+
+// AttachOptions holds parameters to attach to a container.
+type AttachOptions struct {
+ Stream bool
+ Stdin bool
+ Stdout bool
+ Stderr bool
+ DetachKeys string
+ Logs bool
+}
+
+// CommitOptions holds parameters to commit changes into a container.
+type CommitOptions struct {
+ Reference string
+ Comment string
+ Author string
+ Changes []string
+ Pause bool
+ Config *Config
+}
+
+// RemoveOptions holds parameters to remove containers.
+type RemoveOptions struct {
+ RemoveVolumes bool
+ RemoveLinks bool
+ Force bool
+}
+
+// StartOptions holds parameters to start containers.
+type StartOptions struct {
+ CheckpointID string
+ CheckpointDir string
+}
+
+// ListOptions holds parameters to list containers with.
+type ListOptions struct {
+ Size bool
+ All bool
+ Latest bool
+ Since string
+ Before string
+ Limit int
+ Filters filters.Args
+}
+
+// LogsOptions holds parameters to filter logs with.
+type LogsOptions struct {
+ ShowStdout bool
+ ShowStderr bool
+ Since string
+ Until string
+ Timestamps bool
+ Follow bool
+ Tail string
+ Details bool
+}
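A sketch of combining ListOptions with the filters package (defined later in this diff) to list all containers carrying a given label; the label value is a placeholder:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
)

func main() {
	// List every container (not only running ones) labelled env=prod.
	opts := container.ListOptions{
		All:     true,
		Filters: filters.NewArgs(filters.Arg("label", "env=prod")),
	}
	fmt.Println(opts.All, opts.Filters.Get("label")) // true [env=prod]
}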
diff --git a/vendor/github.com/docker/docker/api/types/container/port.go b/vendor/github.com/docker/docker/api/types/container/port.go
new file mode 100644
index 0000000..895043c
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/port.go
@@ -0,0 +1,23 @@
+package container
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Port An open port on a container
+// swagger:model Port
+type Port struct {
+
+ // Host IP address that the container's port is mapped to
+ IP string `json:"IP,omitempty"`
+
+ // Port on the container
+ // Required: true
+ PrivatePort uint16 `json:"PrivatePort"`
+
+ // Port exposed on the host
+ PublicPort uint16 `json:"PublicPort,omitempty"`
+
+ // type
+ // Required: true
+ Type string `json:"Type"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/stats.go b/vendor/github.com/docker/docker/api/types/container/stats.go
new file mode 100644
index 0000000..3bfeb48
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/stats.go
@@ -0,0 +1,177 @@
+package container
+
+import "time"
+
+// ThrottlingData stores CPU throttling stats of one running container.
+// Not used on Windows.
+type ThrottlingData struct {
+ // Number of periods with throttling active
+ Periods uint64 `json:"periods"`
+ // Number of periods when the container hits its throttling limit.
+ ThrottledPeriods uint64 `json:"throttled_periods"`
+ // Aggregate time the container was throttled for in nanoseconds.
+ ThrottledTime uint64 `json:"throttled_time"`
+}
+
+// CPUUsage stores all CPU stats aggregated since container inception.
+type CPUUsage struct {
+ // Total CPU time consumed.
+ // Units: nanoseconds (Linux)
+ // Units: 100's of nanoseconds (Windows)
+ TotalUsage uint64 `json:"total_usage"`
+
+ // Total CPU time consumed per core (Linux). Not used on Windows.
+ // Units: nanoseconds.
+ PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
+
+ // Time spent by tasks of the cgroup in kernel mode (Linux).
+ // Time spent by all container processes in kernel mode (Windows).
+ // Units: nanoseconds (Linux).
+ // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
+ UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+
+ // Time spent by tasks of the cgroup in user mode (Linux).
+ // Time spent by all container processes in user mode (Windows).
+ // Units: nanoseconds (Linux).
+ // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers
+ UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+// CPUStats aggregates and wraps all CPU related info of container
+type CPUStats struct {
+ // CPU Usage. Linux and Windows.
+ CPUUsage CPUUsage `json:"cpu_usage"`
+
+ // System Usage. Linux only.
+ SystemUsage uint64 `json:"system_cpu_usage,omitempty"`
+
+ // Online CPUs. Linux only.
+ OnlineCPUs uint32 `json:"online_cpus,omitempty"`
+
+ // Throttling Data. Linux only.
+ ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+// MemoryStats aggregates all memory stats since container inception on Linux.
+// Windows returns stats for commit and private working set only.
+type MemoryStats struct {
+ // Linux Memory Stats
+
+ // current res_counter usage for memory
+ Usage uint64 `json:"usage,omitempty"`
+ // maximum usage ever recorded.
+ MaxUsage uint64 `json:"max_usage,omitempty"`
+ // TODO(vishh): Export these as stronger types.
+ // all the stats exported via memory.stat.
+ Stats map[string]uint64 `json:"stats,omitempty"`
+ // number of times memory usage hits limits.
+ Failcnt uint64 `json:"failcnt,omitempty"`
+ Limit uint64 `json:"limit,omitempty"`
+
+ // Windows Memory Stats
+ // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx
+
+ // committed bytes
+ Commit uint64 `json:"commitbytes,omitempty"`
+ // peak committed bytes
+ CommitPeak uint64 `json:"commitpeakbytes,omitempty"`
+ // private working set
+ PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"`
+}
+
+// BlkioStatEntry is one small entity to store a piece of Blkio stats
+// Not used on Windows.
+type BlkioStatEntry struct {
+ Major uint64 `json:"major"`
+ Minor uint64 `json:"minor"`
+ Op string `json:"op"`
+ Value uint64 `json:"value"`
+}
+
+// BlkioStats stores all IO service stats for data read and write.
+// This is a Linux specific structure as the differences between expressing
+// block I/O on Windows and Linux are sufficiently significant to make
+// little sense attempting to morph into a combined structure.
+type BlkioStats struct {
+ // number of bytes transferred to and from the block device
+ IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
+ IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
+ IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`
+ IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"`
+ IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"`
+ IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"`
+ IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"`
+ SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"`
+}
+
+// StorageStats is the disk I/O stats for read/write on Windows.
+type StorageStats struct {
+ ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"`
+ ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"`
+ WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"`
+ WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"`
+}
+
+// NetworkStats aggregates the network stats of one container
+type NetworkStats struct {
+ // Bytes received. Windows and Linux.
+ RxBytes uint64 `json:"rx_bytes"`
+ // Packets received. Windows and Linux.
+ RxPackets uint64 `json:"rx_packets"`
+ // Received errors. Not used on Windows. Note that we don't `omitempty` this
+ // field as it is expected in the >=v1.21 API stats structure.
+ RxErrors uint64 `json:"rx_errors"`
+ // Incoming packets dropped. Windows and Linux.
+ RxDropped uint64 `json:"rx_dropped"`
+ // Bytes sent. Windows and Linux.
+ TxBytes uint64 `json:"tx_bytes"`
+ // Packets sent. Windows and Linux.
+ TxPackets uint64 `json:"tx_packets"`
+ // Sent errors. Not used on Windows. Note that we don't `omitempty` this
+ // field as it is expected in the >=v1.21 API stats structure.
+ TxErrors uint64 `json:"tx_errors"`
+ // Outgoing packets dropped. Windows and Linux.
+ TxDropped uint64 `json:"tx_dropped"`
+ // Endpoint ID. Not used on Linux.
+ EndpointID string `json:"endpoint_id,omitempty"`
+ // Instance ID. Not used on Linux.
+ InstanceID string `json:"instance_id,omitempty"`
+}
+
+// PidsStats contains the stats of a container's pids
+type PidsStats struct {
+ // Current is the number of pids in the cgroup
+ Current uint64 `json:"current,omitempty"`
+ // Limit is the hard limit on the number of pids in the cgroup.
+ // A "Limit" of 0 means that there is no limit.
+ Limit uint64 `json:"limit,omitempty"`
+}
+
+// Stats aggregates all types of stats of one container.
+//
+// Deprecated: use [StatsResponse] instead. This type will be removed in the next release.
+type Stats = StatsResponse
+
+// StatsResponse aggregates all types of stats of one container.
+type StatsResponse struct {
+ Name string `json:"name,omitempty"`
+ ID string `json:"id,omitempty"`
+
+ // Common stats
+ Read time.Time `json:"read"`
+ PreRead time.Time `json:"preread"`
+
+ // Linux specific stats, not populated on Windows.
+ PidsStats PidsStats `json:"pids_stats,omitempty"`
+ BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+
+ // Windows specific stats, not populated on Linux.
+ NumProcs uint32 `json:"num_procs"`
+ StorageStats StorageStats `json:"storage_stats,omitempty"`
+
+ // Shared stats
+ CPUStats CPUStats `json:"cpu_stats,omitempty"`
+ PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous"
+ MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+ Networks map[string]NetworkStats `json:"networks,omitempty"`
+}
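A worked example of the delta-based CPU percentage commonly derived from cpu_stats and precpu_stats on Linux (the docker CLI uses this style of calculation); cpuPercent is a hypothetical helper, not part of the package:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

// cpuPercent derives a CPU percentage from two consecutive samples:
// (container CPU delta / system CPU delta) * number of CPUs * 100.
// It returns 0 when no system time elapsed between samples.
func cpuPercent(s *container.StatsResponse) float64 {
	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage) - float64(s.PreCPUStats.CPUUsage.TotalUsage)
	sysDelta := float64(s.CPUStats.SystemUsage) - float64(s.PreCPUStats.SystemUsage)
	online := float64(s.CPUStats.OnlineCPUs)
	if online == 0 {
		// Fall back to the per-CPU sample count on older daemons.
		online = float64(len(s.CPUStats.CPUUsage.PercpuUsage))
	}
	if sysDelta <= 0 || cpuDelta < 0 {
		return 0
	}
	return cpuDelta / sysDelta * online * 100.0
}

func main() {
	s := &container.StatsResponse{}
	s.PreCPUStats.CPUUsage.TotalUsage = 1_000_000
	s.CPUStats.CPUUsage.TotalUsage = 3_000_000
	s.PreCPUStats.SystemUsage = 10_000_000
	s.CPUStats.SystemUsage = 20_000_000
	s.CPUStats.OnlineCPUs = 4
	fmt.Printf("%.1f%%\n", cpuPercent(s)) // 80.0%
}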
diff --git a/vendor/github.com/docker/docker/api/types/container/top_response.go b/vendor/github.com/docker/docker/api/types/container/top_response.go
new file mode 100644
index 0000000..b4bae5e
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/top_response.go
@@ -0,0 +1,18 @@
+package container
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// TopResponse ContainerTopResponse
+//
+// Container "top" response.
+// swagger:model TopResponse
+type TopResponse struct {
+
+ // Each process running in the container, where each process
+ // is an array of values corresponding to the titles.
+ Processes [][]string `json:"Processes"`
+
+ // The ps column titles
+ Titles []string `json:"Titles"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/update_response.go b/vendor/github.com/docker/docker/api/types/container/update_response.go
new file mode 100644
index 0000000..e2b5bf5
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/update_response.go
@@ -0,0 +1,14 @@
+package container
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// UpdateResponse ContainerUpdateResponse
+//
+// Response for a successful container-update.
+// swagger:model UpdateResponse
+type UpdateResponse struct {
+
+ // Warnings encountered when updating the container.
+ Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go
new file mode 100644
index 0000000..ab56d4e
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go
@@ -0,0 +1,12 @@
+package container
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// WaitExitError container waiting error, if any
+// swagger:model WaitExitError
+type WaitExitError struct {
+
+ // Details of an error
+ Message string `json:"Message,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/wait_response.go b/vendor/github.com/docker/docker/api/types/container/wait_response.go
new file mode 100644
index 0000000..84fc6af
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/wait_response.go
@@ -0,0 +1,18 @@
+package container
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// WaitResponse ContainerWaitResponse
+//
+// OK response to ContainerWait operation
+// swagger:model WaitResponse
+type WaitResponse struct {
+
+ // error
+ Error *WaitExitError `json:"Error,omitempty"`
+
+ // Exit code of the container
+ // Required: true
+ StatusCode int64 `json:"StatusCode"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go
new file mode 100644
index 0000000..cd8311f
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/waitcondition.go
@@ -0,0 +1,22 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+// WaitCondition is a type used to specify a container state for which
+// to wait.
+type WaitCondition string
+
+// Possible WaitCondition Values.
+//
+// WaitConditionNotRunning (default) is used to wait for any of the non-running
+// states: "created", "exited", "dead", "removing", or "removed".
+//
+// WaitConditionNextExit is used to wait for the next time the state changes
+// to a non-running state. If the state is currently "created" or "exited",
+// this would cause Wait() to block until either the container runs and exits
+// or is removed.
+//
+// WaitConditionRemoved is used to wait for the container to be removed.
+const (
+ WaitConditionNotRunning WaitCondition = "not-running"
+ WaitConditionNextExit WaitCondition = "next-exit"
+ WaitConditionRemoved WaitCondition = "removed"
+)
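A sketch of waiting on one of these conditions through the client package, assuming ContainerWait's current channel-based signature (it has changed across releases) and a placeholder container name:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// "my-container" is a placeholder name.
	waitCh, errCh := cli.ContainerWait(context.Background(), "my-container", container.WaitConditionNotRunning)
	select {
	case resp := <-waitCh:
		fmt.Println("exit code:", resp.StatusCode)
	case err := <-errCh:
		fmt.Println("wait failed:", err)
	}
}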
diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go
new file mode 100644
index 0000000..dc942d9
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/error_response.go
@@ -0,0 +1,13 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ErrorResponse Represents an error.
+// swagger:model ErrorResponse
+type ErrorResponse struct {
+
+ // The error message.
+ // Required: true
+ Message string `json:"message"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/error_response_ext.go b/vendor/github.com/docker/docker/api/types/error_response_ext.go
new file mode 100644
index 0000000..f84f034
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/error_response_ext.go
@@ -0,0 +1,6 @@
+package types
+
+// Error returns the error message
+func (e ErrorResponse) Error() string {
+ return e.Message
+}
diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go
new file mode 100644
index 0000000..e225df4
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/events/events.go
@@ -0,0 +1,135 @@
+package events // import "github.com/docker/docker/api/types/events"
+
+import "github.com/docker/docker/api/types/filters"
+
+// Type is used for event-types.
+type Type string
+
+// List of known event types.
+const (
+ BuilderEventType Type = "builder" // BuilderEventType is the event type that the builder generates.
+ ConfigEventType Type = "config" // ConfigEventType is the event type that configs generate.
+ ContainerEventType Type = "container" // ContainerEventType is the event type that containers generate.
+	DaemonEventType    Type = "daemon"    // DaemonEventType is the event type that the daemon generates.
+ ImageEventType Type = "image" // ImageEventType is the event type that images generate.
+ NetworkEventType Type = "network" // NetworkEventType is the event type that networks generate.
+ NodeEventType Type = "node" // NodeEventType is the event type that nodes generate.
+ PluginEventType Type = "plugin" // PluginEventType is the event type that plugins generate.
+ SecretEventType Type = "secret" // SecretEventType is the event type that secrets generate.
+ ServiceEventType Type = "service" // ServiceEventType is the event type that services generate.
+ VolumeEventType Type = "volume" // VolumeEventType is the event type that volumes generate.
+)
+
+// Action is used for event-actions.
+type Action string
+
+const (
+ ActionCreate Action = "create"
+ ActionStart Action = "start"
+ ActionRestart Action = "restart"
+ ActionStop Action = "stop"
+ ActionCheckpoint Action = "checkpoint"
+ ActionPause Action = "pause"
+ ActionUnPause Action = "unpause"
+ ActionAttach Action = "attach"
+ ActionDetach Action = "detach"
+ ActionResize Action = "resize"
+ ActionUpdate Action = "update"
+ ActionRename Action = "rename"
+ ActionKill Action = "kill"
+ ActionDie Action = "die"
+ ActionOOM Action = "oom"
+ ActionDestroy Action = "destroy"
+ ActionRemove Action = "remove"
+ ActionCommit Action = "commit"
+ ActionTop Action = "top"
+ ActionCopy Action = "copy"
+ ActionArchivePath Action = "archive-path"
+ ActionExtractToDir Action = "extract-to-dir"
+ ActionExport Action = "export"
+ ActionImport Action = "import"
+ ActionSave Action = "save"
+ ActionLoad Action = "load"
+ ActionTag Action = "tag"
+ ActionUnTag Action = "untag"
+ ActionPush Action = "push"
+ ActionPull Action = "pull"
+ ActionPrune Action = "prune"
+ ActionDelete Action = "delete"
+ ActionEnable Action = "enable"
+ ActionDisable Action = "disable"
+ ActionConnect Action = "connect"
+ ActionDisconnect Action = "disconnect"
+ ActionReload Action = "reload"
+ ActionMount Action = "mount"
+ ActionUnmount Action = "unmount"
+
+ // ActionExecCreate is the prefix used for exec_create events. These
+ // event-actions are commonly followed by a colon and space (": "),
+ // and the command that's defined for the exec, for example:
+ //
+ // exec_create: /bin/sh -c 'echo hello'
+ //
+ // This is far from ideal; it's a compromise to allow filtering and
+ // to preserve backward-compatibility.
+ ActionExecCreate Action = "exec_create"
+	// ActionExecStart is the prefix used for exec_start events. These
+ // event-actions are commonly followed by a colon and space (": "),
+ // and the command that's defined for the exec, for example:
+ //
+ // exec_start: /bin/sh -c 'echo hello'
+ //
+ // This is far from ideal; it's a compromise to allow filtering and
+ // to preserve backward-compatibility.
+ ActionExecStart Action = "exec_start"
+ ActionExecDie Action = "exec_die"
+ ActionExecDetach Action = "exec_detach"
+
+ // ActionHealthStatus is the prefix to use for health_status events.
+ //
+	// Health-status events can either have a pre-defined status, in which
+	// case the "health_status" action is followed by a colon, or can be
+	// "free-form", in which case they're followed by the health-check
+	// output.
+	//
+	// This is far from ideal; it's a compromise to allow filtering and
+	// to preserve backward-compatibility.
+ ActionHealthStatus Action = "health_status"
+ ActionHealthStatusRunning Action = "health_status: running"
+ ActionHealthStatusHealthy Action = "health_status: healthy"
+ ActionHealthStatusUnhealthy Action = "health_status: unhealthy"
+)
+
+// Actor describes something that generates events,
+// like a container, or a network, or a volume.
+// It has a defined name and a set of attributes.
+// The container attributes are its labels, other actors
+// can generate these attributes from other properties.
+type Actor struct {
+ ID string
+ Attributes map[string]string
+}
+
+// Message represents the information an event contains
+type Message struct {
+	// Deprecated information from JSONMessage,
+	// with data only set in container events.
+ Status string `json:"status,omitempty"` // Deprecated: use Action instead.
+ ID string `json:"id,omitempty"` // Deprecated: use Actor.ID instead.
+ From string `json:"from,omitempty"` // Deprecated: use Actor.Attributes["image"] instead.
+
+ Type Type
+ Action Action
+ Actor Actor
+ // Engine events are local scope. Cluster events are swarm scope.
+ Scope string `json:"scope,omitempty"`
+
+ Time int64 `json:"time,omitempty"`
+ TimeNano int64 `json:"timeNano,omitempty"`
+}
+
+// ListOptions holds parameters to filter events with.
+type ListOptions struct {
+ Since string
+ Until string
+ Filters filters.Args
+}
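A sketch of narrowing an event subscription with ListOptions and the filters package; the chosen event types are illustrative:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
)

func main() {
	// Subscribe only to container die/stop events.
	opts := events.ListOptions{
		Filters: filters.NewArgs(
			filters.Arg("type", string(events.ContainerEventType)),
			filters.Arg("event", string(events.ActionDie)),
			filters.Arg("event", string(events.ActionStop)),
		),
	}
	fmt.Println(opts.Filters.Get("event")) // [die stop] (set order not guaranteed)
}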
diff --git a/vendor/github.com/docker/docker/api/types/filters/errors.go b/vendor/github.com/docker/docker/api/types/filters/errors.go
new file mode 100644
index 0000000..b8a690d
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/filters/errors.go
@@ -0,0 +1,24 @@
+package filters
+
+import "fmt"
+
+// invalidFilter indicates that the provided filter or its value is invalid
+type invalidFilter struct {
+ Filter string
+ Value []string
+}
+
+func (e invalidFilter) Error() string {
+ msg := "invalid filter"
+ if e.Filter != "" {
+ msg += " '" + e.Filter
+ if e.Value != nil {
+ msg = fmt.Sprintf("%s=%s", msg, e.Value)
+ }
+ msg += "'"
+ }
+ return msg
+}
+
+// InvalidParameter marks this error as ErrInvalidParameter
+func (e invalidFilter) InvalidParameter() {}
diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go
new file mode 100644
index 0000000..2085ff3
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/filters/parse.go
@@ -0,0 +1,336 @@
+/*
+Package filters provides tools for encoding a mapping of keys to a set of
+multiple values.
+*/
+package filters // import "github.com/docker/docker/api/types/filters"
+
+import (
+ "encoding/json"
+ "regexp"
+ "strings"
+
+ "github.com/docker/docker/api/types/versions"
+)
+
+// Args stores a mapping of keys to a set of multiple values.
+type Args struct {
+ fields map[string]map[string]bool
+}
+
+// KeyValuePair is used to initialize a new Args.
+type KeyValuePair struct {
+ Key string
+ Value string
+}
+
+// Arg creates a new KeyValuePair for initializing Args
+func Arg(key, value string) KeyValuePair {
+ return KeyValuePair{Key: key, Value: value}
+}
+
+// NewArgs returns a new Args populated with the initial args
+func NewArgs(initialArgs ...KeyValuePair) Args {
+ args := Args{fields: map[string]map[string]bool{}}
+ for _, arg := range initialArgs {
+ args.Add(arg.Key, arg.Value)
+ }
+ return args
+}
+
+// Keys returns all the keys in list of Args
+func (args Args) Keys() []string {
+ keys := make([]string, 0, len(args.fields))
+ for k := range args.fields {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+// MarshalJSON returns a JSON byte representation of the Args
+func (args Args) MarshalJSON() ([]byte, error) {
+ if len(args.fields) == 0 {
+ return []byte("{}"), nil
+ }
+ return json.Marshal(args.fields)
+}
+
+// ToJSON returns the Args as a JSON encoded string
+func ToJSON(a Args) (string, error) {
+ if a.Len() == 0 {
+ return "", nil
+ }
+ buf, err := json.Marshal(a)
+ return string(buf), err
+}
+
+// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22
+// then the encoded format will use an older legacy format where the values are a
+// list of strings, instead of a set.
+//
+// Deprecated: do not use in any new code; use ToJSON instead
+func ToParamWithVersion(version string, a Args) (string, error) {
+ if a.Len() == 0 {
+ return "", nil
+ }
+
+ if version != "" && versions.LessThan(version, "1.22") {
+ buf, err := json.Marshal(convertArgsToSlice(a.fields))
+ return string(buf), err
+ }
+
+ return ToJSON(a)
+}
+
+// FromJSON decodes a JSON encoded string into Args
+func FromJSON(p string) (Args, error) {
+ args := NewArgs()
+
+ if p == "" {
+ return args, nil
+ }
+
+ raw := []byte(p)
+ err := json.Unmarshal(raw, &args)
+ if err == nil {
+ return args, nil
+ }
+
+ // Fallback to parsing arguments in the legacy slice format
+ deprecated := map[string][]string{}
+ if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil {
+ return args, &invalidFilter{}
+ }
+
+ args.fields = deprecatedArgs(deprecated)
+ return args, nil
+}
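A round-trip sketch of the JSON encoding handled by ToJSON and FromJSON (the filter names are illustrative):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs(
		filters.Arg("dangling", "true"),
		filters.Arg("label", "env=prod"),
	)

	encoded, err := filters.ToJSON(args)
	if err != nil {
		panic(err)
	}
	fmt.Println(encoded) // {"dangling":{"true":true},"label":{"env=prod":true}}

	decoded, err := filters.FromJSON(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.Get("label")) // [env=prod]
}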
+
+// UnmarshalJSON populates the Args from JSON-encoded bytes.
+func (args Args) UnmarshalJSON(raw []byte) error {
+ return json.Unmarshal(raw, &args.fields)
+}
+
+// Get returns the list of values associated with the key
+func (args Args) Get(key string) []string {
+ values := args.fields[key]
+ if values == nil {
+ return make([]string, 0)
+ }
+ slice := make([]string, 0, len(values))
+ for key := range values {
+ slice = append(slice, key)
+ }
+ return slice
+}
+
+// Add a new value to the set of values
+func (args Args) Add(key, value string) {
+ if _, ok := args.fields[key]; ok {
+ args.fields[key][value] = true
+ } else {
+ args.fields[key] = map[string]bool{value: true}
+ }
+}
+
+// Del removes a value from the set
+func (args Args) Del(key, value string) {
+ if _, ok := args.fields[key]; ok {
+ delete(args.fields[key], value)
+ if len(args.fields[key]) == 0 {
+ delete(args.fields, key)
+ }
+ }
+}
+
+// Len returns the number of keys in the mapping
+func (args Args) Len() int {
+ return len(args.fields)
+}
+
+// MatchKVList returns true if all the pairs in sources exist as key=value
+// pairs in the mapping at key, or if there are no values at key.
+func (args Args) MatchKVList(key string, sources map[string]string) bool {
+ fieldValues := args.fields[key]
+
+ // do not filter if there is no filter set or cannot determine filter
+ if len(fieldValues) == 0 {
+ return true
+ }
+
+ if len(sources) == 0 {
+ return false
+ }
+
+ for value := range fieldValues {
+ testK, testV, hasValue := strings.Cut(value, "=")
+
+ v, ok := sources[testK]
+ if !ok {
+ return false
+ }
+ if hasValue && testV != v {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Match returns true if any of the values at key match the source string
+func (args Args) Match(field, source string) bool {
+ if args.ExactMatch(field, source) {
+ return true
+ }
+
+ fieldValues := args.fields[field]
+ for name2match := range fieldValues {
+ match, err := regexp.MatchString(name2match, source)
+ if err != nil {
+ continue
+ }
+ if match {
+ return true
+ }
+ }
+ return false
+}
+
+// GetBoolOrDefault returns a boolean value of the key if the key is present
+// and is interpretable as a boolean value. Otherwise the default value is returned.
+// Error is not nil only if the filter values are not valid boolean or are conflicting.
+func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) {
+ fieldValues, ok := args.fields[key]
+ if !ok {
+ return defaultValue, nil
+ }
+
+ if len(fieldValues) == 0 {
+ return defaultValue, &invalidFilter{key, nil}
+ }
+
+ isFalse := fieldValues["0"] || fieldValues["false"]
+ isTrue := fieldValues["1"] || fieldValues["true"]
+ if isFalse == isTrue {
+ // Either no or conflicting truthy/falsy value were provided
+ return defaultValue, &invalidFilter{key, args.Get(key)}
+ }
+ return isTrue, nil
+}
+
+// ExactMatch returns true if the source matches exactly one of the values.
+func (args Args) ExactMatch(key, source string) bool {
+ fieldValues, ok := args.fields[key]
+ // do not filter if there is no filter set or cannot determine filter
+ if !ok || len(fieldValues) == 0 {
+ return true
+ }
+
+ // try to match full name value to avoid O(N) regular expression matching
+ return fieldValues[source]
+}
+
+// UniqueExactMatch returns true if there is only one value and the source
+// matches exactly the value.
+func (args Args) UniqueExactMatch(key, source string) bool {
+ fieldValues := args.fields[key]
+ // do not filter if there is no filter set or cannot determine filter
+ if len(fieldValues) == 0 {
+ return true
+ }
+ if len(args.fields[key]) != 1 {
+ return false
+ }
+
+ // try to match full name value to avoid O(N) regular expression matching
+ return fieldValues[source]
+}
+
+// FuzzyMatch returns true if the source matches exactly one value, or the
+// source has one of the values as a prefix.
+func (args Args) FuzzyMatch(key, source string) bool {
+ if args.ExactMatch(key, source) {
+ return true
+ }
+
+ fieldValues := args.fields[key]
+ for prefix := range fieldValues {
+ if strings.HasPrefix(source, prefix) {
+ return true
+ }
+ }
+ return false
+}
+
+// Contains returns true if the key exists in the mapping
+func (args Args) Contains(field string) bool {
+ _, ok := args.fields[field]
+ return ok
+}
+
+// Validate compares the set of accepted keys against the keys in the mapping.
+// An error is returned if any mapping keys are not in the accepted set.
+func (args Args) Validate(accepted map[string]bool) error {
+ for name := range args.fields {
+ if !accepted[name] {
+ return &invalidFilter{name, nil}
+ }
+ }
+ return nil
+}
+
+// WalkValues iterates over the list of values for a key in the mapping and calls
+// op() for each value. If op returns an error the iteration stops and the
+// error is returned.
+func (args Args) WalkValues(field string, op func(value string) error) error {
+ if _, ok := args.fields[field]; !ok {
+ return nil
+ }
+ for v := range args.fields[field] {
+ if err := op(v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Clone returns a copy of args.
+func (args Args) Clone() (newArgs Args) {
+ newArgs.fields = make(map[string]map[string]bool, len(args.fields))
+ for k, m := range args.fields {
+ var mm map[string]bool
+ if m != nil {
+ mm = make(map[string]bool, len(m))
+ for kk, v := range m {
+ mm[kk] = v
+ }
+ }
+ newArgs.fields[k] = mm
+ }
+ return newArgs
+}
+
+func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
+ m := map[string]map[string]bool{}
+ for k, v := range d {
+ values := map[string]bool{}
+ for _, vv := range v {
+ values[vv] = true
+ }
+ m[k] = values
+ }
+ return m
+}
+
+func convertArgsToSlice(f map[string]map[string]bool) map[string][]string {
+ m := map[string][]string{}
+ for k, v := range f {
+ values := []string{}
+ for kk := range v {
+ if v[kk] {
+ values = append(values, kk)
+ }
+ }
+ m[k] = values
+ }
+ return m
+}
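Taken together, the API above round-trips as follows; a minimal, self-contained sketch using only functions defined in this file:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs(
		filters.Arg("label", "env=prod"),
		filters.Arg("dangling", "true"),
	)

	// Encode for an Engine API query parameter.
	encoded, err := filters.ToJSON(args)
	if err != nil {
		panic(err)
	}
	fmt.Println(encoded) // {"dangling":{"true":true},"label":{"env=prod":true}}

	// Decode it back and query the set.
	decoded, err := filters.FromJSON(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.Get("label")) // [env=prod]

	dangling, err := decoded.GetBoolOrDefault("dangling", false)
	fmt.Println(dangling, err) // true <nil>
}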
diff --git a/vendor/github.com/docker/docker/api/types/image/delete_response.go b/vendor/github.com/docker/docker/api/types/image/delete_response.go
new file mode 100644
index 0000000..998620d
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/image/delete_response.go
@@ -0,0 +1,15 @@
+package image
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// DeleteResponse delete response
+// swagger:model DeleteResponse
+type DeleteResponse struct {
+
+ // The image ID of an image that was deleted
+ Deleted string `json:"Deleted,omitempty"`
+
+ // The image ID of an image that was untagged
+ Untagged string `json:"Untagged,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/image/image.go b/vendor/github.com/docker/docker/api/types/image/image.go
new file mode 100644
index 0000000..abb7ffd
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/image/image.go
@@ -0,0 +1,47 @@
+package image
+
+import (
+ "io"
+ "time"
+)
+
+// Metadata contains engine-local data about the image.
+type Metadata struct {
+ // LastTagTime is the date and time at which the image was last tagged.
+ LastTagTime time.Time `json:",omitempty"`
+}
+
+// PruneReport contains the response for Engine API:
+// POST "/images/prune"
+type PruneReport struct {
+ ImagesDeleted []DeleteResponse
+ SpaceReclaimed uint64
+}
+
+// LoadResponse returns information to the client about a load process.
+//
+// TODO(thaJeztah): remove this type, and just use an io.ReadCloser
+//
+// This type was added in https://github.com/moby/moby/pull/18878, related
+// to https://github.com/moby/moby/issues/19177;
+//
+// Make docker load output JSON when the response content type is JSON:
+// Swarm hijacks the response from docker load and returns JSON rather
+// than plain text like the Engine does, so the API library needs to return
+// this information so the client can tell the difference.
+//
+// However the "load" endpoint unconditionally returns JSON;
+// https://github.com/moby/moby/blob/7b9d2ef6e5518a3d3f3cc418459f8df786cfbbd1/api/server/router/image/image_routes.go#L248-L255
+//
+// PR https://github.com/moby/moby/pull/21959 made the response-type depend
+// on whether "quiet" was set, but this logic got changed in a follow-up
+// https://github.com/moby/moby/pull/25557, which made the JSON response-type
+// unconditional, but made the output produced depend on whether "quiet" was set.
+//
+// We should deprecate the "quiet" option, as it's really a client
+// responsibility.
+type LoadResponse struct {
+ // Body must be closed to avoid a resource leak
+ Body io.ReadCloser
+ JSON bool
+}
diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go
new file mode 100644
index 0000000..e302bb0
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/image/image_history.go
@@ -0,0 +1,36 @@
+package image // import "github.com/docker/docker/api/types/image"
+
+// ----------------------------------------------------------------------------
+// Code generated by `swagger generate operation`. DO NOT EDIT.
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// HistoryResponseItem individual image layer information in response to ImageHistory operation
+// swagger:model HistoryResponseItem
+type HistoryResponseItem struct {
+
+ // comment
+ // Required: true
+ Comment string `json:"Comment"`
+
+ // created
+ // Required: true
+ Created int64 `json:"Created"`
+
+ // created by
+ // Required: true
+ CreatedBy string `json:"CreatedBy"`
+
+ // Id
+ // Required: true
+ ID string `json:"Id"`
+
+ // size
+ // Required: true
+ Size int64 `json:"Size"`
+
+ // tags
+ // Required: true
+ Tags []string `json:"Tags"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/image/image_inspect.go b/vendor/github.com/docker/docker/api/types/image/image_inspect.go
new file mode 100644
index 0000000..78e81f0
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/image/image_inspect.go
@@ -0,0 +1,140 @@
+package image
+
+import (
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/storage"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// RootFS returns Image's RootFS description including the layer IDs.
+type RootFS struct {
+ Type string `json:",omitempty"`
+ Layers []string `json:",omitempty"`
+}
+
+// InspectResponse contains response of Engine API:
+// GET "/images/{name:.*}/json"
+type InspectResponse struct {
+ // ID is the content-addressable ID of an image.
+ //
+ // This identifier is a content-addressable digest calculated from the
+ // image's configuration (which includes the digests of layers used by
+ // the image).
+ //
+ // Note that this digest differs from the `RepoDigests` below, which
+ // holds digests of image manifests that reference the image.
+ ID string `json:"Id"`
+
+ // RepoTags is a list of image names/tags in the local image cache that
+ // reference this image.
+ //
+ // Multiple image tags can refer to the same image, and this list may be
+ // empty if no tags reference the image, in which case the image is
+ // "untagged", in which case it can still be referenced by its ID.
+ RepoTags []string
+
+ // RepoDigests is a list of content-addressable digests of locally available
+ // image manifests that the image is referenced from. Multiple manifests can
+ // refer to the same image.
+ //
+ // These digests are usually only available if the image was either pulled
+ // from a registry, or if the image was pushed to a registry, which is when
+ // the manifest is generated and its digest calculated.
+ RepoDigests []string
+
+ // Parent is the ID of the parent image.
+ //
+ // Depending on how the image was created, this field may be empty and
+ // is only set for images that were built/created locally. This field
+ // is empty if the image was pulled from an image registry.
+ Parent string
+
+ // Comment is an optional message that can be set when committing or
+ // importing the image.
+ Comment string
+
+ // Created is the date and time at which the image was created, formatted in
+ // RFC 3339 nano-seconds (time.RFC3339Nano).
+ //
+ // This information is only available if present in the image,
+ // and omitted otherwise.
+ Created string `json:",omitempty"`
+
+ // Container is the ID of the container that was used to create the image.
+ //
+ // Depending on how the image was created, this field may be empty.
+ //
+ // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility.
+ Container string `json:",omitempty"`
+
+ // ContainerConfig is an optional field containing the configuration of the
+ // container that was last committed when creating the image.
+ //
+ // Previous versions of Docker builder used this field to store build cache,
+ // and it is not in active use anymore.
+ //
+ // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility.
+ ContainerConfig *container.Config `json:",omitempty"`
+
+ // DockerVersion is the version of Docker that was used to build the image.
+ //
+ // Depending on how the image was created, this field may be empty.
+ DockerVersion string
+
+ // Author is the name of the author that was specified when committing the
+ // image, or as specified through MAINTAINER (deprecated) in the Dockerfile.
+ Author string
+ Config *container.Config
+
+ // Architecture is the hardware CPU architecture that the image runs on.
+ Architecture string
+
+ // Variant is the CPU architecture variant (presently ARM-only).
+ Variant string `json:",omitempty"`
+
+ // OS is the Operating System the image is built to run on.
+ Os string
+
+ // OsVersion is the version of the Operating System the image is built to
+ // run on (especially for Windows).
+ OsVersion string `json:",omitempty"`
+
+ // Size is the total size of the image including all layers it is composed of.
+ Size int64
+
+ // VirtualSize is the total size of the image including all layers it is
+ // composed of.
+ //
+ // Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead.
+ VirtualSize int64 `json:"VirtualSize,omitempty"`
+
+ // GraphDriver holds information about the storage driver used to store the
+ // container's and image's filesystem.
+ GraphDriver storage.DriverData
+
+ // RootFS contains information about the image's RootFS, including the
+ // layer IDs.
+ RootFS RootFS
+
+ // Metadata of the image in the local cache.
+ //
+ // This information is local to the daemon, and not part of the image itself.
+ Metadata Metadata
+
+ // Descriptor is the OCI descriptor of the image target.
+ // It's only set if the daemon provides a multi-platform image store.
+ //
+ // WARNING: This is experimental and may change at any time without any backward
+ // compatibility.
+ Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"`
+
+ // Manifests is a list of image manifests available in this image. It
+ // provides a more detailed view of the platform-specific image manifests or
+ // other image-attached data like build attestations.
+ //
+ // Only available if the daemon provides a multi-platform image store.
+ //
+ // WARNING: This is experimental and may change at any time without any backward
+ // compatibility.
+ Manifests []ManifestSummary `json:"Manifests,omitempty"`
+}
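A short sketch of decoding an inspect payload into InspectResponse; the payload below is a trimmed, hypothetical example rather than real API output:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/image"
)

func main() {
	// Hypothetical, heavily trimmed body of GET /images/{name}/json.
	payload := []byte(`{
		"Id": "sha256:0000000000000000000000000000000000000000000000000000000000000000",
		"RepoTags": ["alpine:latest"],
		"Os": "linux",
		"Architecture": "arm64",
		"Size": 7348480
	}`)

	var resp image.InspectResponse
	if err := json.Unmarshal(payload, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.ID, resp.Os, resp.Architecture, resp.Size)
}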
diff --git a/vendor/github.com/docker/docker/api/types/image/manifest.go b/vendor/github.com/docker/docker/api/types/image/manifest.go
new file mode 100644
index 0000000..db8a008
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/image/manifest.go
@@ -0,0 +1,99 @@
+package image
+
+import (
+ "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type ManifestKind string
+
+const (
+ ManifestKindImage ManifestKind = "image"
+ ManifestKindAttestation ManifestKind = "attestation"
+ ManifestKindUnknown ManifestKind = "unknown"
+)
+
+type ManifestSummary struct {
+ // ID is the content-addressable ID of an image and is the same as the
+ // digest of the image manifest.
+ //
+ // Required: true
+ ID string `json:"ID"`
+
+ // Descriptor is the OCI descriptor of the image.
+ //
+ // Required: true
+ Descriptor ocispec.Descriptor `json:"Descriptor"`
+
+ // Indicates whether all the child content (image config, layers) is
+ // fully available locally
+ //
+ // Required: true
+ Available bool `json:"Available"`
+
+ // Size is the size information of the content related to this manifest.
+ // Note: These sizes only take the locally available content into account.
+ //
+ // Required: true
+ Size struct {
+ // Content is the size (in bytes) of all the locally present
+ // content in the content store (e.g. image config, layers)
+ // referenced by this manifest and its children.
+ // This only includes blobs in the content store.
+ Content int64 `json:"Content"`
+
+ // Total is the total size (in bytes) of all the locally present
+ // data (both distributable and non-distributable) that's related to
+ // this manifest and its children.
+ // This is equal to the sum of [Content] size AND all the sizes in the
+ // [Size] struct present in the Kind-specific data struct.
+ // For example, for an image kind (Kind == ManifestKindImage),
+ // this would include the size of the image content and unpacked
+ // image snapshots ([Size.Content] + [ImageData.Size.Unpacked]).
+ Total int64 `json:"Total"`
+ } `json:"Size"`
+
+ // Kind is the kind of the image manifest.
+ //
+ // Required: true
+ Kind ManifestKind `json:"Kind"`
+
+ // Fields below are specific to the kind of the image manifest.
+
+ // Present only if Kind == ManifestKindImage.
+ ImageData *ImageProperties `json:"ImageData,omitempty"`
+
+ // Present only if Kind == ManifestKindAttestation.
+ AttestationData *AttestationProperties `json:"AttestationData,omitempty"`
+}
+
+type ImageProperties struct {
+ // Platform is the OCI platform object describing the platform of the image.
+ //
+ // Required: true
+ Platform ocispec.Platform `json:"Platform"`
+
+ Size struct {
+ // Unpacked is the size (in bytes) of the locally unpacked
+ // (uncompressed) image content that's directly usable by the containers
+ // running this image.
+ // It's independent of the distributable content - e.g.
+ // the image might still have unpacked data that's still used by
+ // some container even when the distributable/compressed content is
+ // already gone.
+ //
+ // Required: true
+ Unpacked int64 `json:"Unpacked"`
+ }
+
+ // Containers is an array containing the IDs of the containers that are
+ // using this image.
+ //
+ // Required: true
+ Containers []string `json:"Containers"`
+}
+
+type AttestationProperties struct {
+ // For is the digest of the image manifest that this attestation is for.
+ For digest.Digest `json:"For"`
+}
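A small sketch of the documented size relationship for image-kind manifests (Size.Total = Size.Content + ImageData.Size.Unpacked); the digest and byte counts are placeholders:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/image"
)

func main() {
	var m image.ManifestSummary
	m.ID = "sha256:placeholder" // illustrative, not a real digest
	m.Kind = image.ManifestKindImage
	m.Size.Content = 10 * 1024 * 1024 // compressed blobs in the content store

	m.ImageData = &image.ImageProperties{}
	m.ImageData.Size.Unpacked = 15 * 1024 * 1024 // unpacked snapshots

	// Per the field docs above, Total covers both.
	m.Size.Total = m.Size.Content + m.ImageData.Size.Unpacked
	fmt.Println(m.Size.Total) // 26214400
}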
diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go
new file mode 100644
index 0000000..919510f
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/image/opts.go
@@ -0,0 +1,116 @@
+package image
+
+import (
+ "context"
+ "io"
+
+ "github.com/docker/docker/api/types/filters"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImportSource holds source information for ImageImport
+type ImportSource struct {
+ Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
+ SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
+}
+
+// ImportOptions holds information to import images from the client host.
+type ImportOptions struct {
+ Tag string // Tag is the name to tag this image with. This attribute is deprecated.
+ Message string // Message is the message to tag the image with
+ Changes []string // Changes are the raw changes to apply to this image
+ Platform string // Platform is the target platform of the image
+}
+
+// CreateOptions holds information to create images.
+type CreateOptions struct {
+ RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry.
+ Platform string // Platform is the target platform of the image if it needs to be pulled from the registry.
+}
+
+// PullOptions holds information to pull images.
+type PullOptions struct {
+ All bool
+ RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+
+ // PrivilegeFunc is a function that clients can supply to retry operations
+ // after getting an authorization error. This function returns the registry
+ // authentication header value in base64 encoded format, or an error if the
+ // privilege request fails.
+ //
+ // For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig].
+ PrivilegeFunc func(context.Context) (string, error)
+ Platform string
+}
+
+// PushOptions holds information to push images.
+type PushOptions struct {
+ All bool
+ RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+
+ // PrivilegeFunc is a function that clients can supply to retry operations
+ // after getting an authorization error. This function returns the registry
+ // authentication header value in base64 encoded format, or an error if the
+ // privilege request fails.
+ //
+ // For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig].
+ PrivilegeFunc func(context.Context) (string, error)
+
+ // Platform is an optional field that selects a specific platform to push
+ // when the image is a multi-platform image.
+ // Using this will only push a single platform-specific manifest.
+ Platform *ocispec.Platform `json:",omitempty"`
+}
+
+// ListOptions holds parameters to list images with.
+type ListOptions struct {
+ // All controls whether all images in the graph are filtered, or just
+ // the heads.
+ All bool
+
+ // Filters is a JSON-encoded set of filter arguments.
+ Filters filters.Args
+
+ // SharedSize indicates whether the shared size of images should be computed.
+ SharedSize bool
+
+ // ContainerCount indicates whether container count should be computed.
+ ContainerCount bool
+
+ // Manifests indicates whether the image manifests should be returned.
+ Manifests bool
+}
+
+// RemoveOptions holds parameters to remove images.
+type RemoveOptions struct {
+ Force bool
+ PruneChildren bool
+}
+
+// HistoryOptions holds parameters to get image history.
+type HistoryOptions struct {
+ // Platform from the manifest list to use for history.
+ Platform *ocispec.Platform
+}
+
+// LoadOptions holds parameters to load images.
+type LoadOptions struct {
+ // Quiet suppresses progress output
+ Quiet bool
+
+ // Platforms selects the platforms to load if the image is a
+ // multi-platform image and has multiple variants.
+ Platforms []ocispec.Platform
+}
+
+type InspectOptions struct {
+ // Manifests returns the image manifests.
+ Manifests bool
+}
+
+// SaveOptions holds parameters to save images.
+type SaveOptions struct {
+ // Platforms selects the platforms to save if the image is a
+ // multi-platform image and has multiple variants.
+ Platforms []ocispec.Platform
+}
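A hedged sketch of filling PullOptions. It assumes registry.EncodeAuthConfig from this module's registry package (its authconfig.go is vendored later in this diff) to produce the base64url-encoded credentials:

package main

import (
	"context"

	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/api/types/registry"
)

func main() {
	// Assumption: EncodeAuthConfig returns the base64url form of AuthConfig.
	auth, err := registry.EncodeAuthConfig(registry.AuthConfig{
		Username: "user",
		Password: "secret",
	})
	if err != nil {
		panic(err)
	}

	_ = image.PullOptions{
		RegistryAuth: auth,
		// PrivilegeFunc is only consulted after an authorization error;
		// this sketch simply re-submits the same credentials.
		PrivilegeFunc: func(ctx context.Context) (string, error) {
			return auth, nil
		},
	}
}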
diff --git a/vendor/github.com/docker/docker/api/types/image/summary.go b/vendor/github.com/docker/docker/api/types/image/summary.go
new file mode 100644
index 0000000..c5ae6ab
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/image/summary.go
@@ -0,0 +1,101 @@
+package image
+
+import ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+
+type Summary struct {
+
+ // Number of containers using this image. Includes both stopped and running
+ // containers.
+ //
+ // This size is not calculated by default, and depends on which API endpoint
+ // is used. `-1` indicates that the value has not been set / calculated.
+ //
+ // Required: true
+ Containers int64 `json:"Containers"`
+
+ // Date and time at which the image was created as a Unix timestamp
+ // (number of seconds since EPOCH).
+ //
+ // Required: true
+ Created int64 `json:"Created"`
+
+ // ID is the content-addressable ID of an image.
+ //
+ // This identifier is a content-addressable digest calculated from the
+ // image's configuration (which includes the digests of layers used by
+ // the image).
+ //
+ // Note that this digest differs from the `RepoDigests` below, which
+ // holds digests of image manifests that reference the image.
+ //
+ // Required: true
+ ID string `json:"Id"`
+
+ // User-defined key/value metadata.
+ // Required: true
+ Labels map[string]string `json:"Labels"`
+
+ // ID of the parent image.
+ //
+ // Depending on how the image was created, this field may be empty and
+ // is only set for images that were built/created locally. This field
+ // is empty if the image was pulled from an image registry.
+ //
+ // Required: true
+ ParentID string `json:"ParentId"`
+
+ // Descriptor is the OCI descriptor of the image target.
+ // It's only set if the daemon provides a multi-platform image store.
+ //
+ // WARNING: This is experimental and may change at any time without any backward
+ // compatibility.
+ Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"`
+
+ // Manifests is a list of image manifests available in this image. It
+ // provides a more detailed view of the platform-specific image manifests or
+ // other image-attached data like build attestations.
+ //
+ // WARNING: This is experimental and may change at any time without any backward
+ // compatibility.
+ Manifests []ManifestSummary `json:"Manifests,omitempty"`
+
+ // List of content-addressable digests of locally available image manifests
+ // that the image is referenced from. Multiple manifests can refer to the
+ // same image.
+ //
+ // These digests are usually only available if the image was either pulled
+ // from a registry, or if the image was pushed to a registry, which is when
+ // the manifest is generated and its digest calculated.
+ //
+ // Required: true
+ RepoDigests []string `json:"RepoDigests"`
+
+ // List of image names/tags in the local image cache that reference this
+ // image.
+ //
+ // Multiple image tags can refer to the same image, and this list may be
+ // empty if no tags reference the image, in which case the image is
+ // "untagged", in which case it can still be referenced by its ID.
+ //
+ // Required: true
+ RepoTags []string `json:"RepoTags"`
+
+ // Total size of image layers that are shared between this image and other
+ // images.
+ //
+ // This size is not calculated by default. `-1` indicates that the value
+ // has not been set / calculated.
+ //
+ // Required: true
+ SharedSize int64 `json:"SharedSize"`
+
+ // Total size of the image including all layers it is composed of.
+ //
+ // Required: true
+ Size int64 `json:"Size"`
+
+ // Total size of the image including all layers it is composed of.
+ //
+ // Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead.
+ VirtualSize int64 `json:"VirtualSize,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go
new file mode 100644
index 0000000..d98dbec
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/mount/mount.go
@@ -0,0 +1,157 @@
+package mount // import "github.com/docker/docker/api/types/mount"
+
+import (
+ "os"
+)
+
+// Type represents the type of a mount.
+type Type string
+
+// Type constants
+const (
+ // TypeBind is the type for mounting host dir
+ TypeBind Type = "bind"
+ // TypeVolume is the type for remote storage volumes
+ TypeVolume Type = "volume"
+ // TypeTmpfs is the type for mounting tmpfs
+ TypeTmpfs Type = "tmpfs"
+ // TypeNamedPipe is the type for mounting Windows named pipes
+ TypeNamedPipe Type = "npipe"
+ // TypeCluster is the type for Swarm Cluster Volumes.
+ TypeCluster Type = "cluster"
+ // TypeImage is the type for mounting another image's filesystem
+ TypeImage Type = "image"
+)
+
+// Mount represents a mount (volume).
+type Mount struct {
+ Type Type `json:",omitempty"`
+ // Source specifies the name of the mount. Depending on mount type, this
+ // may be a volume name or a host path, or even ignored.
+ // Source is not supported for tmpfs (must be an empty value)
+ Source string `json:",omitempty"`
+ Target string `json:",omitempty"`
+ ReadOnly bool `json:",omitempty"` // attempts recursive read-only if possible
+ Consistency Consistency `json:",omitempty"`
+
+ BindOptions *BindOptions `json:",omitempty"`
+ VolumeOptions *VolumeOptions `json:",omitempty"`
+ ImageOptions *ImageOptions `json:",omitempty"`
+ TmpfsOptions *TmpfsOptions `json:",omitempty"`
+ ClusterOptions *ClusterOptions `json:",omitempty"`
+}
+
+// Propagation represents the propagation of a mount.
+type Propagation string
+
+const (
+ // PropagationRPrivate RPRIVATE
+ PropagationRPrivate Propagation = "rprivate"
+ // PropagationPrivate PRIVATE
+ PropagationPrivate Propagation = "private"
+ // PropagationRShared RSHARED
+ PropagationRShared Propagation = "rshared"
+ // PropagationShared SHARED
+ PropagationShared Propagation = "shared"
+ // PropagationRSlave RSLAVE
+ PropagationRSlave Propagation = "rslave"
+ // PropagationSlave SLAVE
+ PropagationSlave Propagation = "slave"
+)
+
+// Propagations is the list of all valid mount propagations
+var Propagations = []Propagation{
+ PropagationRPrivate,
+ PropagationPrivate,
+ PropagationRShared,
+ PropagationShared,
+ PropagationRSlave,
+ PropagationSlave,
+}
+
+// Consistency represents the consistency requirements of a mount.
+type Consistency string
+
+const (
+ // ConsistencyFull guarantees bind mount-like consistency
+ ConsistencyFull Consistency = "consistent"
+ // ConsistencyCached mounts can cache read data and FS structure
+ ConsistencyCached Consistency = "cached"
+ // ConsistencyDelegated mounts can cache read and written data and structure
+ ConsistencyDelegated Consistency = "delegated"
+ // ConsistencyDefault provides "consistent" behavior unless overridden
+ ConsistencyDefault Consistency = "default"
+)
+
+// BindOptions defines options specific to mounts of type "bind".
+type BindOptions struct {
+ Propagation Propagation `json:",omitempty"`
+ NonRecursive bool `json:",omitempty"`
+ CreateMountpoint bool `json:",omitempty"`
+ // ReadOnlyNonRecursive makes the mount non-recursively read-only, but still leaves the mount recursive
+ // (unless NonRecursive is set to true in conjunction).
+ ReadOnlyNonRecursive bool `json:",omitempty"`
+ // ReadOnlyForceRecursive raises an error if the mount cannot be made recursively read-only.
+ ReadOnlyForceRecursive bool `json:",omitempty"`
+}
+
+// VolumeOptions represents the options for a mount of type volume.
+type VolumeOptions struct {
+ NoCopy bool `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Subpath string `json:",omitempty"`
+ DriverConfig *Driver `json:",omitempty"`
+}
+
+type ImageOptions struct {
+ Subpath string `json:",omitempty"`
+}
+
+// Driver represents a volume driver.
+type Driver struct {
+ Name string `json:",omitempty"`
+ Options map[string]string `json:",omitempty"`
+}
+
+// TmpfsOptions defines options specific to mounts of type "tmpfs".
+type TmpfsOptions struct {
+ // Size sets the size of the tmpfs, in bytes.
+ //
+ // This will be converted to an operating system specific value
+ // depending on the host. For example, on linux, it will be converted to
+ // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with
+ // docker, uses a straight byte value.
+ //
+ // Percentages are not supported.
+ SizeBytes int64 `json:",omitempty"`
+ // Mode of the tmpfs upon creation
+ Mode os.FileMode `json:",omitempty"`
+ // Options to be passed to the tmpfs mount. An array of arrays. Flag
+ // options should be provided as 1-length arrays. Other types should be
+ // provided as 2-length arrays, where the first item is the key and the
+ // second the value.
+ Options [][]string `json:",omitempty"`
+ // TODO(stevvooe): There are several more tmpfs flags, specified in the
+ // daemon, that are accepted. Only the most basic are added for now.
+ //
+ // From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56
+ //
+ // var validFlags = map[string]bool{
+ // "": true,
+ // "size": true, X
+ // "mode": true, X
+ // "uid": true,
+ // "gid": true,
+ // "nr_inodes": true,
+ // "nr_blocks": true,
+ // "mpol": true,
+ // }
+ //
+ // Some of these may be straightforward to add, but others, such as
+ // uid/gid have implications in a clustered system.
+}
+
+// ClusterOptions specifies options for a Cluster volume.
+type ClusterOptions struct {
+ // intentionally empty
+}
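A minimal sketch of a tmpfs Mount using the Options encoding described above (1-length arrays for bare flags, 2-length arrays for key/value pairs); the target path and sizes are arbitrary:

package main

import (
	"os"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	_ = mount.Mount{
		Type:   mount.TypeTmpfs,
		Target: "/run/cache", // arbitrary example target
		// Source must stay empty for tmpfs mounts.
		TmpfsOptions: &mount.TmpfsOptions{
			SizeBytes: 64 * 1024 * 1024, // rendered as a 'k'/'m'/'g' value on Linux
			Mode:      os.FileMode(0o700),
			Options: [][]string{
				{"noexec"},      // bare flag: 1-length array
				{"uid", "1000"}, // key/value: 2-length array
			},
		},
	}
}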
diff --git a/vendor/github.com/docker/docker/api/types/network/create_response.go b/vendor/github.com/docker/docker/api/types/network/create_response.go
new file mode 100644
index 0000000..c32b35b
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/network/create_response.go
@@ -0,0 +1,19 @@
+package network
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// CreateResponse NetworkCreateResponse
+//
+// OK response to NetworkCreate operation
+// swagger:model CreateResponse
+type CreateResponse struct {
+
+ // The ID of the created network.
+ // Required: true
+ ID string `json:"Id"`
+
+ // Warnings encountered when creating the network
+ // Required: true
+ Warning string `json:"Warning"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/network/endpoint.go b/vendor/github.com/docker/docker/api/types/network/endpoint.go
new file mode 100644
index 0000000..167ac70
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/network/endpoint.go
@@ -0,0 +1,153 @@
+package network
+
+import (
+ "errors"
+ "fmt"
+ "net"
+
+ "github.com/docker/docker/internal/multierror"
+)
+
+// EndpointSettings stores the network endpoint details
+type EndpointSettings struct {
+ // Configurations
+ IPAMConfig *EndpointIPAMConfig
+ Links []string
+ Aliases []string // Aliases holds the list of extra, user-specified DNS names for this endpoint.
+ // MacAddress may be used to specify a MAC address when the container is created.
+ // Once the container is running, it becomes operational data (it may contain a
+ // generated address).
+ MacAddress string
+ DriverOpts map[string]string
+
+ // GwPriority determines which endpoint will provide the default gateway
+ // for the container. The endpoint with the highest priority will be used.
+ // If multiple endpoints have the same priority, they are lexicographically
+ // sorted based on their network name, and the one that sorts first is picked.
+ GwPriority int
+ // Operational data
+ NetworkID string
+ EndpointID string
+ Gateway string
+ IPAddress string
+ IPPrefixLen int
+ IPv6Gateway string
+ GlobalIPv6Address string
+ GlobalIPv6PrefixLen int
+ // DNSNames holds all the (non fully qualified) DNS names associated to this endpoint. First entry is used to
+ // generate PTR records.
+ DNSNames []string
+}
+
+// Copy makes a deep copy of `EndpointSettings`
+func (es *EndpointSettings) Copy() *EndpointSettings {
+ epCopy := *es
+ if es.IPAMConfig != nil {
+ epCopy.IPAMConfig = es.IPAMConfig.Copy()
+ }
+
+ if es.Links != nil {
+ links := make([]string, 0, len(es.Links))
+ epCopy.Links = append(links, es.Links...)
+ }
+
+ if es.Aliases != nil {
+ aliases := make([]string, 0, len(es.Aliases))
+ epCopy.Aliases = append(aliases, es.Aliases...)
+ }
+
+ if len(es.DNSNames) > 0 {
+ epCopy.DNSNames = make([]string, len(es.DNSNames))
+ copy(epCopy.DNSNames, es.DNSNames)
+ }
+
+ return &epCopy
+}
+
+// EndpointIPAMConfig represents IPAM configurations for the endpoint
+type EndpointIPAMConfig struct {
+ IPv4Address string `json:",omitempty"`
+ IPv6Address string `json:",omitempty"`
+ LinkLocalIPs []string `json:",omitempty"`
+}
+
+// Copy makes a copy of the endpoint ipam config
+func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig {
+ cfgCopy := *cfg
+ cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs))
+ cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...)
+ return &cfgCopy
+}
+
+// NetworkSubnet describes a user-defined subnet for a specific network. It's only used to validate if an
+// EndpointIPAMConfig is valid for a specific network.
+type NetworkSubnet interface {
+ // Contains checks whether the NetworkSubnet contains [addr].
+ Contains(addr net.IP) bool
+ // IsStatic checks whether the subnet was statically allocated (i.e. user-defined).
+ IsStatic() bool
+}
+
+// IsInRange checks whether static IP addresses are valid in a specific network.
+func (cfg *EndpointIPAMConfig) IsInRange(v4Subnets []NetworkSubnet, v6Subnets []NetworkSubnet) error {
+ var errs []error
+
+ if err := validateEndpointIPAddress(cfg.IPv4Address, v4Subnets); err != nil {
+ errs = append(errs, err)
+ }
+ if err := validateEndpointIPAddress(cfg.IPv6Address, v6Subnets); err != nil {
+ errs = append(errs, err)
+ }
+
+ return multierror.Join(errs...)
+}
+
+func validateEndpointIPAddress(epAddr string, ipamSubnets []NetworkSubnet) error {
+ if epAddr == "" {
+ return nil
+ }
+
+ var staticSubnet bool
+ parsedAddr := net.ParseIP(epAddr)
+ for _, subnet := range ipamSubnets {
+ if subnet.IsStatic() {
+ staticSubnet = true
+ if subnet.Contains(parsedAddr) {
+ return nil
+ }
+ }
+ }
+
+ if staticSubnet {
+ return fmt.Errorf("no configured subnet or ip-range contain the IP address %s", epAddr)
+ }
+
+ return errors.New("user specified IP address is supported only when connecting to networks with user configured subnets")
+}
+
+// Validate checks whether cfg is valid.
+func (cfg *EndpointIPAMConfig) Validate() error {
+ if cfg == nil {
+ return nil
+ }
+
+ var errs []error
+
+ if cfg.IPv4Address != "" {
+ if addr := net.ParseIP(cfg.IPv4Address); addr == nil || addr.To4() == nil || addr.IsUnspecified() {
+ errs = append(errs, fmt.Errorf("invalid IPv4 address: %s", cfg.IPv4Address))
+ }
+ }
+ if cfg.IPv6Address != "" {
+ if addr := net.ParseIP(cfg.IPv6Address); addr == nil || addr.To4() != nil || addr.IsUnspecified() {
+ errs = append(errs, fmt.Errorf("invalid IPv6 address: %s", cfg.IPv6Address))
+ }
+ }
+ for _, addr := range cfg.LinkLocalIPs {
+ if parsed := net.ParseIP(addr); parsed == nil || parsed.IsUnspecified() {
+ errs = append(errs, fmt.Errorf("invalid link-local IP address: %s", addr))
+ }
+ }
+
+ return multierror.Join(errs...)
+}
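A minimal sketch of a NetworkSubnet implementation backed by *net.IPNet, exercising Validate and IsInRange; cidrSubnet is illustrative and not part of the package:

package main

import (
	"fmt"
	"net"

	"github.com/docker/docker/api/types/network"
)

// cidrSubnet satisfies network.NetworkSubnet; IsStatic reports true because
// this sketch treats the subnet as user-defined.
type cidrSubnet struct{ ipnet *net.IPNet }

func (s cidrSubnet) Contains(addr net.IP) bool { return s.ipnet.Contains(addr) }
func (s cidrSubnet) IsStatic() bool            { return true }

func main() {
	_, ipnet, err := net.ParseCIDR("172.20.0.0/16")
	if err != nil {
		panic(err)
	}

	cfg := &network.EndpointIPAMConfig{IPv4Address: "172.20.0.10"}
	if err := cfg.Validate(); err != nil {
		panic(err)
	}
	// 172.20.0.10 falls inside the static subnet, so this returns nil.
	fmt.Println(cfg.IsInRange([]network.NetworkSubnet{cidrSubnet{ipnet}}, nil))
}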
diff --git a/vendor/github.com/docker/docker/api/types/network/ipam.go b/vendor/github.com/docker/docker/api/types/network/ipam.go
new file mode 100644
index 0000000..f319e14
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/network/ipam.go
@@ -0,0 +1,134 @@
+package network
+
+import (
+ "errors"
+ "fmt"
+ "net/netip"
+
+ "github.com/docker/docker/internal/multierror"
+)
+
+// IPAM represents IP Address Management
+type IPAM struct {
+ Driver string
+ Options map[string]string // Per network IPAM driver options
+ Config []IPAMConfig
+}
+
+// IPAMConfig represents IPAM configurations
+type IPAMConfig struct {
+ Subnet string `json:",omitempty"`
+ IPRange string `json:",omitempty"`
+ Gateway string `json:",omitempty"`
+ AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
+}
+
+type ipFamily string
+
+const (
+ ip4 ipFamily = "IPv4"
+ ip6 ipFamily = "IPv6"
+)
+
+// ValidateIPAM checks whether the network's IPAM passed as argument is valid. It returns a joinError of the list of
+// errors found.
+func ValidateIPAM(ipam *IPAM, enableIPv6 bool) error {
+ if ipam == nil {
+ return nil
+ }
+
+ var errs []error
+ for _, cfg := range ipam.Config {
+ subnet, err := netip.ParsePrefix(cfg.Subnet)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("invalid subnet %s: invalid CIDR block notation", cfg.Subnet))
+ continue
+ }
+ subnetFamily := ip4
+ if subnet.Addr().Is6() {
+ subnetFamily = ip6
+ }
+
+ if !enableIPv6 && subnetFamily == ip6 {
+ continue
+ }
+
+ if subnet != subnet.Masked() {
+ errs = append(errs, fmt.Errorf("invalid subnet %s: it should be %s", subnet, subnet.Masked()))
+ }
+
+ if ipRangeErrs := validateIPRange(cfg.IPRange, subnet, subnetFamily); len(ipRangeErrs) > 0 {
+ errs = append(errs, ipRangeErrs...)
+ }
+
+ if err := validateAddress(cfg.Gateway, subnet, subnetFamily); err != nil {
+ errs = append(errs, fmt.Errorf("invalid gateway %s: %w", cfg.Gateway, err))
+ }
+
+ for auxName, aux := range cfg.AuxAddress {
+ if err := validateAddress(aux, subnet, subnetFamily); err != nil {
+ errs = append(errs, fmt.Errorf("invalid auxiliary address %s: %w", auxName, err))
+ }
+ }
+ }
+
+ if err := multierror.Join(errs...); err != nil {
+ return fmt.Errorf("invalid network config:\n%w", err)
+ }
+
+ return nil
+}
+
+func validateIPRange(ipRange string, subnet netip.Prefix, subnetFamily ipFamily) []error {
+ if ipRange == "" {
+ return nil
+ }
+ prefix, err := netip.ParsePrefix(ipRange)
+ if err != nil {
+ return []error{fmt.Errorf("invalid ip-range %s: invalid CIDR block notation", ipRange)}
+ }
+ family := ip4
+ if prefix.Addr().Is6() {
+ family = ip6
+ }
+
+ if family != subnetFamily {
+ return []error{fmt.Errorf("invalid ip-range %s: parent subnet is an %s block", ipRange, subnetFamily)}
+ }
+
+ var errs []error
+ if prefix.Bits() < subnet.Bits() {
+ errs = append(errs, fmt.Errorf("invalid ip-range %s: CIDR block is bigger than its parent subnet %s", ipRange, subnet))
+ }
+ if prefix != prefix.Masked() {
+ errs = append(errs, fmt.Errorf("invalid ip-range %s: it should be %s", prefix, prefix.Masked()))
+ }
+ if !subnet.Overlaps(prefix) {
+ errs = append(errs, fmt.Errorf("invalid ip-range %s: parent subnet %s doesn't contain ip-range", ipRange, subnet))
+ }
+
+ return errs
+}
+
+func validateAddress(address string, subnet netip.Prefix, subnetFamily ipFamily) error {
+ if address == "" {
+ return nil
+ }
+ addr, err := netip.ParseAddr(address)
+ if err != nil {
+ return errors.New("invalid address")
+ }
+ family := ip4
+ if addr.Is6() {
+ family = ip6
+ }
+
+ if family != subnetFamily {
+ return fmt.Errorf("parent subnet is an %s block", subnetFamily)
+ }
+ if !subnet.Contains(addr) {
+ return fmt.Errorf("parent subnet %s doesn't contain this address", subnet)
+ }
+
+ return nil
+}
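A short sketch of ValidateIPAM on a well-formed IPv4 configuration; the subnet, ip-range, and gateway values are placeholders:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/network"
)

func main() {
	ipam := &network.IPAM{
		Driver: "default",
		Config: []network.IPAMConfig{{
			Subnet:  "10.10.0.0/16",
			IPRange: "10.10.1.0/24", // contained in, and smaller than, the subnet
			Gateway: "10.10.1.1",    // inside the subnet, same family
		}},
	}
	// enableIPv6=false: any IPv6 subnets would simply be skipped.
	fmt.Println(network.ValidateIPAM(ipam, false)) // <nil>
}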
diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go
new file mode 100644
index 0000000..d34b8ab
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/network/network.go
@@ -0,0 +1,168 @@
+package network // import "github.com/docker/docker/api/types/network"
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/filters"
+)
+
+const (
+ // NetworkDefault is a platform-independent alias to choose the platform-specific default network stack.
+ NetworkDefault = "default"
+ // NetworkHost is the name of the predefined network used when the NetworkMode host is selected (only available on Linux)
+ NetworkHost = "host"
+ // NetworkNone is the name of the predefined network used when the NetworkMode none is selected (available on both Linux and Windows)
+ NetworkNone = "none"
+ // NetworkBridge is the name of the default network on Linux
+ NetworkBridge = "bridge"
+ // NetworkNat is the name of the default network on Windows
+ NetworkNat = "nat"
+)
+
+// CreateRequest is the request message sent to the server for network create call.
+type CreateRequest struct {
+ CreateOptions
+ Name string // Name is the requested name of the network.
+
+ // Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client
+ // package to older daemons.
+ CheckDuplicate *bool `json:",omitempty"`
+}
+
+// CreateOptions holds options to create a network.
+type CreateOptions struct {
+ Driver string // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`)
+ Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level).
+ EnableIPv4 *bool `json:",omitempty"` // EnableIPv4 represents whether to enable IPv4.
+ EnableIPv6 *bool `json:",omitempty"` // EnableIPv6 represents whether to enable IPv6.
+ IPAM *IPAM // IPAM is the network's IP Address Management.
+ Internal bool // Internal represents whether the network is for internal use only.
+ Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
+ Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
+ ConfigOnly bool // ConfigOnly creates a config-only network. Config-only networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
+ ConfigFrom *ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. The specified network must be a config-only network; see [CreateOptions.ConfigOnly].
+ Options map[string]string // Options specifies the network-specific options to use when creating the network.
+ Labels map[string]string // Labels holds metadata specific to the network being created.
+}
+
+// ListOptions holds parameters to filter the list of networks with.
+type ListOptions struct {
+ Filters filters.Args
+}
+
+// InspectOptions holds parameters to inspect network.
+type InspectOptions struct {
+ Scope string
+ Verbose bool
+}
+
+// ConnectOptions represents the data to be used to connect a container to the
+// network.
+type ConnectOptions struct {
+ Container string
+ EndpointConfig *EndpointSettings `json:",omitempty"`
+}
+
+// DisconnectOptions represents the data to be used to disconnect a container
+// from the network.
+type DisconnectOptions struct {
+ Container string
+ Force bool
+}
+
+// Inspect is the body of the "get network" http response message.
+type Inspect struct {
+ Name string // Name is the name of the network
+ ID string `json:"Id"` // ID uniquely identifies a network on a single machine
+ Created time.Time // Created is the time the network was created
+ Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
+ Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
+ EnableIPv4 bool // EnableIPv4 represents whether IPv4 is enabled
+ EnableIPv6 bool // EnableIPv6 represents whether IPv6 is enabled
+ IPAM IPAM // IPAM is the network's IP Address Management
+ Internal bool // Internal represents whether the network is for internal use only
+ Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
+ Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
+ ConfigFrom ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
+ ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
+ Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
+ Options map[string]string // Options holds the network-specific options to use when creating the network
+ Labels map[string]string // Labels holds metadata specific to the network being created
+ Peers []PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
+ Services map[string]ServiceInfo `json:",omitempty"`
+}
+
+// Summary is used as response when listing networks. It currently is an alias
+// for [Inspect], but may diverge in the future, as not all information may
+// be included when listing networks.
+type Summary = Inspect
+
+// Address represents an IP address
+type Address struct {
+ Addr string
+ PrefixLen int
+}
+
+// PeerInfo represents one peer of an overlay network
+type PeerInfo struct {
+ Name string
+ IP string
+}
+
+// Task carries the information about one backend task
+type Task struct {
+ Name string
+ EndpointID string
+ EndpointIP string
+ Info map[string]string
+}
+
+// ServiceInfo represents service parameters with the list of service's tasks
+type ServiceInfo struct {
+ VIP string
+ Ports []string
+ LocalLBIndex int
+ Tasks []Task
+}
+
+// EndpointResource contains network resources allocated and used for a
+// container in a network.
+type EndpointResource struct {
+ Name string
+ EndpointID string
+ MacAddress string
+ IPv4Address string
+ IPv6Address string
+}
+
+// NetworkingConfig represents the container's networking configuration for each of its interfaces
+// Carries the networking configs specified in the `docker run` and `docker network connect` commands
+type NetworkingConfig struct {
+ EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
+}
+
+// ConfigReference specifies the source which provides a network's configuration
+type ConfigReference struct {
+ Network string
+}
+
+var acceptedFilters = map[string]bool{
+ "dangling": true,
+ "driver": true,
+ "id": true,
+ "label": true,
+ "name": true,
+ "scope": true,
+ "type": true,
+}
+
+// ValidateFilters validates the list of filter args with the available filters.
+func ValidateFilters(filter filters.Args) error {
+ return filter.Validate(acceptedFilters)
+}
+
+// PruneReport contains the response for Engine API:
+// POST "/networks/prune"
+type PruneReport struct {
+ NetworksDeleted []string
+}
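A minimal sketch of ValidateFilters accepting a known filter key and rejecting an unknown one ("color" is deliberately not in acceptedFilters):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/network"
)

func main() {
	ok := filters.NewArgs(filters.Arg("driver", "bridge"))
	fmt.Println(network.ValidateFilters(ok)) // <nil>

	bad := filters.NewArgs(filters.Arg("color", "blue"))
	fmt.Println(network.ValidateFilters(bad)) // invalid filter 'color'
}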
diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go
new file mode 100644
index 0000000..abae48b
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/plugin.go
@@ -0,0 +1,203 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Plugin A plugin for the Engine API
+// swagger:model Plugin
+type Plugin struct {
+
+ // config
+ // Required: true
+ Config PluginConfig `json:"Config"`
+
+ // True if the plugin is running. False if the plugin is not running, only installed.
+ // Required: true
+ Enabled bool `json:"Enabled"`
+
+ // Id
+ ID string `json:"Id,omitempty"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // plugin remote reference used to push/pull the plugin
+ PluginReference string `json:"PluginReference,omitempty"`
+
+ // settings
+ // Required: true
+ Settings PluginSettings `json:"Settings"`
+}
+
+// PluginConfig The config of a plugin.
+// swagger:model PluginConfig
+type PluginConfig struct {
+
+ // args
+ // Required: true
+ Args PluginConfigArgs `json:"Args"`
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // Docker Version used to create the plugin
+ DockerVersion string `json:"DockerVersion,omitempty"`
+
+ // documentation
+ // Required: true
+ Documentation string `json:"Documentation"`
+
+ // entrypoint
+ // Required: true
+ Entrypoint []string `json:"Entrypoint"`
+
+ // env
+ // Required: true
+ Env []PluginEnv `json:"Env"`
+
+ // interface
+ // Required: true
+ Interface PluginConfigInterface `json:"Interface"`
+
+ // ipc host
+ // Required: true
+ IpcHost bool `json:"IpcHost"`
+
+ // linux
+ // Required: true
+ Linux PluginConfigLinux `json:"Linux"`
+
+ // mounts
+ // Required: true
+ Mounts []PluginMount `json:"Mounts"`
+
+ // network
+ // Required: true
+ Network PluginConfigNetwork `json:"Network"`
+
+ // pid host
+ // Required: true
+ PidHost bool `json:"PidHost"`
+
+ // propagated mount
+ // Required: true
+ PropagatedMount string `json:"PropagatedMount"`
+
+ // user
+ User PluginConfigUser `json:"User,omitempty"`
+
+ // work dir
+ // Required: true
+ WorkDir string `json:"WorkDir"`
+
+ // rootfs
+ Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"`
+}
+
+// PluginConfigArgs plugin config args
+// swagger:model PluginConfigArgs
+type PluginConfigArgs struct {
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // settable
+ // Required: true
+ Settable []string `json:"Settable"`
+
+ // value
+ // Required: true
+ Value []string `json:"Value"`
+}
+
+// PluginConfigInterface The interface between Docker and the plugin
+// swagger:model PluginConfigInterface
+type PluginConfigInterface struct {
+
+ // Protocol to use for clients connecting to the plugin.
+ ProtocolScheme string `json:"ProtocolScheme,omitempty"`
+
+ // socket
+ // Required: true
+ Socket string `json:"Socket"`
+
+ // types
+ // Required: true
+ Types []PluginInterfaceType `json:"Types"`
+}
+
+// PluginConfigLinux plugin config linux
+// swagger:model PluginConfigLinux
+type PluginConfigLinux struct {
+
+ // allow all devices
+ // Required: true
+ AllowAllDevices bool `json:"AllowAllDevices"`
+
+ // capabilities
+ // Required: true
+ Capabilities []string `json:"Capabilities"`
+
+ // devices
+ // Required: true
+ Devices []PluginDevice `json:"Devices"`
+}
+
+// PluginConfigNetwork plugin config network
+// swagger:model PluginConfigNetwork
+type PluginConfigNetwork struct {
+
+ // type
+ // Required: true
+ Type string `json:"Type"`
+}
+
+// PluginConfigRootfs plugin config rootfs
+// swagger:model PluginConfigRootfs
+type PluginConfigRootfs struct {
+
+ // diff ids
+ DiffIds []string `json:"diff_ids"`
+
+ // type
+ Type string `json:"type,omitempty"`
+}
+
+// PluginConfigUser plugin config user
+// swagger:model PluginConfigUser
+type PluginConfigUser struct {
+
+ // g ID
+ GID uint32 `json:"GID,omitempty"`
+
+ // UID
+ UID uint32 `json:"UID,omitempty"`
+}
+
+// PluginSettings Settings that can be modified by users.
+// swagger:model PluginSettings
+type PluginSettings struct {
+
+ // args
+ // Required: true
+ Args []string `json:"Args"`
+
+ // devices
+ // Required: true
+ Devices []PluginDevice `json:"Devices"`
+
+ // env
+ // Required: true
+ Env []string `json:"Env"`
+
+ // mounts
+ // Required: true
+ Mounts []PluginMount `json:"Mounts"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go
new file mode 100644
index 0000000..5699010
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/plugin_device.go
@@ -0,0 +1,25 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// PluginDevice plugin device
+// swagger:model PluginDevice
+type PluginDevice struct {
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // path
+ // Required: true
+ Path *string `json:"Path"`
+
+ // settable
+ // Required: true
+ Settable []string `json:"Settable"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go
new file mode 100644
index 0000000..32962dc
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/plugin_env.go
@@ -0,0 +1,25 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// PluginEnv plugin env
+// swagger:model PluginEnv
+type PluginEnv struct {
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // settable
+ // Required: true
+ Settable []string `json:"Settable"`
+
+ // value
+ // Required: true
+ Value *string `json:"Value"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go
new file mode 100644
index 0000000..c82f204
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go
@@ -0,0 +1,21 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// PluginInterfaceType plugin interface type
+// swagger:model PluginInterfaceType
+type PluginInterfaceType struct {
+
+ // capability
+ // Required: true
+ Capability string `json:"Capability"`
+
+ // prefix
+ // Required: true
+ Prefix string `json:"Prefix"`
+
+ // version
+ // Required: true
+ Version string `json:"Version"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go
new file mode 100644
index 0000000..5c031cf
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/plugin_mount.go
@@ -0,0 +1,37 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// PluginMount plugin mount
+// swagger:model PluginMount
+type PluginMount struct {
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // destination
+ // Required: true
+ Destination string `json:"Destination"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // options
+ // Required: true
+ Options []string `json:"Options"`
+
+ // settable
+ // Required: true
+ Settable []string `json:"Settable"`
+
+ // source
+ // Required: true
+ Source *string `json:"Source"`
+
+ // type
+ // Required: true
+ Type string `json:"Type"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go
new file mode 100644
index 0000000..60d1fb5
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/plugin_responses.go
@@ -0,0 +1,71 @@
+package types // import "github.com/docker/docker/api/types"
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+)
+
+// PluginsListResponse contains the response for the Engine API
+type PluginsListResponse []*Plugin
+
+// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType
+func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error {
+ versionIndex := len(p)
+ prefixIndex := 0
+ if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' {
+ return fmt.Errorf("%q is not a plugin interface type", p)
+ }
+ p = p[1 : len(p)-1]
+loop:
+ for i, b := range p {
+ switch b {
+ case '.':
+ prefixIndex = i
+ case '/':
+ versionIndex = i
+ break loop
+ }
+ }
+ t.Prefix = string(p[:prefixIndex])
+ t.Capability = string(p[prefixIndex+1 : versionIndex])
+ if versionIndex < len(p) {
+ t.Version = string(p[versionIndex+1:])
+ }
+ return nil
+}
+
+// MarshalJSON implements json.Marshaler for PluginInterfaceType
+func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(t.String())
+}
+
+// String implements fmt.Stringer for PluginInterfaceType
+func (t PluginInterfaceType) String() string {
+ return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version)
+}
+
+// PluginPrivilege describes a permission the user has to accept
+// upon installing a plugin.
+type PluginPrivilege struct {
+ Name string
+ Description string
+ Value []string
+}
+
+// PluginPrivileges is a list of PluginPrivilege
+type PluginPrivileges []PluginPrivilege
+
+func (s PluginPrivileges) Len() int {
+ return len(s)
+}
+
+func (s PluginPrivileges) Less(i, j int) bool {
+ return s[i].Name < s[j].Name
+}
+
+func (s PluginPrivileges) Swap(i, j int) {
+ sort.Strings(s[i].Value)
+ sort.Strings(s[j].Value)
+ s[i], s[j] = s[j], s[i]
+}
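
As a sketch of the parsing logic above: UnmarshalJSON splits a quoted string of the form "prefix.capability/version" into the three struct fields, and MarshalJSON reassembles it. The package main wrapper and the example value below are illustrative, not part of the vendored code:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	// "docker.volumedriver/1.0" parses into Prefix, Capability, and Version.
	var t types.PluginInterfaceType
	if err := json.Unmarshal([]byte(`"docker.volumedriver/1.0"`), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.Prefix, t.Capability, t.Version) // docker volumedriver 1.0

	// MarshalJSON reassembles the same quoted string.
	out, _ := json.Marshal(&t)
	fmt.Println(string(out)) // "docker.volumedriver/1.0"
}
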
diff --git a/vendor/github.com/docker/docker/api/types/registry/authconfig.go b/vendor/github.com/docker/docker/api/types/registry/authconfig.go
new file mode 100644
index 0000000..ebd5e4b
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/registry/authconfig.go
@@ -0,0 +1,110 @@
+package registry // import "github.com/docker/docker/api/types/registry"
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// AuthHeader is the name of the header used to send encoded registry
+// authorization credentials for registry operations (push/pull).
+const AuthHeader = "X-Registry-Auth"
+
+// RequestAuthConfig is a function interface that clients can supply
+// to retry operations after getting an authorization error.
+//
+// The function must return the [AuthHeader] value ([AuthConfig]), encoded
+// in base64url format ([RFC4648, section 5]), which can be decoded by
+// [DecodeAuthConfig].
+//
+// It must return an error if the privilege request fails.
+//
+// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5
+type RequestAuthConfig func(context.Context) (string, error)
+
+// AuthConfig contains authorization information for connecting to a Registry.
+type AuthConfig struct {
+ Username string `json:"username,omitempty"`
+ Password string `json:"password,omitempty"`
+ Auth string `json:"auth,omitempty"`
+
+ // Email is an optional value associated with the username.
+ // This field is deprecated and will be removed in a later
+ // version of docker.
+ Email string `json:"email,omitempty"`
+
+ ServerAddress string `json:"serveraddress,omitempty"`
+
+ // IdentityToken is used to authenticate the user and get
+ // an access token for the registry.
+ IdentityToken string `json:"identitytoken,omitempty"`
+
+ // RegistryToken is a bearer token to be sent to a registry
+ RegistryToken string `json:"registrytoken,omitempty"`
+}
+
+// EncodeAuthConfig serializes the auth configuration as a base64url encoded
+// ([RFC4648, section 5]) JSON string for sending through the X-Registry-Auth header.
+//
+// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5
+func EncodeAuthConfig(authConfig AuthConfig) (string, error) {
+ buf, err := json.Marshal(authConfig)
+ if err != nil {
+ return "", errInvalidParameter{err}
+ }
+ return base64.URLEncoding.EncodeToString(buf), nil
+}
+
+// DecodeAuthConfig decodes base64url encoded ([RFC4648, section 5]) JSON
+// authentication information as sent through the X-Registry-Auth header.
+//
+// This function always returns an [AuthConfig], even if an error occurs. It is up
+// to the caller to decide if authentication is required, and if the error can
+// be ignored.
+//
+// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5
+func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) {
+ if authEncoded == "" {
+ return &AuthConfig{}, nil
+ }
+
+ authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
+ return decodeAuthConfigFromReader(authJSON)
+}
+
+// DecodeAuthConfigBody decodes authentication information as sent as JSON in the
+// body of a request. This function is to provide backward compatibility with old
+// clients and API versions. Current clients and API versions expect authentication
+// to be provided through the X-Registry-Auth header.
+//
+// Like [DecodeAuthConfig], this function always returns an [AuthConfig], even if an
+// error occurs. It is up to the caller to decide if authentication is required,
+// and if the error can be ignored.
+func DecodeAuthConfigBody(rdr io.ReadCloser) (*AuthConfig, error) {
+ return decodeAuthConfigFromReader(rdr)
+}
+
+func decodeAuthConfigFromReader(rdr io.Reader) (*AuthConfig, error) {
+ authConfig := &AuthConfig{}
+ if err := json.NewDecoder(rdr).Decode(authConfig); err != nil {
+ // always return an (empty) AuthConfig to increase compatibility with
+ // the existing API.
+ return &AuthConfig{}, invalid(err)
+ }
+ return authConfig, nil
+}
+
+func invalid(err error) error {
+ return errInvalidParameter{fmt.Errorf("invalid X-Registry-Auth header: %w", err)}
+}
+
+type errInvalidParameter struct{ error }
+
+func (errInvalidParameter) InvalidParameter() {}
+
+func (e errInvalidParameter) Cause() error { return e.error }
+
+func (e errInvalidParameter) Unwrap() error { return e.error }
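
A minimal sketch of the encode/decode round trip defined above, assuming the vendored registry package is importable; the credentials are placeholders:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/registry"
)

func main() {
	// Placeholder credentials; real values come from the CLI config.
	auth := registry.AuthConfig{
		Username:      "someuser",
		Password:      "secret",
		ServerAddress: "https://index.docker.io/v1/",
	}

	// EncodeAuthConfig produces the base64url JSON payload sent in the
	// X-Registry-Auth header.
	encoded, err := registry.EncodeAuthConfig(auth)
	if err != nil {
		panic(err)
	}

	// DecodeAuthConfig reverses the encoding on the daemon side.
	decoded, err := registry.DecodeAuthConfig(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.Username == auth.Username) // true
}
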
diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go
new file mode 100644
index 0000000..f0a2113
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/registry/authenticate.go
@@ -0,0 +1,21 @@
+package registry // import "github.com/docker/docker/api/types/registry"
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// AuthenticateOKBody authenticate OK body
+// swagger:model AuthenticateOKBody
+type AuthenticateOKBody struct {
+
+ // An opaque token used to authenticate a user after a successful login
+ // Required: true
+ IdentityToken string `json:"IdentityToken"`
+
+ // The status of the authentication
+ // Required: true
+ Status string `json:"Status"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go
new file mode 100644
index 0000000..8117cb0
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/registry/registry.go
@@ -0,0 +1,116 @@
+package registry // import "github.com/docker/docker/api/types/registry"
+
+import (
+ "encoding/json"
+ "net"
+
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ServiceConfig stores daemon registry services configuration.
+type ServiceConfig struct {
+ AllowNondistributableArtifactsCIDRs []*NetIPNet `json:"AllowNondistributableArtifactsCIDRs,omitempty"` // Deprecated: non-distributable artifacts are deprecated and enabled by default. This field will be removed in the next release.
+ AllowNondistributableArtifactsHostnames []string `json:"AllowNondistributableArtifactsHostnames,omitempty"` // Deprecated: non-distributable artifacts are deprecated and enabled by default. This field will be removed in the next release.
+
+ InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
+ IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
+ Mirrors []string
+}
+
+// MarshalJSON implements a custom marshaler to include legacy fields
+// in API responses.
+func (sc ServiceConfig) MarshalJSON() ([]byte, error) {
+ tmp := map[string]interface{}{
+ "InsecureRegistryCIDRs": sc.InsecureRegistryCIDRs,
+ "IndexConfigs": sc.IndexConfigs,
+ "Mirrors": sc.Mirrors,
+ }
+ if sc.AllowNondistributableArtifactsCIDRs != nil {
+ tmp["AllowNondistributableArtifactsCIDRs"] = nil
+ }
+ if sc.AllowNondistributableArtifactsHostnames != nil {
+ tmp["AllowNondistributableArtifactsHostnames"] = nil
+ }
+ return json.Marshal(tmp)
+}
+
+// NetIPNet is the net.IPNet type, which can be marshalled to and
+// unmarshalled from JSON
+type NetIPNet net.IPNet
+
+// String returns the CIDR notation of ipnet
+func (ipnet *NetIPNet) String() string {
+ return (*net.IPNet)(ipnet).String()
+}
+
+// MarshalJSON returns the JSON representation of the IPNet
+func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
+ return json.Marshal((*net.IPNet)(ipnet).String())
+}
+
+// UnmarshalJSON sets the IPNet from JSON-encoded bytes
+func (ipnet *NetIPNet) UnmarshalJSON(b []byte) error {
+ var ipnetStr string
+ if err := json.Unmarshal(b, &ipnetStr); err != nil {
+ return err
+ }
+ _, cidr, err := net.ParseCIDR(ipnetStr)
+ if err != nil {
+ return err
+ }
+ *ipnet = NetIPNet(*cidr)
+ return nil
+}
+
+// IndexInfo contains information about a registry
+//
+// RepositoryInfo Examples:
+//
+//	{
+//	  "Index" : {
+//	    "Name" : "docker.io",
+//	    "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
+//	    "Secure" : true,
+//	    "Official" : true
+//	  },
+//	  "RemoteName" : "library/debian",
+//	  "LocalName" : "debian",
+//	  "CanonicalName" : "docker.io/debian",
+//	  "Official" : true
+//	}
+//
+//	{
+//	  "Index" : {
+//	    "Name" : "127.0.0.1:5000",
+//	    "Mirrors" : [],
+//	    "Secure" : false,
+//	    "Official" : false
+//	  },
+//	  "RemoteName" : "user/repo",
+//	  "LocalName" : "127.0.0.1:5000/user/repo",
+//	  "CanonicalName" : "127.0.0.1:5000/user/repo",
+//	  "Official" : false
+//	}
+type IndexInfo struct {
+ // Name is the name of the registry, such as "docker.io"
+ Name string
+ // Mirrors is a list of mirrors, expressed as URIs
+ Mirrors []string
+ // Secure is set to false if the registry is part of the list of
+ // insecure registries. Insecure registries accept HTTP and/or accept
+ // HTTPS with certificates from unknown CAs.
+ Secure bool
+ // Official indicates whether this is an official registry
+ Official bool
+}
+
+// DistributionInspect describes the result obtained from contacting the
+// registry to retrieve image metadata
+type DistributionInspect struct {
+ // Descriptor contains information about the manifest, including
+ // the content addressable digest
+ Descriptor ocispec.Descriptor
+ // Platforms contains the list of platforms supported by the image,
+ // obtained by parsing the manifest
+ Platforms []ocispec.Platform
+}
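
A short sketch of how NetIPNet round-trips CIDR notation through JSON, as implemented above; the address is illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/registry"
)

func main() {
	// NetIPNet round-trips CIDR notation through a JSON string.
	var n registry.NetIPNet
	if err := json.Unmarshal([]byte(`"127.0.0.0/8"`), &n); err != nil {
		panic(err)
	}
	fmt.Println(n.String()) // 127.0.0.0/8

	out, _ := json.Marshal(&n)
	fmt.Println(string(out)) // "127.0.0.0/8"
}
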
diff --git a/vendor/github.com/docker/docker/api/types/registry/search.go b/vendor/github.com/docker/docker/api/types/registry/search.go
new file mode 100644
index 0000000..994ca4c
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/registry/search.go
@@ -0,0 +1,48 @@
+package registry
+
+import (
+ "context"
+
+ "github.com/docker/docker/api/types/filters"
+)
+
+// SearchOptions holds parameters to search images with.
+type SearchOptions struct {
+ RegistryAuth string
+
+ // PrivilegeFunc is a function that clients can supply to retry operations
+ // after getting an authorization error. This function returns the registry
+ // authentication header value in base64 encoded format, or an error if the
+ // privilege request fails.
+ //
+ // For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig].
+ PrivilegeFunc func(context.Context) (string, error)
+ Filters filters.Args
+ Limit int
+}
+
+// SearchResult describes a search result returned from a registry
+type SearchResult struct {
+ // StarCount indicates the number of stars this repository has
+ StarCount int `json:"star_count"`
+ // IsOfficial is true if the result is from an official repository.
+ IsOfficial bool `json:"is_official"`
+ // Name is the name of the repository
+ Name string `json:"name"`
+ // IsAutomated indicates whether the result is automated.
+ //
+ // Deprecated: the "is_automated" field is deprecated and will always be "false".
+ IsAutomated bool `json:"is_automated"`
+ // Description is a textual description of the repository
+ Description string `json:"description"`
+}
+
+// SearchResults lists a collection of search results returned from a registry
+type SearchResults struct {
+ // Query contains the query string that generated the search results
+ Query string `json:"query"`
+ // NumResults indicates the number of results the query returned
+ NumResults int `json:"num_results"`
+ // Results is a slice containing the actual results for the search
+ Results []SearchResult `json:"results"`
+}
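
A hedged sketch of building SearchOptions; filters.NewArgs and filters.Arg come from the vendored filters package, and the filter keys shown ("is-official", "stars") are assumptions based on what the image search endpoint commonly accepts:

package main

import (
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/registry"
)

func main() {
	// Restrict results to official images with at least 100 stars.
	opts := registry.SearchOptions{
		Filters: filters.NewArgs(
			filters.Arg("is-official", "true"),
			filters.Arg("stars", "100"),
		),
		Limit: 10,
	}
	_ = opts // typically passed to the Docker client's image-search call
}
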
diff --git a/vendor/github.com/docker/docker/api/types/storage/driver_data.go b/vendor/github.com/docker/docker/api/types/storage/driver_data.go
new file mode 100644
index 0000000..009e213
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/storage/driver_data.go
@@ -0,0 +1,23 @@
+package storage
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// DriverData Information about the storage driver used to store the container's and
+// image's filesystem.
+//
+// swagger:model DriverData
+type DriverData struct {
+
+ // Low-level storage metadata, provided as key/value pairs.
+ //
+	// This information is driver-specific; it depends on the storage driver
+	// in use and should be used for informational purposes only.
+ //
+ // Required: true
+ Data map[string]string `json:"Data"`
+
+ // Name of the storage driver.
+ // Required: true
+ Name string `json:"Name"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go
new file mode 100644
index 0000000..82921ce
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice // import "github.com/docker/docker/api/types/strslice"
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 {
+ // With no input, we preserve the existing value by returning nil and
+ // leaving the target alone. This allows defining default values for
+ // the type.
+ return nil
+ }
+
+ p := make([]string, 0, 1)
+ if err := json.Unmarshal(b, &p); err != nil {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ p = append(p, s)
+ }
+
+ *e = p
+ return nil
+}
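
A small sketch showing the two accepted inputs for StrSlice, per the UnmarshalJSON above:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/strslice"
)

func main() {
	// A bare JSON string decodes into a one-element slice...
	var cmd strslice.StrSlice
	_ = json.Unmarshal([]byte(`"/bin/sh -c ls"`), &cmd)
	fmt.Println(len(cmd), cmd[0]) // 1 /bin/sh -c ls

	// ...while a JSON array decodes element by element.
	var entrypoint strslice.StrSlice
	_ = json.Unmarshal([]byte(`["/bin/sh", "-c", "ls"]`), &entrypoint)
	fmt.Println(len(entrypoint)) // 3
}
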
diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go
new file mode 100644
index 0000000..5ded7db
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/common.go
@@ -0,0 +1,48 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import (
+ "strconv"
+ "time"
+)
+
+// Version represents the internal object version.
+type Version struct {
+ Index uint64 `json:",omitempty"`
+}
+
+// String implements the fmt.Stringer interface.
+func (v Version) String() string {
+ return strconv.FormatUint(v.Index, 10)
+}
+
+// Meta is a base object inherited by most of the other ones.
+type Meta struct {
+ Version Version `json:",omitempty"`
+ CreatedAt time.Time `json:",omitempty"`
+ UpdatedAt time.Time `json:",omitempty"`
+}
+
+// Annotations represents how to describe an object.
+type Annotations struct {
+ Name string `json:",omitempty"`
+ Labels map[string]string `json:"Labels"`
+}
+
+// Driver represents a driver (network, logging, secrets backend).
+type Driver struct {
+ Name string `json:",omitempty"`
+ Options map[string]string `json:",omitempty"`
+}
+
+// TLSInfo represents the TLS information about which CA certificate is
+// trusted and who the issuer of a TLS certificate is.
+type TLSInfo struct {
+ // TrustRoot is the trusted CA root certificate in PEM format
+ TrustRoot string `json:",omitempty"`
+
+	// CertIssuerSubject is the raw subject bytes of the issuer
+ CertIssuerSubject []byte `json:",omitempty"`
+
+ // CertIssuerPublicKey is the raw public key bytes of the issuer
+ CertIssuerPublicKey []byte `json:",omitempty"`
+}
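
A brief sketch of the base types above; the index value and labels are illustrative only:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	// Version.String formats the object version index as a decimal string.
	v := swarm.Version{Index: 42}
	fmt.Println(v.String()) // 42

	// Annotations carry the user-facing name and labels of swarm objects.
	a := swarm.Annotations{
		Name:   "my-object",
		Labels: map[string]string{"env": "dev"},
	}
	_ = a
}
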
diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go
new file mode 100644
index 0000000..f9a6518
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/config.go
@@ -0,0 +1,46 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import "os"
+
+// Config represents a config.
+type Config struct {
+ ID string
+ Meta
+ Spec ConfigSpec
+}
+
+// ConfigSpec represents a config specification from a config in swarm
+type ConfigSpec struct {
+ Annotations
+
+ // Data is the data to store as a config.
+ //
+ // The maximum allowed size is 1000KB, as defined in [MaxConfigSize].
+ //
+ // [MaxConfigSize]: https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize
+ Data []byte `json:",omitempty"`
+
+ // Templating controls whether and how to evaluate the config payload as
+ // a template. If it is not set, no templating is used.
+ Templating *Driver `json:",omitempty"`
+}
+
+// ConfigReferenceFileTarget is a file target in a config reference
+type ConfigReferenceFileTarget struct {
+ Name string
+ UID string
+ GID string
+ Mode os.FileMode
+}
+
+// ConfigReferenceRuntimeTarget is a target for a config specifying that it
+// isn't mounted into the container but instead has some other purpose.
+type ConfigReferenceRuntimeTarget struct{}
+
+// ConfigReference is a reference to a config in swarm
+type ConfigReference struct {
+ File *ConfigReferenceFileTarget `json:",omitempty"`
+ Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"`
+ ConfigID string
+ ConfigName string
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go
new file mode 100644
index 0000000..30e3de7
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/container.go
@@ -0,0 +1,119 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/mount"
+)
+
+// DNSConfig specifies DNS-related configuration in the resolver
+// configuration file (resolv.conf). Detailed documentation is available at:
+// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
+// The `nameserver`, `search`, and `options` directives are supported.
+// TODO: `domain` is not supported yet.
+type DNSConfig struct {
+ // Nameservers specifies the IP addresses of the name servers
+ Nameservers []string `json:",omitempty"`
+ // Search specifies the search list for host-name lookup
+ Search []string `json:",omitempty"`
+ // Options allows certain internal resolver variables to be modified
+ Options []string `json:",omitempty"`
+}
+
+// SELinuxContext contains the SELinux labels of the container.
+type SELinuxContext struct {
+ Disable bool
+
+ User string
+ Role string
+ Type string
+ Level string
+}
+
+// SeccompMode is the type used for the enumeration of possible seccomp modes
+// in SeccompOpts
+type SeccompMode string
+
+const (
+ SeccompModeDefault SeccompMode = "default"
+ SeccompModeUnconfined SeccompMode = "unconfined"
+ SeccompModeCustom SeccompMode = "custom"
+)
+
+// SeccompOpts defines the options for configuring seccomp on a swarm-managed
+// container.
+type SeccompOpts struct {
+ // Mode is the SeccompMode used for the container.
+ Mode SeccompMode `json:",omitempty"`
+	// Profile is the custom seccomp profile as a JSON object to be used with
+ // the container. Mode should be set to SeccompModeCustom when using a
+ // custom profile in this manner.
+ Profile []byte `json:",omitempty"`
+}
+
+// AppArmorMode is the type used for the enumeration of possible AppArmor
+// modes in AppArmorOpts.
+type AppArmorMode string
+
+const (
+ AppArmorModeDefault AppArmorMode = "default"
+ AppArmorModeDisabled AppArmorMode = "disabled"
+)
+
+// AppArmorOpts defines the options for configuring AppArmor on a swarm-managed
+// container. Currently, custom AppArmor profiles are not supported.
+type AppArmorOpts struct {
+ Mode AppArmorMode `json:",omitempty"`
+}
+
+// CredentialSpec for managed service account (Windows only)
+type CredentialSpec struct {
+ Config string
+ File string
+ Registry string
+}
+
+// Privileges defines the security options for the container.
+type Privileges struct {
+ CredentialSpec *CredentialSpec
+ SELinuxContext *SELinuxContext
+ Seccomp *SeccompOpts `json:",omitempty"`
+ AppArmor *AppArmorOpts `json:",omitempty"`
+ NoNewPrivileges bool
+}
+
+// ContainerSpec represents the spec of a container.
+type ContainerSpec struct {
+ Image string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Command []string `json:",omitempty"`
+ Args []string `json:",omitempty"`
+ Hostname string `json:",omitempty"`
+ Env []string `json:",omitempty"`
+ Dir string `json:",omitempty"`
+ User string `json:",omitempty"`
+ Groups []string `json:",omitempty"`
+ Privileges *Privileges `json:",omitempty"`
+ Init *bool `json:",omitempty"`
+ StopSignal string `json:",omitempty"`
+ TTY bool `json:",omitempty"`
+ OpenStdin bool `json:",omitempty"`
+ ReadOnly bool `json:",omitempty"`
+ Mounts []mount.Mount `json:",omitempty"`
+ StopGracePeriod *time.Duration `json:",omitempty"`
+ Healthcheck *container.HealthConfig `json:",omitempty"`
+ // The format of extra hosts on swarmkit is specified in:
+ // http://man7.org/linux/man-pages/man5/hosts.5.html
+ // IP_address canonical_hostname [aliases...]
+ Hosts []string `json:",omitempty"`
+ DNSConfig *DNSConfig `json:",omitempty"`
+ Secrets []*SecretReference `json:",omitempty"`
+ Configs []*ConfigReference `json:",omitempty"`
+ Isolation container.Isolation `json:",omitempty"`
+ Sysctls map[string]string `json:",omitempty"`
+ CapabilityAdd []string `json:",omitempty"`
+ CapabilityDrop []string `json:",omitempty"`
+ Ulimits []*container.Ulimit `json:",omitempty"`
+ OomScoreAdj int64 `json:",omitempty"`
+}
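
A minimal sketch of populating a ContainerSpec; the image name and values are illustrative only, not taken from this diff:

package main

import (
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	grace := 10 * time.Second
	spec := swarm.ContainerSpec{
		Image:           "nginx:alpine",
		Env:             []string{"FOO=bar"},
		StopGracePeriod: &grace,
		DNSConfig: &swarm.DNSConfig{
			Nameservers: []string{"8.8.8.8"},
		},
	}
	_ = spec // embedded in a TaskSpec when creating a service
}
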
diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go
new file mode 100644
index 0000000..98ef328
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/network.go
@@ -0,0 +1,121 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import (
+ "github.com/docker/docker/api/types/network"
+)
+
+// Endpoint represents an endpoint.
+type Endpoint struct {
+ Spec EndpointSpec `json:",omitempty"`
+ Ports []PortConfig `json:",omitempty"`
+ VirtualIPs []EndpointVirtualIP `json:",omitempty"`
+}
+
+// EndpointSpec represents the spec of an endpoint.
+type EndpointSpec struct {
+ Mode ResolutionMode `json:",omitempty"`
+ Ports []PortConfig `json:",omitempty"`
+}
+
+// ResolutionMode represents a resolution mode.
+type ResolutionMode string
+
+const (
+ // ResolutionModeVIP VIP
+ ResolutionModeVIP ResolutionMode = "vip"
+ // ResolutionModeDNSRR DNSRR
+ ResolutionModeDNSRR ResolutionMode = "dnsrr"
+)
+
+// PortConfig represents the config of a port.
+type PortConfig struct {
+ Name string `json:",omitempty"`
+ Protocol PortConfigProtocol `json:",omitempty"`
+ // TargetPort is the port inside the container
+ TargetPort uint32 `json:",omitempty"`
+ // PublishedPort is the port on the swarm hosts
+ PublishedPort uint32 `json:",omitempty"`
+	// PublishMode is the mode in which the port is published
+ PublishMode PortConfigPublishMode `json:",omitempty"`
+}
+
+// PortConfigPublishMode represents the mode in which the port is to
+// be published.
+type PortConfigPublishMode string
+
+const (
+ // PortConfigPublishModeIngress is used for ports published
+ // for ingress load balancing using routing mesh.
+ PortConfigPublishModeIngress PortConfigPublishMode = "ingress"
+ // PortConfigPublishModeHost is used for ports published
+ // for direct host level access on the host where the task is running.
+ PortConfigPublishModeHost PortConfigPublishMode = "host"
+)
+
+// PortConfigProtocol represents the protocol of a port.
+type PortConfigProtocol string
+
+const (
+ // TODO(stevvooe): These should be used generally, not just for PortConfig.
+
+ // PortConfigProtocolTCP TCP
+ PortConfigProtocolTCP PortConfigProtocol = "tcp"
+ // PortConfigProtocolUDP UDP
+ PortConfigProtocolUDP PortConfigProtocol = "udp"
+ // PortConfigProtocolSCTP SCTP
+ PortConfigProtocolSCTP PortConfigProtocol = "sctp"
+)
+
+// EndpointVirtualIP represents the virtual IP of a port.
+type EndpointVirtualIP struct {
+ NetworkID string `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// Network represents a network.
+type Network struct {
+ ID string
+ Meta
+ Spec NetworkSpec `json:",omitempty"`
+ DriverState Driver `json:",omitempty"`
+ IPAMOptions *IPAMOptions `json:",omitempty"`
+}
+
+// NetworkSpec represents the spec of a network.
+type NetworkSpec struct {
+ Annotations
+ DriverConfiguration *Driver `json:",omitempty"`
+ IPv6Enabled bool `json:",omitempty"`
+ Internal bool `json:",omitempty"`
+ Attachable bool `json:",omitempty"`
+ Ingress bool `json:",omitempty"`
+ IPAMOptions *IPAMOptions `json:",omitempty"`
+ ConfigFrom *network.ConfigReference `json:",omitempty"`
+ Scope string `json:",omitempty"`
+}
+
+// NetworkAttachmentConfig represents the configuration of a network attachment.
+type NetworkAttachmentConfig struct {
+ Target string `json:",omitempty"`
+ Aliases []string `json:",omitempty"`
+ DriverOpts map[string]string `json:",omitempty"`
+}
+
+// NetworkAttachment represents a network attachment.
+type NetworkAttachment struct {
+ Network Network `json:",omitempty"`
+ Addresses []string `json:",omitempty"`
+}
+
+// IPAMOptions represents IPAM options.
+type IPAMOptions struct {
+ Driver Driver `json:",omitempty"`
+ Configs []IPAMConfig `json:",omitempty"`
+}
+
+// IPAMConfig represents IPAM configuration.
+type IPAMConfig struct {
+ Subnet string `json:",omitempty"`
+ Range string `json:",omitempty"`
+ Gateway string `json:",omitempty"`
+}
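
A brief sketch of an EndpointSpec publishing a port through the routing mesh, using the types defined above; the port numbers are illustrative:

package main

import "github.com/docker/docker/api/types/swarm"

func main() {
	// Publish container port 80 on swarm port 8080 via ingress mode.
	endpoint := swarm.EndpointSpec{
		Mode: swarm.ResolutionModeVIP,
		Ports: []swarm.PortConfig{{
			Protocol:      swarm.PortConfigProtocolTCP,
			TargetPort:    80,
			PublishedPort: 8080,
			PublishMode:   swarm.PortConfigPublishModeIngress,
		}},
	}
	_ = endpoint // set as ServiceSpec.EndpointSpec
}
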
diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go
new file mode 100644
index 0000000..bb98d5e
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/node.go
@@ -0,0 +1,139 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+// Node represents a node.
+type Node struct {
+ ID string
+ Meta
+ // Spec defines the desired state of the node as specified by the user.
+ // The system will honor this and will *never* modify it.
+ Spec NodeSpec `json:",omitempty"`
+ // Description encapsulates the properties of the Node as reported by the
+ // agent.
+ Description NodeDescription `json:",omitempty"`
+ // Status provides the current status of the node, as seen by the manager.
+ Status NodeStatus `json:",omitempty"`
+ // ManagerStatus provides the current status of the node's manager
+ // component, if the node is a manager.
+ ManagerStatus *ManagerStatus `json:",omitempty"`
+}
+
+// NodeSpec represents the spec of a node.
+type NodeSpec struct {
+ Annotations
+ Role NodeRole `json:",omitempty"`
+ Availability NodeAvailability `json:",omitempty"`
+}
+
+// NodeRole represents the role of a node.
+type NodeRole string
+
+const (
+ // NodeRoleWorker WORKER
+ NodeRoleWorker NodeRole = "worker"
+ // NodeRoleManager MANAGER
+ NodeRoleManager NodeRole = "manager"
+)
+
+// NodeAvailability represents the availability of a node.
+type NodeAvailability string
+
+const (
+ // NodeAvailabilityActive ACTIVE
+ NodeAvailabilityActive NodeAvailability = "active"
+ // NodeAvailabilityPause PAUSE
+ NodeAvailabilityPause NodeAvailability = "pause"
+ // NodeAvailabilityDrain DRAIN
+ NodeAvailabilityDrain NodeAvailability = "drain"
+)
+
+// NodeDescription represents the description of a node.
+type NodeDescription struct {
+ Hostname string `json:",omitempty"`
+ Platform Platform `json:",omitempty"`
+ Resources Resources `json:",omitempty"`
+ Engine EngineDescription `json:",omitempty"`
+ TLSInfo TLSInfo `json:",omitempty"`
+ CSIInfo []NodeCSIInfo `json:",omitempty"`
+}
+
+// Platform represents the platform (Arch/OS).
+type Platform struct {
+ Architecture string `json:",omitempty"`
+ OS string `json:",omitempty"`
+}
+
+// EngineDescription represents the description of an engine.
+type EngineDescription struct {
+ EngineVersion string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Plugins []PluginDescription `json:",omitempty"`
+}
+
+// NodeCSIInfo represents information about a CSI plugin available on the node
+type NodeCSIInfo struct {
+ // PluginName is the name of the CSI plugin.
+ PluginName string `json:",omitempty"`
+ // NodeID is the ID of the node as reported by the CSI plugin. This is
+ // different from the swarm node ID.
+ NodeID string `json:",omitempty"`
+ // MaxVolumesPerNode is the maximum number of volumes that may be published
+ // to this node
+ MaxVolumesPerNode int64 `json:",omitempty"`
+ // AccessibleTopology indicates the location of this node in the CSI
+ // plugin's topology
+ AccessibleTopology *Topology `json:",omitempty"`
+}
+
+// PluginDescription represents the description of an engine plugin.
+type PluginDescription struct {
+ Type string `json:",omitempty"`
+ Name string `json:",omitempty"`
+}
+
+// NodeStatus represents the status of a node.
+type NodeStatus struct {
+ State NodeState `json:",omitempty"`
+ Message string `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// Reachability represents the reachability of a node.
+type Reachability string
+
+const (
+ // ReachabilityUnknown UNKNOWN
+ ReachabilityUnknown Reachability = "unknown"
+ // ReachabilityUnreachable UNREACHABLE
+ ReachabilityUnreachable Reachability = "unreachable"
+ // ReachabilityReachable REACHABLE
+ ReachabilityReachable Reachability = "reachable"
+)
+
+// ManagerStatus represents the status of a manager.
+type ManagerStatus struct {
+ Leader bool `json:",omitempty"`
+ Reachability Reachability `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// NodeState represents the state of a node.
+type NodeState string
+
+const (
+ // NodeStateUnknown UNKNOWN
+ NodeStateUnknown NodeState = "unknown"
+ // NodeStateDown DOWN
+ NodeStateDown NodeState = "down"
+ // NodeStateReady READY
+ NodeStateReady NodeState = "ready"
+ // NodeStateDisconnected DISCONNECTED
+ NodeStateDisconnected NodeState = "disconnected"
+)
+
+// Topology defines the CSI topology of this node. This type is a duplicate
+// of github.com/docker/docker/api/types.Topology. Because the type
+// definition is so simple, and to avoid complicated structure or circular
+// imports, we just duplicate it here. See that type for full documentation.
+type Topology struct {
+ Segments map[string]string `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go
new file mode 100644
index 0000000..0c77403
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/runtime.go
@@ -0,0 +1,27 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+// RuntimeType is the type of runtime used for the TaskSpec
+type RuntimeType string
+
+// RuntimeURL is the proto type URL
+type RuntimeURL string
+
+const (
+ // RuntimeContainer is the container based runtime
+ RuntimeContainer RuntimeType = "container"
+ // RuntimePlugin is the plugin based runtime
+ RuntimePlugin RuntimeType = "plugin"
+ // RuntimeNetworkAttachment is the network attachment runtime
+ RuntimeNetworkAttachment RuntimeType = "attachment"
+
+	// RuntimeURLContainer is the proto URL for the container type
+	RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer"
+	// RuntimeURLPlugin is the proto URL for the plugin type
+ RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin"
+)
+
+// NetworkAttachmentSpec represents the runtime spec type for network
+// attachment tasks
+type NetworkAttachmentSpec struct {
+ ContainerID string
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go
new file mode 100644
index 0000000..292bd7a
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go
@@ -0,0 +1,3 @@
+//go:generate protoc --gogofaster_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto
+
+package runtime // import "github.com/docker/docker/api/types/swarm/runtime"
diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go
new file mode 100644
index 0000000..32aaf0d
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go
@@ -0,0 +1,808 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: plugin.proto
+
+package runtime
+
+import (
+ fmt "fmt"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// PluginSpec defines the base payload which clients can specify for creating
+// a service with the plugin runtime.
+type PluginSpec struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"`
+ Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges,proto3" json:"privileges,omitempty"`
+ Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"`
+ Env []string `protobuf:"bytes,5,rep,name=env,proto3" json:"env,omitempty"`
+}
+
+func (m *PluginSpec) Reset() { *m = PluginSpec{} }
+func (m *PluginSpec) String() string { return proto.CompactTextString(m) }
+func (*PluginSpec) ProtoMessage() {}
+func (*PluginSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_22a625af4bc1cc87, []int{0}
+}
+func (m *PluginSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PluginSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_PluginSpec.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *PluginSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PluginSpec.Merge(m, src)
+}
+func (m *PluginSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *PluginSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_PluginSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PluginSpec proto.InternalMessageInfo
+
+func (m *PluginSpec) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *PluginSpec) GetRemote() string {
+ if m != nil {
+ return m.Remote
+ }
+ return ""
+}
+
+func (m *PluginSpec) GetPrivileges() []*PluginPrivilege {
+ if m != nil {
+ return m.Privileges
+ }
+ return nil
+}
+
+func (m *PluginSpec) GetDisabled() bool {
+ if m != nil {
+ return m.Disabled
+ }
+ return false
+}
+
+func (m *PluginSpec) GetEnv() []string {
+ if m != nil {
+ return m.Env
+ }
+ return nil
+}
+
+// PluginPrivilege describes a permission the user has to accept
+// upon installing a plugin.
+type PluginPrivilege struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ Value []string `protobuf:"bytes,3,rep,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} }
+func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) }
+func (*PluginPrivilege) ProtoMessage() {}
+func (*PluginPrivilege) Descriptor() ([]byte, []int) {
+ return fileDescriptor_22a625af4bc1cc87, []int{1}
+}
+func (m *PluginPrivilege) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PluginPrivilege) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_PluginPrivilege.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *PluginPrivilege) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PluginPrivilege.Merge(m, src)
+}
+func (m *PluginPrivilege) XXX_Size() int {
+ return m.Size()
+}
+func (m *PluginPrivilege) XXX_DiscardUnknown() {
+ xxx_messageInfo_PluginPrivilege.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PluginPrivilege proto.InternalMessageInfo
+
+func (m *PluginPrivilege) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *PluginPrivilege) GetDescription() string {
+ if m != nil {
+ return m.Description
+ }
+ return ""
+}
+
+func (m *PluginPrivilege) GetValue() []string {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*PluginSpec)(nil), "PluginSpec")
+ proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege")
+}
+
+func init() { proto.RegisterFile("plugin.proto", fileDescriptor_22a625af4bc1cc87) }
+
+var fileDescriptor_22a625af4bc1cc87 = []byte{
+ // 225 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d,
+ 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x9a, 0xc1, 0xc8, 0xc5, 0x15, 0x00, 0x16,
+ 0x08, 0x2e, 0x48, 0x4d, 0x16, 0x12, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60,
+ 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x85, 0xc4, 0xb8, 0xd8, 0x8a, 0x52, 0x73, 0xf3, 0x4b, 0x52, 0x25,
+ 0x98, 0xc0, 0xa2, 0x50, 0x9e, 0x90, 0x01, 0x17, 0x57, 0x41, 0x51, 0x66, 0x59, 0x66, 0x4e, 0x6a,
+ 0x7a, 0x6a, 0xb1, 0x04, 0xb3, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x80, 0x1e, 0xc4, 0xb0, 0x00, 0x98,
+ 0x44, 0x10, 0x92, 0x1a, 0x21, 0x29, 0x2e, 0x8e, 0x94, 0xcc, 0xe2, 0xc4, 0xa4, 0x9c, 0xd4, 0x14,
+ 0x09, 0x16, 0x05, 0x46, 0x0d, 0x8e, 0x20, 0x38, 0x5f, 0x48, 0x80, 0x8b, 0x39, 0x35, 0xaf, 0x4c,
+ 0x82, 0x55, 0x81, 0x59, 0x83, 0x33, 0x08, 0xc4, 0x54, 0x8a, 0xe5, 0xe2, 0x47, 0x33, 0x0c, 0xab,
+ 0xf3, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0,
+ 0x6e, 0x44, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x91, 0x33,
+ 0x08, 0xc2, 0x71, 0x92, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4,
+ 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x24, 0x36,
+ 0x70, 0xd0, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x37, 0xea, 0xe2, 0xca, 0x2a, 0x01, 0x00,
+ 0x00,
+}
+
+func (m *PluginSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PluginSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Env) > 0 {
+ for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Env[iNdEx])
+ copy(dAtA[i:], m.Env[iNdEx])
+ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Env[iNdEx])))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if m.Disabled {
+ i--
+ if m.Disabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ if len(m.Privileges) > 0 {
+ for iNdEx := len(m.Privileges) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Privileges[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintPlugin(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Remote) > 0 {
+ i -= len(m.Remote)
+ copy(dAtA[i:], m.Remote)
+ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PluginPrivilege) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Value) > 0 {
+ for iNdEx := len(m.Value) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Value[iNdEx])
+ copy(dAtA[i:], m.Value[iNdEx])
+ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Value[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Description) > 0 {
+ i -= len(m.Description)
+ copy(dAtA[i:], m.Description)
+ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int {
+ offset -= sovPlugin(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *PluginSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ l = len(m.Remote)
+ if l > 0 {
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ if len(m.Privileges) > 0 {
+ for _, e := range m.Privileges {
+ l = e.Size()
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ }
+ if m.Disabled {
+ n += 2
+ }
+ if len(m.Env) > 0 {
+ for _, s := range m.Env {
+ l = len(s)
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PluginPrivilege) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ l = len(m.Description)
+ if l > 0 {
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ if len(m.Value) > 0 {
+ for _, s := range m.Value {
+ l = len(s)
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovPlugin(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozPlugin(x uint64) (n int) {
+ return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *PluginSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Remote = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Privileges = append(m.Privileges, &PluginPrivilege{})
+ if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Disabled = bool(v != 0)
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Env = append(m.Env, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipPlugin(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PluginPrivilege) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Description = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = append(m.Value, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipPlugin(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipPlugin(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthPlugin
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupPlugin
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthPlugin
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupPlugin = fmt.Errorf("proto: unexpected end of group")
+)
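
A sketch of a marshal/unmarshal round trip using the generated gogo/protobuf methods above; the field values are illustrative:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm/runtime"
)

func main() {
	in := &runtime.PluginSpec{
		Name:   "sample-plugin",
		Remote: "example.com/sample-plugin:latest",
		Privileges: []*runtime.PluginPrivilege{
			{Name: "network", Description: "host networking", Value: []string{"host"}},
		},
	}

	// Marshal emits the proto3 wire format produced by MarshalToSizedBuffer.
	data, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	// Unmarshal decodes the same bytes back into a PluginSpec.
	out := &runtime.PluginSpec{}
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(out.Name, out.Remote) // sample-plugin example.com/sample-plugin:latest
}
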
diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto
new file mode 100644
index 0000000..e311b36
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto
@@ -0,0 +1,19 @@
+syntax = "proto3";
+
+// PluginSpec defines the base payload which clients can specify for creating
+// a service with the plugin runtime.
+message PluginSpec {
+ string name = 1;
+ string remote = 2;
+ repeated PluginPrivilege privileges = 3;
+ bool disabled = 4;
+ repeated string env = 5;
+}
+
+// PluginPrivilege describes a permission the user has to accept
+// upon installing a plugin.
+message PluginPrivilege {
+ string name = 1;
+ string description = 2;
+ repeated string value = 3;
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go
new file mode 100644
index 0000000..aeb5bb5
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go
@@ -0,0 +1,50 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import "os"
+
+// Secret represents a secret.
+type Secret struct {
+ ID string
+ Meta
+ Spec SecretSpec
+}
+
+// SecretSpec represents a secret specification from a secret in swarm
+type SecretSpec struct {
+ Annotations
+
+ // Data is the data to store as a secret. It must be empty if a
+ // [Driver] is used, in which case the data is loaded from an external
+ // secret store. The maximum allowed size is 500KB, as defined in
+ // [MaxSecretSize].
+ //
+ // This field is only used to create the secret, and is not returned
+ // by other endpoints.
+ //
+ // [MaxSecretSize]: https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize
+ Data []byte `json:",omitempty"`
+
+ // Driver is the name of the secrets driver used to fetch the secret's
+ // value from an external secret store. If not set, the default built-in
+ // store is used.
+ Driver *Driver `json:",omitempty"`
+
+ // Templating controls whether and how to evaluate the secret payload as
+ // a template. If it is not set, no templating is used.
+ Templating *Driver `json:",omitempty"`
+}
+
+// SecretReferenceFileTarget is a file target in a secret reference
+type SecretReferenceFileTarget struct {
+ Name string
+ UID string
+ GID string
+ Mode os.FileMode
+}
+
+// SecretReference is a reference to a secret in swarm
+type SecretReference struct {
+ File *SecretReferenceFileTarget
+ SecretID string
+ SecretName string
+}
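
A minimal sketch of a SecretSpec using the inline Data field described above; the name and payload are illustrative:

package main

import "github.com/docker/docker/api/types/swarm"

func main() {
	// Inline secret data must stay under MaxSecretSize (500KB).
	spec := swarm.SecretSpec{
		Annotations: swarm.Annotations{Name: "db-password"},
		Data:        []byte("hunter2"),
	}
	_ = spec // sent to the daemon when creating a secret
}
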
diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go
new file mode 100644
index 0000000..5b6d5ec
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/service.go
@@ -0,0 +1,202 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import "time"
+
+// Service represents a service.
+type Service struct {
+ ID string
+ Meta
+ Spec ServiceSpec `json:",omitempty"`
+ PreviousSpec *ServiceSpec `json:",omitempty"`
+ Endpoint Endpoint `json:",omitempty"`
+ UpdateStatus *UpdateStatus `json:",omitempty"`
+
+ // ServiceStatus is an optional, extra field indicating the number of
+ // desired and running tasks. It is provided primarily as a shortcut to
+ // calculating these values client-side, which otherwise would require
+	// listing all tasks for a service, an operation that could be expensive
+	// in both computation and network traffic.
+ ServiceStatus *ServiceStatus `json:",omitempty"`
+
+ // JobStatus is the status of a Service which is in one of ReplicatedJob or
+ // GlobalJob modes. It is absent on Replicated and Global services.
+ JobStatus *JobStatus `json:",omitempty"`
+}
+
+// ServiceSpec represents the spec of a service.
+type ServiceSpec struct {
+ Annotations
+
+ // TaskTemplate defines how the service should construct new tasks when
+ // orchestrating this service.
+ TaskTemplate TaskSpec `json:",omitempty"`
+ Mode ServiceMode `json:",omitempty"`
+ UpdateConfig *UpdateConfig `json:",omitempty"`
+ RollbackConfig *UpdateConfig `json:",omitempty"`
+
+ // Networks specifies which networks the service should attach to.
+ //
+ // Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead.
+ Networks []NetworkAttachmentConfig `json:",omitempty"`
+ EndpointSpec *EndpointSpec `json:",omitempty"`
+}
+
+// ServiceMode represents the mode of a service.
+type ServiceMode struct {
+ Replicated *ReplicatedService `json:",omitempty"`
+ Global *GlobalService `json:",omitempty"`
+ ReplicatedJob *ReplicatedJob `json:",omitempty"`
+ GlobalJob *GlobalJob `json:",omitempty"`
+}
+
+// UpdateState is the state of a service update.
+type UpdateState string
+
+const (
+ // UpdateStateUpdating is the updating state.
+ UpdateStateUpdating UpdateState = "updating"
+ // UpdateStatePaused is the paused state.
+ UpdateStatePaused UpdateState = "paused"
+ // UpdateStateCompleted is the completed state.
+ UpdateStateCompleted UpdateState = "completed"
+ // UpdateStateRollbackStarted is the state with a rollback in progress.
+ UpdateStateRollbackStarted UpdateState = "rollback_started"
+ // UpdateStateRollbackPaused is the state with a rollback in progress.
+ UpdateStateRollbackPaused UpdateState = "rollback_paused"
+ // UpdateStateRollbackCompleted is the state with a rollback in progress.
+ UpdateStateRollbackCompleted UpdateState = "rollback_completed"
+)
+
+// UpdateStatus reports the status of a service update.
+type UpdateStatus struct {
+ State UpdateState `json:",omitempty"`
+ StartedAt *time.Time `json:",omitempty"`
+ CompletedAt *time.Time `json:",omitempty"`
+ Message string `json:",omitempty"`
+}
+
+// ReplicatedService is a kind of ServiceMode.
+type ReplicatedService struct {
+ Replicas *uint64 `json:",omitempty"`
+}
+
+// GlobalService is a kind of ServiceMode.
+type GlobalService struct{}
+
+// ReplicatedJob is a type of Service that executes a defined number of Tasks
+// in parallel until the specified number of Tasks have succeeded.
+type ReplicatedJob struct {
+ // MaxConcurrent indicates the maximum number of Tasks that should be
+ // executing simultaneously for this job at any given time. There may be
+	// fewer Tasks than MaxConcurrent executing simultaneously; for example, if
+ // there are fewer than MaxConcurrent tasks needed to reach
+ // TotalCompletions.
+ //
+ // If this field is empty, it will default to a max concurrency of 1.
+ MaxConcurrent *uint64 `json:",omitempty"`
+
+ // TotalCompletions is the total number of Tasks desired to run to
+ // completion.
+ //
+ // If this field is empty, the value of MaxConcurrent will be used.
+ TotalCompletions *uint64 `json:",omitempty"`
+}
+
+// GlobalJob is the type of a Service which executes a Task on every Node
+// matching the Service's placement constraints. These tasks run to completion
+// and then exit.
+//
+// This type is deliberately empty.
+type GlobalJob struct{}
+
+const (
+ // UpdateFailureActionPause PAUSE
+ UpdateFailureActionPause = "pause"
+ // UpdateFailureActionContinue CONTINUE
+ UpdateFailureActionContinue = "continue"
+ // UpdateFailureActionRollback ROLLBACK
+ UpdateFailureActionRollback = "rollback"
+
+ // UpdateOrderStopFirst STOP_FIRST
+ UpdateOrderStopFirst = "stop-first"
+ // UpdateOrderStartFirst START_FIRST
+ UpdateOrderStartFirst = "start-first"
+)
+
+// UpdateConfig represents the update configuration.
+type UpdateConfig struct {
+ // Maximum number of tasks to be updated in one iteration.
+ // 0 means unlimited parallelism.
+ Parallelism uint64
+
+ // Amount of time between updates.
+ Delay time.Duration `json:",omitempty"`
+
+	// FailureAction is the action to take when an update fails.
+ FailureAction string `json:",omitempty"`
+
+ // Monitor indicates how long to monitor a task for failure after it is
+ // created. If the task fails by ending up in one of the states
+ // REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
+ // this counts as a failure. If it fails after Monitor, it does not
+ // count as a failure. If Monitor is unspecified, a default value will
+ // be used.
+ Monitor time.Duration `json:",omitempty"`
+
+ // MaxFailureRatio is the fraction of tasks that may fail during
+ // an update before the failure action is invoked. Any task created by
+ // the current update which ends up in one of the states REJECTED,
+ // COMPLETED or FAILED within Monitor from its creation counts as a
+ // failure. The number of failures is divided by the number of tasks
+ // being updated, and if this fraction is greater than
+ // MaxFailureRatio, the failure action is invoked.
+ //
+ // If the failure action is CONTINUE, there is no effect.
+ // If the failure action is PAUSE, no more tasks will be updated until
+ // another update is started.
+ MaxFailureRatio float32
+
+ // Order indicates the order of operations when rolling out an updated
+ // task. Either the old task is shut down before the new task is
+ // started, or the new task is started before the old task is shut down.
+ Order string
+}
+
+// ServiceStatus represents the number of running tasks in a service and the
+// number of tasks desired to be running.
+type ServiceStatus struct {
+ // RunningTasks is the number of tasks for the service actually in the
+ // Running state
+ RunningTasks uint64
+
+ // DesiredTasks is the number of tasks desired to be running by the
+ // service. For replicated services, this is the replica count. For global
+ // services, this is computed by taking the number of tasks with desired
+ // state of not-Shutdown.
+ DesiredTasks uint64
+
+ // CompletedTasks is the number of tasks in the state Completed, if this
+ // service is in ReplicatedJob or GlobalJob mode. This field must be
+ // cross-referenced with the service type, because the default value of 0
+ // may mean that a service is not in a job mode, or it may mean that the
+ // job has yet to complete any tasks.
+ CompletedTasks uint64
+}
+
+// JobStatus is the status of a job-type service.
+type JobStatus struct {
+ // JobIteration is a value increased each time a Job is executed,
+ // successfully or otherwise. "Executed", in this case, means the job as a
+ // whole has been started, not that an individual Task has been launched. A
+ // job is "Executed" when its ServiceSpec is updated. JobIteration can be
+ // used to disambiguate Tasks belonging to different executions of a job.
+ //
+ // Though JobIteration will increase with each subsequent execution, it may
+ // not necessarily increase by 1, and so JobIteration should not be used to
+ // keep track of the number of times a job has been executed.
+ JobIteration Version
+
+ // LastExecution is the time that the job was last executed, as observed by
+ // Swarm manager.
+ LastExecution time.Time `json:",omitempty"`
+}
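
A short sketch tying these types together: a replicated service spec with an explicit update policy, using the constants defined above. The service name and replica count are assumptions:

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	replicas := uint64(3)
	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{Name: "web"}, // hypothetical service name
		Mode:        swarm.ServiceMode{Replicated: &swarm.ReplicatedService{Replicas: &replicas}},
		UpdateConfig: &swarm.UpdateConfig{
			Parallelism:   1,
			Delay:         10 * time.Second,
			FailureAction: swarm.UpdateFailureActionRollback,
			Order:         swarm.UpdateOrderStartFirst,
		},
	}
	fmt.Println(spec.Name, *spec.Mode.Replicated.Replicas)
}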
diff --git a/vendor/github.com/docker/docker/api/types/swarm/service_create_response.go b/vendor/github.com/docker/docker/api/types/swarm/service_create_response.go
new file mode 100644
index 0000000..9a268ff
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/service_create_response.go
@@ -0,0 +1,20 @@
+package swarm
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ServiceCreateResponse contains the information returned to a client on the
+// creation of a new service.
+//
+// swagger:model ServiceCreateResponse
+type ServiceCreateResponse struct {
+
+ // The ID of the created service.
+ ID string `json:"ID,omitempty"`
+
+ // Optional warning message.
+ //
+ // FIXME(thaJeztah): this should have "omitempty" in the generated type.
+ //
+ Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/service_update_response.go b/vendor/github.com/docker/docker/api/types/swarm/service_update_response.go
new file mode 100644
index 0000000..0417467
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/service_update_response.go
@@ -0,0 +1,12 @@
+package swarm
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ServiceUpdateResponse service update response
+// swagger:model ServiceUpdateResponse
+type ServiceUpdateResponse struct {
+
+ // Optional warning messages
+ Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go
new file mode 100644
index 0000000..1b4be6f
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go
@@ -0,0 +1,237 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import (
+ "time"
+)
+
+// ClusterInfo represents info about the cluster for outputting in "info".
+// It contains the same information as "Swarm", but without the JoinTokens.
+type ClusterInfo struct {
+ ID string
+ Meta
+ Spec Spec
+ TLSInfo TLSInfo
+ RootRotationInProgress bool
+ DefaultAddrPool []string
+ SubnetSize uint32
+ DataPathPort uint32
+}
+
+// Swarm represents a swarm.
+type Swarm struct {
+ ClusterInfo
+ JoinTokens JoinTokens
+}
+
+// JoinTokens contains the tokens workers and managers need to join the swarm.
+type JoinTokens struct {
+ // Worker is the join token workers may use to join the swarm.
+ Worker string
+ // Manager is the join token managers may use to join the swarm.
+ Manager string
+}
+
+// Spec represents the spec of a swarm.
+type Spec struct {
+ Annotations
+
+ Orchestration OrchestrationConfig `json:",omitempty"`
+ Raft RaftConfig `json:",omitempty"`
+ Dispatcher DispatcherConfig `json:",omitempty"`
+ CAConfig CAConfig `json:",omitempty"`
+ TaskDefaults TaskDefaults `json:",omitempty"`
+ EncryptionConfig EncryptionConfig `json:",omitempty"`
+}
+
+// OrchestrationConfig represents orchestration configuration.
+type OrchestrationConfig struct {
+ // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
+ // node. If negative, never remove completed or failed tasks.
+ TaskHistoryRetentionLimit *int64 `json:",omitempty"`
+}
+
+// TaskDefaults parameterizes cluster-level task creation with default values.
+type TaskDefaults struct {
+ // LogDriver selects the log driver to use for tasks created in the
+ // orchestrator if unspecified by a service.
+ //
+	// Updating this value will only have an effect on new tasks. Old tasks
+	// will continue to use their previously configured log driver until
+ // recreated.
+ LogDriver *Driver `json:",omitempty"`
+}
+
+// EncryptionConfig controls at-rest encryption of data and keys.
+type EncryptionConfig struct {
+	// AutoLockManagers specifies whether or not managers' TLS keys and raft data
+ // should be encrypted at rest in such a way that they must be unlocked
+ // before the manager node starts up again.
+ AutoLockManagers bool
+}
+
+// RaftConfig represents raft configuration.
+type RaftConfig struct {
+ // SnapshotInterval is the number of log entries between snapshots.
+ SnapshotInterval uint64 `json:",omitempty"`
+
+ // KeepOldSnapshots is the number of snapshots to keep beyond the
+ // current snapshot.
+ KeepOldSnapshots *uint64 `json:",omitempty"`
+
+ // LogEntriesForSlowFollowers is the number of log entries to keep
+ // around to sync up slow followers after a snapshot is created.
+ LogEntriesForSlowFollowers uint64 `json:",omitempty"`
+
+ // ElectionTick is the number of ticks that a follower will wait for a message
+ // from the leader before becoming a candidate and starting an election.
+ // ElectionTick must be greater than HeartbeatTick.
+ //
+ // A tick currently defaults to one second, so these translate directly to
+ // seconds currently, but this is NOT guaranteed.
+ ElectionTick int
+
+ // HeartbeatTick is the number of ticks between heartbeats. Every
+ // HeartbeatTick ticks, the leader will send a heartbeat to the
+ // followers.
+ //
+ // A tick currently defaults to one second, so these translate directly to
+ // seconds currently, but this is NOT guaranteed.
+ HeartbeatTick int
+}
+
+// DispatcherConfig represents dispatcher configuration.
+type DispatcherConfig struct {
+	// HeartbeatPeriod defines how often an agent should send heartbeats to
+	// the dispatcher.
+ HeartbeatPeriod time.Duration `json:",omitempty"`
+}
+
+// CAConfig represents CA configuration.
+type CAConfig struct {
+ // NodeCertExpiry is the duration certificates should be issued for
+ NodeCertExpiry time.Duration `json:",omitempty"`
+
+ // ExternalCAs is a list of CAs to which a manager node will make
+ // certificate signing requests for node certificates.
+ ExternalCAs []*ExternalCA `json:",omitempty"`
+
+ // SigningCACert and SigningCAKey specify the desired signing root CA and
+ // root CA key for the swarm. When inspecting the cluster, the key will
+ // be redacted.
+ SigningCACert string `json:",omitempty"`
+ SigningCAKey string `json:",omitempty"`
+
+ // If this value changes, and there is no specified signing cert and key,
+ // then the swarm is forced to generate a new root certificate and key.
+ ForceRotate uint64 `json:",omitempty"`
+}
+
+// ExternalCAProtocol represents type of external CA.
+type ExternalCAProtocol string
+
+// ExternalCAProtocolCFSSL CFSSL
+const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"
+
+// ExternalCA defines external CA to be used by the cluster.
+type ExternalCA struct {
+ // Protocol is the protocol used by this external CA.
+ Protocol ExternalCAProtocol
+
+ // URL is the URL where the external CA can be reached.
+ URL string
+
+ // Options is a set of additional key/value pairs whose interpretation
+ // depends on the specified CA type.
+ Options map[string]string `json:",omitempty"`
+
+ // CACert specifies which root CA is used by this external CA. This certificate must
+ // be in PEM format.
+ CACert string
+}
+
+// InitRequest is the request used to init a swarm.
+type InitRequest struct {
+ ListenAddr string
+ AdvertiseAddr string
+ DataPathAddr string
+ DataPathPort uint32
+ ForceNewCluster bool
+ Spec Spec
+ AutoLockManagers bool
+ Availability NodeAvailability
+ DefaultAddrPool []string
+ SubnetSize uint32
+}
+
+// JoinRequest is the request used to join a swarm.
+type JoinRequest struct {
+ ListenAddr string
+ AdvertiseAddr string
+ DataPathAddr string
+ RemoteAddrs []string
+	JoinToken string // accepted as a secret
+ Availability NodeAvailability
+}
+
+// UnlockRequest is the request used to unlock a swarm.
+type UnlockRequest struct {
+ // UnlockKey is the unlock key in ASCII-armored format.
+ UnlockKey string
+}
+
+// LocalNodeState represents the state of the local node.
+type LocalNodeState string
+
+const (
+ // LocalNodeStateInactive INACTIVE
+ LocalNodeStateInactive LocalNodeState = "inactive"
+ // LocalNodeStatePending PENDING
+ LocalNodeStatePending LocalNodeState = "pending"
+ // LocalNodeStateActive ACTIVE
+ LocalNodeStateActive LocalNodeState = "active"
+ // LocalNodeStateError ERROR
+ LocalNodeStateError LocalNodeState = "error"
+ // LocalNodeStateLocked LOCKED
+ LocalNodeStateLocked LocalNodeState = "locked"
+)
+
+// Info represents generic information about swarm.
+type Info struct {
+ NodeID string
+ NodeAddr string
+
+ LocalNodeState LocalNodeState
+ ControlAvailable bool
+ Error string
+
+ RemoteManagers []Peer
+ Nodes int `json:",omitempty"`
+ Managers int `json:",omitempty"`
+
+ Cluster *ClusterInfo `json:",omitempty"`
+
+ Warnings []string `json:",omitempty"`
+}
+
+// Status provides information about the current swarm status and role,
+// obtained from the "Swarm" header in the API response.
+type Status struct {
+ // NodeState represents the state of the node.
+ NodeState LocalNodeState
+
+ // ControlAvailable indicates if the node is a swarm manager.
+ ControlAvailable bool
+}
+
+// Peer represents a peer.
+type Peer struct {
+ NodeID string
+ Addr string
+}
+
+// UpdateFlags contains flags for SwarmUpdate.
+type UpdateFlags struct {
+ RotateWorkerToken bool
+ RotateManagerToken bool
+ RotateManagerUnlockKey bool
+}
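
A minimal sketch of an InitRequest as a client might populate it before calling the swarm-init endpoint; the addresses here are placeholders:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	req := swarm.InitRequest{
		ListenAddr:       "0.0.0.0:2377",      // hypothetical listen address
		AdvertiseAddr:    "192.168.1.10:2377", // hypothetical advertise address
		AutoLockManagers: true,                // require an unlock key after restarts
	}
	fmt.Println(req.ListenAddr, req.AutoLockManagers)
}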
diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go
new file mode 100644
index 0000000..ad3eeca
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/task.go
@@ -0,0 +1,225 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/swarm/runtime"
+)
+
+// TaskState represents the state of a task.
+type TaskState string
+
+const (
+ // TaskStateNew NEW
+ TaskStateNew TaskState = "new"
+ // TaskStateAllocated ALLOCATED
+ TaskStateAllocated TaskState = "allocated"
+ // TaskStatePending PENDING
+ TaskStatePending TaskState = "pending"
+ // TaskStateAssigned ASSIGNED
+ TaskStateAssigned TaskState = "assigned"
+ // TaskStateAccepted ACCEPTED
+ TaskStateAccepted TaskState = "accepted"
+ // TaskStatePreparing PREPARING
+ TaskStatePreparing TaskState = "preparing"
+ // TaskStateReady READY
+ TaskStateReady TaskState = "ready"
+ // TaskStateStarting STARTING
+ TaskStateStarting TaskState = "starting"
+ // TaskStateRunning RUNNING
+ TaskStateRunning TaskState = "running"
+ // TaskStateComplete COMPLETE
+ TaskStateComplete TaskState = "complete"
+ // TaskStateShutdown SHUTDOWN
+ TaskStateShutdown TaskState = "shutdown"
+ // TaskStateFailed FAILED
+ TaskStateFailed TaskState = "failed"
+ // TaskStateRejected REJECTED
+ TaskStateRejected TaskState = "rejected"
+ // TaskStateRemove REMOVE
+ TaskStateRemove TaskState = "remove"
+ // TaskStateOrphaned ORPHANED
+ TaskStateOrphaned TaskState = "orphaned"
+)
+
+// Task represents a task.
+type Task struct {
+ ID string
+ Meta
+ Annotations
+
+ Spec TaskSpec `json:",omitempty"`
+ ServiceID string `json:",omitempty"`
+ Slot int `json:",omitempty"`
+ NodeID string `json:",omitempty"`
+ Status TaskStatus `json:",omitempty"`
+ DesiredState TaskState `json:",omitempty"`
+ NetworksAttachments []NetworkAttachment `json:",omitempty"`
+ GenericResources []GenericResource `json:",omitempty"`
+
+ // JobIteration is the JobIteration of the Service that this Task was
+ // spawned from, if the Service is a ReplicatedJob or GlobalJob. This is
+ // used to determine which Tasks belong to which run of the job. This field
+ // is absent if the Service mode is Replicated or Global.
+ JobIteration *Version `json:",omitempty"`
+
+ // Volumes is the list of VolumeAttachments for this task. It specifies
+ // which particular volumes are to be used by this particular task, and
+	// which mounts in the spec they fulfill.
+ Volumes []VolumeAttachment
+}
+
+// TaskSpec represents the spec of a task.
+type TaskSpec struct {
+ // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive.
+	// PluginSpec is only used when the `Runtime` field is set to `plugin`.
+ // NetworkAttachmentSpec is used if the `Runtime` field is set to
+ // `attachment`.
+ ContainerSpec *ContainerSpec `json:",omitempty"`
+ PluginSpec *runtime.PluginSpec `json:",omitempty"`
+ NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"`
+
+ Resources *ResourceRequirements `json:",omitempty"`
+ RestartPolicy *RestartPolicy `json:",omitempty"`
+ Placement *Placement `json:",omitempty"`
+ Networks []NetworkAttachmentConfig `json:",omitempty"`
+
+ // LogDriver specifies the LogDriver to use for tasks created from this
+	// spec. If not present, the cluster default on swarm.Spec will be
+ // used, finally falling back to the engine default if not specified.
+ LogDriver *Driver `json:",omitempty"`
+
+ // ForceUpdate is a counter that triggers an update even if no relevant
+ // parameters have been changed.
+ ForceUpdate uint64
+
+ Runtime RuntimeType `json:",omitempty"`
+}
+
+// Resources represents resources (CPU/Memory) which can be advertised by a
+// node and requested to be reserved for a task.
+type Resources struct {
+ NanoCPUs int64 `json:",omitempty"`
+ MemoryBytes int64 `json:",omitempty"`
+ GenericResources []GenericResource `json:",omitempty"`
+}
+
+// Limit describes limits on resources which can be requested by a task.
+type Limit struct {
+ NanoCPUs int64 `json:",omitempty"`
+ MemoryBytes int64 `json:",omitempty"`
+ Pids int64 `json:",omitempty"`
+}
+
+// GenericResource represents a "user defined" resource which can
+// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1)
+type GenericResource struct {
+ NamedResourceSpec *NamedGenericResource `json:",omitempty"`
+ DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"`
+}
+
+// NamedGenericResource represents a "user defined" resource which is defined
+// as a string.
+// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
+// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...)
+type NamedGenericResource struct {
+ Kind string `json:",omitempty"`
+ Value string `json:",omitempty"`
+}
+
+// DiscreteGenericResource represents a "user defined" resource which is defined
+// as an integer
+// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
+// Value is used to count the resource (SSD=5, HDD=3, ...)
+type DiscreteGenericResource struct {
+ Kind string `json:",omitempty"`
+ Value int64 `json:",omitempty"`
+}
+
+// ResourceRequirements represents resources requirements.
+type ResourceRequirements struct {
+ Limits *Limit `json:",omitempty"`
+ Reservations *Resources `json:",omitempty"`
+}
+
+// Placement represents orchestration parameters.
+type Placement struct {
+ Constraints []string `json:",omitempty"`
+ Preferences []PlacementPreference `json:",omitempty"`
+ MaxReplicas uint64 `json:",omitempty"`
+
+ // Platforms stores all the platforms that the image can run on.
+ // This field is used in the platform filter for scheduling. If empty,
+ // then the platform filter is off, meaning there are no scheduling restrictions.
+ Platforms []Platform `json:",omitempty"`
+}
+
+// PlacementPreference provides a way to make the scheduler aware of factors
+// such as topology.
+type PlacementPreference struct {
+ Spread *SpreadOver
+}
+
+// SpreadOver is a scheduling preference that instructs the scheduler to spread
+// tasks evenly over groups of nodes identified by labels.
+type SpreadOver struct {
+ // label descriptor, such as engine.labels.az
+ SpreadDescriptor string
+}
+
+// RestartPolicy represents the restart policy.
+type RestartPolicy struct {
+ Condition RestartPolicyCondition `json:",omitempty"`
+ Delay *time.Duration `json:",omitempty"`
+ MaxAttempts *uint64 `json:",omitempty"`
+ Window *time.Duration `json:",omitempty"`
+}
+
+// RestartPolicyCondition represents when to restart.
+type RestartPolicyCondition string
+
+const (
+ // RestartPolicyConditionNone NONE
+ RestartPolicyConditionNone RestartPolicyCondition = "none"
+ // RestartPolicyConditionOnFailure ON_FAILURE
+ RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
+ // RestartPolicyConditionAny ANY
+ RestartPolicyConditionAny RestartPolicyCondition = "any"
+)
+
+// TaskStatus represents the status of a task.
+type TaskStatus struct {
+ Timestamp time.Time `json:",omitempty"`
+ State TaskState `json:",omitempty"`
+ Message string `json:",omitempty"`
+ Err string `json:",omitempty"`
+ ContainerStatus *ContainerStatus `json:",omitempty"`
+ PortStatus PortStatus `json:",omitempty"`
+}
+
+// ContainerStatus represents the status of a container.
+type ContainerStatus struct {
+ ContainerID string
+ PID int
+ ExitCode int
+}
+
+// PortStatus represents the status of a task's host ports, for tasks whose
+// service has published host ports.
+type PortStatus struct {
+ Ports []PortConfig `json:",omitempty"`
+}
+
+// VolumeAttachment contains the data associating a Volume with a Task.
+type VolumeAttachment struct {
+ // ID is the Swarmkit ID of the Volume. This is not the CSI VolumeId.
+ ID string `json:",omitempty"`
+
+ // Source, together with Target, indicates the Mount, as specified in the
+ // ContainerSpec, that this volume fulfills.
+ Source string `json:",omitempty"`
+
+ // Target, together with Source, indicates the Mount, as specified
+ // in the ContainerSpec, that this volume fulfills.
+ Target string `json:",omitempty"`
+}
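
A small sketch of consuming these task types, counting tasks that have reached the running state; countRunning is a hypothetical helper:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

// countRunning tallies tasks whose observed state is running.
func countRunning(tasks []swarm.Task) int {
	n := 0
	for _, t := range tasks {
		if t.Status.State == swarm.TaskStateRunning {
			n++
		}
	}
	return n
}

func main() {
	tasks := []swarm.Task{
		{Status: swarm.TaskStatus{State: swarm.TaskStateRunning}},
		{Status: swarm.TaskStatus{State: swarm.TaskStateFailed}},
	}
	fmt.Println(countRunning(tasks)) // 1
}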
diff --git a/vendor/github.com/docker/docker/api/types/system/info.go b/vendor/github.com/docker/docker/api/types/system/info.go
new file mode 100644
index 0000000..8a2444d
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/system/info.go
@@ -0,0 +1,153 @@
+package system
+
+import (
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/registry"
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// Info contains response of Engine API:
+// GET "/info"
+type Info struct {
+ ID string
+ Containers int
+ ContainersRunning int
+ ContainersPaused int
+ ContainersStopped int
+ Images int
+ Driver string
+ DriverStatus [][2]string
+ SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API
+ Plugins PluginsInfo
+ MemoryLimit bool
+ SwapLimit bool
+ KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes
+ KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2.
+ CPUCfsPeriod bool `json:"CpuCfsPeriod"`
+ CPUCfsQuota bool `json:"CpuCfsQuota"`
+ CPUShares bool
+ CPUSet bool
+ PidsLimit bool
+ IPv4Forwarding bool
+ BridgeNfIptables bool `json:"BridgeNfIptables"` // Deprecated: netfilter module is now loaded on-demand and no longer during daemon startup, making this field obsolete. This field is always false and will be removed in the next release.
+ BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` // Deprecated: netfilter module is now loaded on-demand and no longer during daemon startup, making this field obsolete. This field is always false and will be removed in the next release.
+ Debug bool
+ NFd int
+ OomKillDisable bool
+ NGoroutines int
+ SystemTime string
+ LoggingDriver string
+ CgroupDriver string
+ CgroupVersion string `json:",omitempty"`
+ NEventsListener int
+ KernelVersion string
+ OperatingSystem string
+ OSVersion string
+ OSType string
+ Architecture string
+ IndexServerAddress string
+ RegistryConfig *registry.ServiceConfig
+ NCPU int
+ MemTotal int64
+ GenericResources []swarm.GenericResource
+ DockerRootDir string
+ HTTPProxy string `json:"HttpProxy"`
+ HTTPSProxy string `json:"HttpsProxy"`
+ NoProxy string
+ Name string
+ Labels []string
+ ExperimentalBuild bool
+ ServerVersion string
+ Runtimes map[string]RuntimeWithStatus
+ DefaultRuntime string
+ Swarm swarm.Info
+ // LiveRestoreEnabled determines whether containers should be kept
+	// running when the daemon is shut down, or upon daemon start if
+	// running containers are detected.
+ LiveRestoreEnabled bool
+ Isolation container.Isolation
+ InitBinary string
+ ContainerdCommit Commit
+ RuncCommit Commit
+ InitCommit Commit
+ SecurityOptions []string
+ ProductLicense string `json:",omitempty"`
+ DefaultAddressPools []NetworkAddressPool `json:",omitempty"`
+ CDISpecDirs []string
+
+ Containerd *ContainerdInfo `json:",omitempty"`
+
+ // Warnings contains a slice of warnings that occurred while collecting
+ // system information. These warnings are intended to be informational
+ // messages for the user, and are not intended to be parsed / used for
+ // other purposes, as they do not have a fixed format.
+ Warnings []string
+}
+
+// ContainerdInfo holds information about the containerd instance used by the daemon.
+type ContainerdInfo struct {
+ // Address is the path to the containerd socket.
+ Address string `json:",omitempty"`
+ // Namespaces is the containerd namespaces used by the daemon.
+ Namespaces ContainerdNamespaces
+}
+
+// ContainerdNamespaces reflects the containerd namespaces used by the daemon.
+//
+// These namespaces can be configured in the daemon configuration, and are
+// considered to be used exclusively by the daemon.
+//
+// As these namespaces are considered to be exclusively accessed
+// by the daemon, it is not recommended to change these values,
+// or to change them to a value that is used by other systems,
+// such as cri-containerd.
+type ContainerdNamespaces struct {
+ // Containers holds the default containerd namespace used for
+ // containers managed by the daemon.
+ //
+ // The default namespace for containers is "moby", but will be
+ // suffixed with the `<uid>.<gid>` of the remapped `root` if
+ // user-namespaces are enabled and the containerd image-store
+ // is used.
+ Containers string
+
+ // Plugins holds the default containerd namespace used for
+ // plugins managed by the daemon.
+ //
+ // The default namespace for plugins is "moby", but will be
+ // suffixed with the `<uid>.<gid>` of the remapped `root` if
+ // user-namespaces are enabled and the containerd image-store
+ // is used.
+ Plugins string
+}
+
+// PluginsInfo is a temp struct holding the names of the plugins registered
+// with the docker daemon. It is used by the [Info] struct.
+type PluginsInfo struct {
+ // List of Volume plugins registered
+ Volume []string
+ // List of Network plugins registered
+ Network []string
+ // List of Authorization plugins registered
+ Authorization []string
+ // List of Log plugins registered
+ Log []string
+}
+
+// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
+// in the version-string of external tools, such as containerd, or runC.
+type Commit struct {
+ // ID is the actual commit ID or version of external tool.
+ ID string
+
+ // Expected is the commit ID of external tool expected by dockerd as set at build time.
+ //
+ // Deprecated: this field is no longer used in API v1.49, but kept for backward-compatibility with older API versions.
+ Expected string
+}
+
+// NetworkAddressPool is a temp struct used by [Info] struct.
+type NetworkAddressPool struct {
+ Base string
+ Size int
+}
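
A brief sketch of reading a few Info fields, including the nested swarm state; summarize is a hypothetical helper and the sample values are made up:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/api/types/system"
)

// summarize prints a handful of fields from an Info response.
func summarize(info system.Info) {
	fmt.Printf("%s: %d containers (%d running)\n", info.Name, info.Containers, info.ContainersRunning)
	if info.Swarm.LocalNodeState == swarm.LocalNodeStateActive {
		fmt.Println("swarm is active; manager:", info.Swarm.ControlAvailable)
	}
	for _, w := range info.Warnings {
		fmt.Println("warning:", w)
	}
}

func main() {
	summarize(system.Info{Name: "example-host", Containers: 2, ContainersRunning: 1})
}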
diff --git a/vendor/github.com/docker/docker/api/types/system/runtime.go b/vendor/github.com/docker/docker/api/types/system/runtime.go
new file mode 100644
index 0000000..d077295
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/system/runtime.go
@@ -0,0 +1,20 @@
+package system
+
+// Runtime describes an OCI runtime
+type Runtime struct {
+ // "Legacy" runtime configuration for runc-compatible runtimes.
+
+ Path string `json:"path,omitempty"`
+ Args []string `json:"runtimeArgs,omitempty"`
+
+ // Shimv2 runtime configuration. Mutually exclusive with the legacy config above.
+
+ Type string `json:"runtimeType,omitempty"`
+ Options map[string]interface{} `json:"options,omitempty"`
+}
+
+// RuntimeWithStatus extends [Runtime] to hold [RuntimeStatus].
+type RuntimeWithStatus struct {
+ Runtime
+ Status map[string]string `json:"status,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/system/security_opts.go b/vendor/github.com/docker/docker/api/types/system/security_opts.go
new file mode 100644
index 0000000..edff3eb
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/system/security_opts.go
@@ -0,0 +1,48 @@
+package system
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// SecurityOpt contains the name and options of a security option
+type SecurityOpt struct {
+ Name string
+ Options []KeyValue
+}
+
+// DecodeSecurityOptions decodes a security options string slice to a
+// type-safe [SecurityOpt].
+func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) {
+ so := []SecurityOpt{}
+ for _, opt := range opts {
+ // support output from a < 1.13 docker daemon
+ if !strings.Contains(opt, "=") {
+ so = append(so, SecurityOpt{Name: opt})
+ continue
+ }
+ secopt := SecurityOpt{}
+ for _, s := range strings.Split(opt, ",") {
+ k, v, ok := strings.Cut(s, "=")
+ if !ok {
+ return nil, fmt.Errorf("invalid security option %q", s)
+ }
+ if k == "" || v == "" {
+ return nil, errors.New("invalid empty security option")
+ }
+ if k == "name" {
+ secopt.Name = v
+ continue
+ }
+ secopt.Options = append(secopt.Options, KeyValue{Key: k, Value: v})
+ }
+ so = append(so, secopt)
+ }
+ return so, nil
+}
+
+// KeyValue holds a key/value pair.
+type KeyValue struct {
+ Key, Value string
+}
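
A usage sketch for DecodeSecurityOptions, with input shaped like the daemon's SecurityOptions strings; the option strings shown are illustrative:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/system"
)

func main() {
	// Each entry is a comma-separated list of key=value pairs; the "name"
	// key becomes SecurityOpt.Name and the rest become Options.
	opts, err := system.DecodeSecurityOptions([]string{
		"name=seccomp,profile=default",
		"name=apparmor",
	})
	if err != nil {
		panic(err)
	}
	for _, o := range opts {
		fmt.Println(o.Name, o.Options)
	}
}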
diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go
new file mode 100644
index 0000000..cab5c32
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go
@@ -0,0 +1,131 @@
+package time // import "github.com/docker/docker/api/types/time"
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// These are additional predefined layouts for use in Time.Format and Time.Parse
+// with --since and --until parameters for `docker logs` and `docker events`
+const (
+ rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone
+ rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
+ dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00
+ dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00
+)
+
+// GetTimestamp tries to parse the given string as a Go duration,
+// then as an RFC3339 time, and finally as a Unix timestamp. If
+// any of these succeed, it returns a Unix timestamp
+// as a string; otherwise it returns the given value unchanged.
+// In case of duration input, the returned timestamp is computed
+// as the given reference time minus the amount of the duration.
+func GetTimestamp(value string, reference time.Time) (string, error) {
+ if d, err := time.ParseDuration(value); value != "0" && err == nil {
+ return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
+ }
+
+ var format string
+	// If the string has a Z, a +, or three dashes, use time.Parse; otherwise use time.ParseInLocation.
+ parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+
+ if strings.Contains(value, ".") {
+ if parseInLocation {
+ format = rFC3339NanoLocal
+ } else {
+ format = time.RFC3339Nano
+ }
+ } else if strings.Contains(value, "T") {
+ // we want the number of colons in the T portion of the timestamp
+ tcolons := strings.Count(value, ":")
+ // if parseInLocation is off and we have a +/- zone offset (not Z) then
+		// there will be an extra colon in the input for the tz offset; subtract
+		// that colon from the tcolons count
+ if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
+ tcolons--
+ }
+ if parseInLocation {
+ switch tcolons {
+ case 0:
+ format = "2006-01-02T15"
+ case 1:
+ format = "2006-01-02T15:04"
+ default:
+ format = rFC3339Local
+ }
+ } else {
+ switch tcolons {
+ case 0:
+ format = "2006-01-02T15Z07:00"
+ case 1:
+ format = "2006-01-02T15:04Z07:00"
+ default:
+ format = time.RFC3339
+ }
+ }
+ } else if parseInLocation {
+ format = dateLocal
+ } else {
+ format = dateWithZone
+ }
+
+ var t time.Time
+ var err error
+
+ if parseInLocation {
+ t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
+ } else {
+ t, err = time.Parse(format, value)
+ }
+
+ if err != nil {
+ // if there is a `-` then it's an RFC3339 like timestamp
+ if strings.Contains(value, "-") {
+ return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
+ }
+ if _, _, err := parseTimestamp(value); err != nil {
+ return "", fmt.Errorf("failed to parse value as time or duration: %q", value)
+ }
+ return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
+ }
+
+ return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
+}
+
+// ParseTimestamps returns seconds and nanoseconds from a timestamp that has
+// the format ("%d.%09d", time.Unix(), int64(time.Nanosecond())).
+// If the incoming nanosecond portion is longer than 9 digits it is truncated.
+// The expectation is that the seconds and nanoseconds will be used to create a
+// time variable. For example:
+//
+//	seconds, nanoseconds, _ := ParseTimestamps("1136073600.000000001", 0)
+// since := time.Unix(seconds, nanoseconds)
+//
+// It returns defaultSeconds as the seconds value if value == "".
+func ParseTimestamps(value string, defaultSeconds int64) (seconds int64, nanoseconds int64, err error) {
+ if value == "" {
+ return defaultSeconds, 0, nil
+ }
+ return parseTimestamp(value)
+}
+
+func parseTimestamp(value string) (sec int64, nsec int64, err error) {
+ s, n, ok := strings.Cut(value, ".")
+ sec, err = strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return sec, 0, err
+ }
+ if !ok {
+ return sec, 0, nil
+ }
+ nsec, err = strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ return sec, nsec, err
+ }
+ // should already be in nanoseconds but just in case convert n to nanoseconds
+ nsec = int64(float64(nsec) * math.Pow(float64(10), float64(9-len(n))))
+ return sec, nsec, nil
+}
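
A usage sketch for GetTimestamp and ParseTimestamps; the package is aliased to avoid clashing with the standard library time package:

package main

import (
	"fmt"
	gotime "time"

	apitime "github.com/docker/docker/api/types/time"
)

func main() {
	ref := gotime.Now()

	// A duration string is interpreted relative to the reference time.
	ts, _ := apitime.GetTimestamp("10m", ref)
	fmt.Println("ten minutes ago:", ts)

	// A bare date parses in the reference's local zone.
	ts, _ = apitime.GetTimestamp("2006-01-02", ref)
	fmt.Println("date:", ts)

	// Round-trip the "%d.%09d" form back into a time.Time.
	sec, nsec, _ := apitime.ParseTimestamps("1136073600.000000001", 0)
	fmt.Println(gotime.Unix(sec, nsec).UTC())
}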
diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go
new file mode 100644
index 0000000..82ae339
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/types.go
@@ -0,0 +1,179 @@
+package types // import "github.com/docker/docker/api/types"
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/swarm"
+ "github.com/docker/docker/api/types/volume"
+)
+
+const (
+ // MediaTypeRawStream is vendor specific MIME-Type set for raw TTY streams
+ MediaTypeRawStream = "application/vnd.docker.raw-stream"
+
+ // MediaTypeMultiplexedStream is vendor specific MIME-Type set for stdin/stdout/stderr multiplexed streams
+ MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream"
+)
+
+// Ping contains response of Engine API:
+// GET "/_ping"
+type Ping struct {
+ APIVersion string
+ OSType string
+ Experimental bool
+ BuilderVersion BuilderVersion
+
+ // SwarmStatus provides information about the current swarm status of the
+ // engine, obtained from the "Swarm" header in the API response.
+ //
+ // It can be a nil struct if the API version does not provide this header
+ // in the ping response, or if an error occurred, in which case the client
+ // should use other ways to get the current swarm status, such as the /swarm
+ // endpoint.
+ SwarmStatus *swarm.Status
+}
+
+// ComponentVersion describes the version information for a specific component.
+type ComponentVersion struct {
+ Name string
+ Version string
+ Details map[string]string `json:",omitempty"`
+}
+
+// Version contains response of Engine API:
+// GET "/version"
+type Version struct {
+ Platform struct{ Name string } `json:",omitempty"`
+ Components []ComponentVersion `json:",omitempty"`
+
+ // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility
+
+ Version string
+ APIVersion string `json:"ApiVersion"`
+ MinAPIVersion string `json:"MinAPIVersion,omitempty"`
+ GitCommit string
+ GoVersion string
+ Os string
+ Arch string
+ KernelVersion string `json:",omitempty"`
+ Experimental bool `json:",omitempty"`
+ BuildTime string `json:",omitempty"`
+}
+
+// DiskUsageObject represents an object type used for disk usage query filtering.
+type DiskUsageObject string
+
+const (
+ // ContainerObject represents a container DiskUsageObject.
+ ContainerObject DiskUsageObject = "container"
+ // ImageObject represents an image DiskUsageObject.
+ ImageObject DiskUsageObject = "image"
+ // VolumeObject represents a volume DiskUsageObject.
+ VolumeObject DiskUsageObject = "volume"
+ // BuildCacheObject represents a build-cache DiskUsageObject.
+ BuildCacheObject DiskUsageObject = "build-cache"
+)
+
+// DiskUsageOptions holds parameters for system disk usage query.
+type DiskUsageOptions struct {
+ // Types specifies what object types to include in the response. If empty,
+ // all object types are returned.
+ Types []DiskUsageObject
+}
+
+// DiskUsage contains response of Engine API:
+// GET "/system/df"
+type DiskUsage struct {
+ LayersSize int64
+ Images []*image.Summary
+ Containers []*container.Summary
+ Volumes []*volume.Volume
+ BuildCache []*BuildCache
+ BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40.
+}
+
+// BuildCachePruneReport contains the response for Engine API:
+// POST "/build/prune"
+type BuildCachePruneReport struct {
+ CachesDeleted []string
+ SpaceReclaimed uint64
+}
+
+// SecretCreateResponse contains the information returned to a client
+// on the creation of a new secret.
+type SecretCreateResponse struct {
+ // ID is the id of the created secret.
+ ID string
+}
+
+// SecretListOptions holds parameters to list secrets
+type SecretListOptions struct {
+ Filters filters.Args
+}
+
+// ConfigCreateResponse contains the information returned to a client
+// on the creation of a new config.
+type ConfigCreateResponse struct {
+ // ID is the id of the created config.
+ ID string
+}
+
+// ConfigListOptions holds parameters to list configs
+type ConfigListOptions struct {
+ Filters filters.Args
+}
+
+// PushResult contains the tag, manifest digest, and manifest size from the
+// push. It's used to signal this information to the trust code in the client
+// so it can sign the manifest if necessary.
+type PushResult struct {
+ Tag string
+ Digest string
+ Size int
+}
+
+// BuildResult contains the image id of a successful build
+type BuildResult struct {
+ ID string
+}
+
+// BuildCache contains information about a build cache record.
+type BuildCache struct {
+ // ID is the unique ID of the build cache record.
+ ID string
+ // Parent is the ID of the parent build cache record.
+ //
+ // Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead.
+ Parent string `json:"Parent,omitempty"`
+ // Parents is the list of parent build cache record IDs.
+	Parents []string `json:"Parents,omitempty"`
+ // Type is the cache record type.
+ Type string
+ // Description is a description of the build-step that produced the build cache.
+ Description string
+ // InUse indicates if the build cache is in use.
+ InUse bool
+ // Shared indicates if the build cache is shared.
+ Shared bool
+ // Size is the amount of disk space used by the build cache (in bytes).
+ Size int64
+ // CreatedAt is the date and time at which the build cache was created.
+ CreatedAt time.Time
+ // LastUsedAt is the date and time at which the build cache was last used.
+ LastUsedAt *time.Time
+ UsageCount int
+}
+
+// BuildCachePruneOptions hold parameters to prune the build cache
+type BuildCachePruneOptions struct {
+ All bool
+ ReservedSpace int64
+ MaxUsedSpace int64
+ MinFreeSpace int64
+ Filters filters.Args
+
+ KeepStorage int64 // Deprecated: deprecated in API 1.48.
+}
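
A small sketch built on these types: restricting a disk-usage query and summing build-cache sizes. totalCacheSize is a hypothetical helper, and skipping shared records is an illustrative choice, not a rule from this API:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

// totalCacheSize sums build-cache record sizes, skipping shared records
// to avoid double-counting.
func totalCacheSize(du types.DiskUsage) int64 {
	var total int64
	for _, bc := range du.BuildCache {
		if !bc.Shared {
			total += bc.Size
		}
	}
	return total
}

func main() {
	// Restrict a disk-usage query to images and volumes only.
	opts := types.DiskUsageOptions{
		Types: []types.DiskUsageObject{types.ImageObject, types.VolumeObject},
	}
	fmt.Println(opts.Types, totalCacheSize(types.DiskUsage{}))
}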
diff --git a/vendor/github.com/docker/docker/api/types/types_deprecated.go b/vendor/github.com/docker/docker/api/types/types_deprecated.go
new file mode 100644
index 0000000..93e4336
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/types_deprecated.go
@@ -0,0 +1,115 @@
+package types
+
+import (
+ "context"
+
+ "github.com/docker/docker/api/types/common"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/storage"
+)
+
+// IDResponse Response to an API call that returns just an Id.
+//
+// Deprecated: use either [container.CommitResponse] or [container.ExecCreateResponse]. It will be removed in the next release.
+type IDResponse = common.IDResponse
+
+// ContainerJSONBase contains response of Engine API GET "/containers/{name:.*}/json"
+// for API version 1.18 and older.
+//
+// Deprecated: use [container.InspectResponse] or [container.ContainerJSONBase]. It will be removed in the next release.
+type ContainerJSONBase = container.ContainerJSONBase
+
+// ContainerJSON is the response for the GET "/containers/{name:.*}/json"
+// endpoint.
+//
+// Deprecated: use [container.InspectResponse]. It will be removed in the next release.
+type ContainerJSON = container.InspectResponse
+
+// Container contains response of Engine API:
+// GET "/containers/json"
+//
+// Deprecated: use [container.Summary].
+type Container = container.Summary
+
+// ContainerState stores container's running state
+//
+// Deprecated: use [container.State].
+type ContainerState = container.State
+
+// NetworkSettings exposes the network settings in the api.
+//
+// Deprecated: use [container.NetworkSettings].
+type NetworkSettings = container.NetworkSettings
+
+// NetworkSettingsBase holds networking state for a container when inspecting it.
+//
+// Deprecated: use [container.NetworkSettingsBase].
+type NetworkSettingsBase = container.NetworkSettingsBase
+
+// DefaultNetworkSettings holds network information
+// during the 2 release deprecation period.
+// It will be removed in Docker 1.11.
+//
+// Deprecated: use [container.DefaultNetworkSettings].
+type DefaultNetworkSettings = container.DefaultNetworkSettings
+
+// SummaryNetworkSettings provides a summary of container's networks
+// in /containers/json.
+//
+// Deprecated: use [container.NetworkSettingsSummary].
+type SummaryNetworkSettings = container.NetworkSettingsSummary
+
+// Health states
+const (
+ NoHealthcheck = container.NoHealthcheck // Deprecated: use [container.NoHealthcheck].
+ Starting = container.Starting // Deprecated: use [container.Starting].
+ Healthy = container.Healthy // Deprecated: use [container.Healthy].
+ Unhealthy = container.Unhealthy // Deprecated: use [container.Unhealthy].
+)
+
+// Health stores information about the container's healthcheck results.
+//
+// Deprecated: use [container.Health].
+type Health = container.Health
+
+// HealthcheckResult stores information about a single run of a healthcheck probe.
+//
+// Deprecated: use [container.HealthcheckResult].
+type HealthcheckResult = container.HealthcheckResult
+
+// MountPoint represents a mount point configuration inside the container.
+// This is used for reporting the mountpoints in use by a container.
+//
+// Deprecated: use [container.MountPoint].
+type MountPoint = container.MountPoint
+
+// Port An open port on a container
+//
+// Deprecated: use [container.Port].
+type Port = container.Port
+
+// GraphDriverData Information about the storage driver used to store the container's and
+// image's filesystem.
+//
+// Deprecated: use [storage.DriverData].
+type GraphDriverData = storage.DriverData
+
+// RootFS returns Image's RootFS description including the layer IDs.
+//
+// Deprecated: use [image.RootFS].
+type RootFS = image.RootFS
+
+// ImageInspect contains response of Engine API:
+// GET "/images/{name:.*}/json"
+//
+// Deprecated: use [image.InspectResponse].
+type ImageInspect = image.InspectResponse
+
+// RequestPrivilegeFunc is a function interface that clients can supply to
+// retry operations after getting an authorization error.
+// This function returns the registry authentication header value in base64
+// format, or an error if the privilege request fails.
+//
+// Deprecated: moved to [github.com/docker/docker/api/types/registry.RequestAuthConfig].
+type RequestPrivilegeFunc func(context.Context) (string, error)
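
Because these deprecations are type aliases rather than distinct types, migrating is a rename with no conversion needed; a minimal sketch (the container ID is a placeholder):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
)

func main() {
	// types.Container is an alias for container.Summary, so a value of one
	// type satisfies the other; callers only need to update the name.
	var old types.Container = container.Summary{ID: "abc123"} // hypothetical ID
	var renamed container.Summary = old
	fmt.Println(renamed.ID)
}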
diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go
new file mode 100644
index 0000000..621725a
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/versions/compare.go
@@ -0,0 +1,65 @@
+package versions // import "github.com/docker/docker/api/types/versions"
+
+import (
+ "strconv"
+ "strings"
+)
+
+// compare compares two version strings and
+// returns -1 if v1 < v2, 1 if v1 > v2, and 0 otherwise.
+func compare(v1, v2 string) int {
+ if v1 == v2 {
+ return 0
+ }
+ var (
+ currTab = strings.Split(v1, ".")
+ otherTab = strings.Split(v2, ".")
+ )
+
+ maxVer := len(currTab)
+ if len(otherTab) > maxVer {
+ maxVer = len(otherTab)
+ }
+ for i := 0; i < maxVer; i++ {
+ var currInt, otherInt int
+
+ if len(currTab) > i {
+ currInt, _ = strconv.Atoi(currTab[i])
+ }
+ if len(otherTab) > i {
+ otherInt, _ = strconv.Atoi(otherTab[i])
+ }
+ if currInt > otherInt {
+ return 1
+ }
+ if otherInt > currInt {
+ return -1
+ }
+ }
+ return 0
+}
+
+// LessThan checks if a version is less than another
+func LessThan(v, other string) bool {
+ return compare(v, other) == -1
+}
+
+// LessThanOrEqualTo checks if a version is less than or equal to another
+func LessThanOrEqualTo(v, other string) bool {
+ return compare(v, other) <= 0
+}
+
+// GreaterThan checks if a version is greater than another
+func GreaterThan(v, other string) bool {
+ return compare(v, other) == 1
+}
+
+// GreaterThanOrEqualTo checks if a version is greater than or equal to another
+func GreaterThanOrEqualTo(v, other string) bool {
+ return compare(v, other) >= 0
+}
+
+// Equal checks if a version is equal to another
+func Equal(v, other string) bool {
+ return compare(v, other) == 0
+}
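
A usage sketch of the exported comparison helpers; note that missing parts compare as 0, so "1.43" and "1.43.0" are equal:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	// Numeric, dot-separated comparison: "1.42" < "1.43" < "1.43.1".
	fmt.Println(versions.LessThan("1.42", "1.43"))               // true
	fmt.Println(versions.GreaterThanOrEqualTo("1.43.1", "1.43")) // true
	fmt.Println(versions.Equal("1.43", "1.43.0"))                // true
}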
diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go
new file mode 100644
index 0000000..618a481
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go
@@ -0,0 +1,420 @@
+package volume
+
+import (
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// ClusterVolume contains options and information specific to, and only present
+// on, Swarm CSI cluster volumes.
+type ClusterVolume struct {
+ // ID is the Swarm ID of the volume. Because cluster volumes are Swarm
+ // objects, they have an ID, unlike non-cluster volumes, which only have a
+ // Name. This ID can be used to refer to the cluster volume.
+ ID string
+
+ // Meta is the swarm metadata about this volume.
+ swarm.Meta
+
+ // Spec is the cluster-specific options from which this volume is derived.
+ Spec ClusterVolumeSpec
+
+ // PublishStatus contains the status of the volume as it pertains to its
+ // publishing on Nodes.
+ PublishStatus []*PublishStatus `json:",omitempty"`
+
+ // Info is information about the global status of the volume.
+ Info *Info `json:",omitempty"`
+}
+
+// ClusterVolumeSpec contains the spec used to create this volume.
+type ClusterVolumeSpec struct {
+ // Group defines the volume group of this volume. Volumes belonging to the
+ // same group can be referred to by group name when creating Services.
+ // Referring to a volume by group instructs swarm to treat volumes in that
+ // group interchangeably for the purpose of scheduling. Volumes with an
+	// empty string for a group technically all belong to the same, empty-string
+ // group.
+ Group string `json:",omitempty"`
+
+ // AccessMode defines how the volume is used by tasks.
+ AccessMode *AccessMode `json:",omitempty"`
+
+ // AccessibilityRequirements specifies where in the cluster a volume must
+ // be accessible from.
+ //
+ // This field must be empty if the plugin does not support
+ // VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the
+	// plugin does not support it, the volume will not be created.
+ //
+ // If AccessibilityRequirements is empty, but the plugin does support
+ // VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire
+ // cluster is a valid target for the volume.
+ AccessibilityRequirements *TopologyRequirement `json:",omitempty"`
+
+ // CapacityRange defines the desired capacity that the volume should be
+ // created with. If nil, the plugin will decide the capacity.
+ CapacityRange *CapacityRange `json:",omitempty"`
+
+ // Secrets defines Swarm Secrets that are passed to the CSI storage plugin
+ // when operating on this volume.
+ Secrets []Secret `json:",omitempty"`
+
+ // Availability is the Volume's desired availability. Analogous to Node
+ // Availability, this allows the user to take volumes offline in order to
+ // update or delete them.
+ Availability Availability `json:",omitempty"`
+}
+
+// Availability specifies the availability of the volume.
+type Availability string
+
+const (
+ // AvailabilityActive indicates that the volume is active and fully
+ // schedulable on the cluster.
+ AvailabilityActive Availability = "active"
+
+ // AvailabilityPause indicates that no new workloads should use the
+ // volume, but existing workloads can continue to use it.
+ AvailabilityPause Availability = "pause"
+
+ // AvailabilityDrain indicates that all workloads using this volume
+ // should be rescheduled, and the volume unpublished from all nodes.
+ AvailabilityDrain Availability = "drain"
+)
+
+// AccessMode defines the access mode of a volume.
+type AccessMode struct {
+ // Scope defines the set of nodes this volume can be used on at one time.
+ Scope Scope `json:",omitempty"`
+
+ // Sharing defines the number and way that different tasks can use this
+ // volume at one time.
+ Sharing SharingMode `json:",omitempty"`
+
+ // MountVolume defines options for using this volume as a Mount-type
+ // volume.
+ //
+ // Either BlockVolume or MountVolume, but not both, must be present.
+ MountVolume *TypeMount `json:",omitempty"`
+
+ // BlockVolume defines options for using this volume as a Block-type
+ // volume.
+ //
+ // Either BlockVolume or MountVolume, but not both, must be present.
+ BlockVolume *TypeBlock `json:",omitempty"`
+}
+
+// Scope defines the Scope of a Cluster Volume. This is how many nodes a
+// Volume can be accessed simultaneously on.
+type Scope string
+
+const (
+ // ScopeSingleNode indicates the volume can be used on one node at a
+ // time.
+ ScopeSingleNode Scope = "single"
+
+ // ScopeMultiNode indicates the volume can be used on many nodes at
+ // the same time.
+ ScopeMultiNode Scope = "multi"
+)
+
+// SharingMode defines the Sharing of a Cluster Volume. This is how Tasks using a
+// Volume at the same time can use it.
+type SharingMode string
+
+const (
+ // SharingNone indicates that only one Task may use the Volume at a
+ // time.
+ SharingNone SharingMode = "none"
+
+ // SharingReadOnly indicates that the Volume may be shared by any
+ // number of Tasks, but they must be read-only.
+ SharingReadOnly SharingMode = "readonly"
+
+ // SharingOneWriter indicates that the Volume may be shared by any
+ // number of Tasks, but all after the first must be read-only.
+ SharingOneWriter SharingMode = "onewriter"
+
+ // SharingAll means that the Volume may be shared by any number of
+ // Tasks, as readers or writers.
+ SharingAll SharingMode = "all"
+)
+
+// TypeBlock defines options for using a volume as a block-type volume.
+//
+// Intentionally empty.
+type TypeBlock struct{}
+
+// TypeMount contains options for using a volume as a Mount-type
+// volume.
+type TypeMount struct {
+ // FsType specifies the filesystem type for the mount volume. Optional.
+ FsType string `json:",omitempty"`
+
+ // MountFlags defines flags to pass when mounting the volume. Optional.
+ MountFlags []string `json:",omitempty"`
+}
+
+// TopologyRequirement expresses the user's requirements for a volume's
+// accessible topology.
+type TopologyRequirement struct {
+ // Requisite specifies a list of Topologies, at least one of which the
+ // volume must be accessible from.
+ //
+ // Taken verbatim from the CSI Spec:
+ //
+ // Specifies the list of topologies the provisioned volume MUST be
+ // accessible from.
+ // This field is OPTIONAL. If TopologyRequirement is specified either
+ // requisite or preferred or both MUST be specified.
+ //
+ // If requisite is specified, the provisioned volume MUST be
+ // accessible from at least one of the requisite topologies.
+ //
+ // Given
+ // x = number of topologies provisioned volume is accessible from
+ // n = number of requisite topologies
+ // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1
+ // If x==n, then the SP MUST make the provisioned volume available to
+ // all topologies from the list of requisite topologies. If it is
+ // unable to do so, the SP MUST fail the CreateVolume call.
+ // For example, if a volume should be accessible from a single zone,
+ // and requisite =
+ // {"region": "R1", "zone": "Z2"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and the "zone" "Z2".
+ // Similarly, if a volume should be accessible from two zones, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and both "zone" "Z2" and "zone" "Z3".
+ //
+ // If x<n, then the SP SHALL choose x unique topologies from the list
+ // of requisite topologies. If it is unable to do so, the SP MUST fail
+ // the CreateVolume call.
+ // For example, if a volume should be accessible from a single zone,
+ // and requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // then the SP may choose to make the provisioned volume available in
+ // either the "zone" "Z2" or the "zone" "Z3" in the "region" "R1".
+ // Similarly, if a volume should be accessible from two zones, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"},
+ // {"region": "R1", "zone": "Z4"}
+ // then the provisioned volume MUST be accessible from any combination
+ // of two unique topologies: e.g. "R1/Z2" and "R1/Z3", or "R1/Z2" and
+ // "R1/Z4", or "R1/Z3" and "R1/Z4".
+ //
+ // If x>n, then the SP MUST make the provisioned volume available from
+ // all topologies from the list of requisite topologies and MAY choose
+ // the remaining x-n unique topologies from the list of all possible
+ // topologies. If it is unable to do so, the SP MUST fail the
+ // CreateVolume call.
+ // For example, if a volume should be accessible from two zones, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and the "zone" "Z2" and the SP may select the second zone
+ // independently, e.g. "R1/Z4".
+ Requisite []Topology `json:",omitempty"`
+
+ // Preferred is a list of Topologies that the volume should attempt to be
+ // provisioned in.
+ //
+ // Taken from the CSI spec:
+ //
+ // Specifies the list of topologies the CO would prefer the volume to
+ // be provisioned in.
+ //
+ // This field is OPTIONAL. If TopologyRequirement is specified either
+ // requisite or preferred or both MUST be specified.
+ //
+ // An SP MUST attempt to make the provisioned volume available using
+ // the preferred topologies in order from first to last.
+ //
+ // If requisite is specified, all topologies in preferred list MUST
+ // also be present in the list of requisite topologies.
+ //
+ // If the SP is unable to make the provisioned volume available
+ // from any of the preferred topologies, the SP MAY choose a topology
+ // from the list of requisite topologies.
+ // If the list of requisite topologies is not specified, then the SP
+ // MAY choose from the list of all possible topologies.
+ // If the list of requisite topologies is specified and the SP is
+ // unable to make the provisioned volume available from any of the
+ // requisite topologies it MUST fail the CreateVolume call.
+ //
+ // Example 1:
+ // Given a volume should be accessible from a single zone, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // preferred =
+ // {"region": "R1", "zone": "Z3"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // available from "zone" "Z3" in the "region" "R1" and fall back to
+ // "zone" "Z2" in the "region" "R1" if that is not possible.
+ //
+ // Example 2:
+ // Given a volume should be accessible from a single zone, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"},
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z5"}
+ // preferred =
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z2"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // accessible from "zone" "Z4" in the "region" "R1" and fall back to
+ // "zone" "Z2" in the "region" "R1" if that is not possible. If that
+ // is not possible, the SP may choose between either the "zone"
+ // "Z3" or "Z5" in the "region" "R1".
+ //
+ // Example 3:
+ // Given a volume should be accessible from TWO zones (because an
+ // opaque parameter in CreateVolumeRequest, for example, specifies
+ // the volume is accessible from two zones, aka synchronously
+ // replicated), and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"},
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z5"}
+ // preferred =
+ // {"region": "R1", "zone": "Z5"},
+ // {"region": "R1", "zone": "Z3"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // accessible from the combination of the two "zones" "Z5" and "Z3" in
+ // the "region" "R1". If that's not possible, it should fall back to
+ // a combination of "Z5" and other possibilities from the list of
+ // requisite. If that's not possible, it should fall back to a
+ // combination of "Z3" and other possibilities from the list of
+ // requisite. If that's not possible, it should fall back to a
+ // combination of other possibilities from the list of requisite.
+ Preferred []Topology `json:",omitempty"`
+}
+
+// Topology is a map of topological domains to topological segments.
+//
+// This description is taken verbatim from the CSI Spec:
+//
+// A topological domain is a sub-division of a cluster, like "region",
+// "zone", "rack", etc.
+// A topological segment is a specific instance of a topological domain,
+// like "zone3", "rack3", etc.
+// For example {"com.company/zone": "Z1", "com.company/rack": "R3"}
+// Valid keys have two segments: an OPTIONAL prefix and name, separated
+// by a slash (/), for example: "com.company.example/zone".
+// The key name segment is REQUIRED. The prefix is OPTIONAL.
+// The key name MUST be 63 characters or less, begin and end with an
+// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-),
+// underscores (_), dots (.), or alphanumerics in between, for example
+// "zone".
+// The key prefix MUST be 63 characters or less, begin and end with a
+// lower-case alphanumeric character ([a-z0-9]), contain only
+// dashes (-), dots (.), or lower-case alphanumerics in between, and
+// follow domain name notation format
+// (https://tools.ietf.org/html/rfc1035#section-2.3.1).
+// The key prefix SHOULD include the plugin's host company name and/or
+// the plugin name, to minimize the possibility of collisions with keys
+// from other plugins.
+// If a key prefix is specified, it MUST be identical across all
+// topology keys returned by the SP (across all RPCs).
+// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone"
+// MUST not both exist.
+// Each value (topological segment) MUST contain 1 or more strings.
+// Each string MUST be 63 characters or less and begin and end with an
+// alphanumeric character with '-', '_', '.', or alphanumerics in
+// between.
+type Topology struct {
+ Segments map[string]string `json:",omitempty"`
+}
+
+// CapacityRange describes the minimum and maximum capacity a volume should
+// be created with.
+type CapacityRange struct {
+ // RequiredBytes specifies that a volume must be at least this big. The
+ // value of 0 indicates an unspecified minimum.
+ RequiredBytes int64
+
+ // LimitBytes specifies that a volume must not be bigger than this. The
+ // value of 0 indicates an unspecified maximum.
+ LimitBytes int64
+}
+
+// Secret represents a Swarm Secret value that must be passed to the CSI
+// storage plugin when operating on this Volume. It represents one key-value
+// pair of possibly many.
+type Secret struct {
+ // Key is the name of the key of the key-value pair passed to the plugin.
+ Key string
+
+ // Secret is the swarm Secret object from which to read data. This can be a
+ // Secret name or ID. The Secret data is retrieved by Swarm and used as the
+ // value of the key-value pair passed to the plugin.
+ Secret string
+}
+
+// PublishState represents the state of a Volume as it pertains to its
+// use on a particular Node.
+type PublishState string
+
+const (
+ // StatePending indicates that the volume should be published on
+ // this node, but the call to ControllerPublishVolume has not yet
+ // completed successfully and had its result recorded by swarmkit.
+ StatePending PublishState = "pending-publish"
+
+ // StatePublished means the volume is published successfully to the node.
+ StatePublished PublishState = "published"
+
+ // StatePendingNodeUnpublish indicates that the Volume should be
+ // unpublished on the Node, and we're waiting for confirmation that it has
+ // done so. After the Node has confirmed that the Volume has been
+ // unpublished, the state will move to StatePendingUnpublish.
+ StatePendingNodeUnpublish PublishState = "pending-node-unpublish"
+
+ // StatePendingUnpublish means the volume is still published to the node
+ // by the controller, awaiting the operation to unpublish it.
+ StatePendingUnpublish PublishState = "pending-controller-unpublish"
+)
+
+// PublishStatus represents the status of the volume as published to an
+// individual node
+type PublishStatus struct {
+ // NodeID is the ID of the swarm node this Volume is published to.
+ NodeID string `json:",omitempty"`
+
+ // State is the publish state of the volume.
+ State PublishState `json:",omitempty"`
+
+ // PublishContext is the PublishContext returned by the CSI plugin when
+ // a volume is published.
+ PublishContext map[string]string `json:",omitempty"`
+}
+
+// Info contains information about the Volume as a whole as provided by
+// the CSI storage plugin.
+type Info struct {
+ // CapacityBytes is the capacity of the volume in bytes. A value of 0
+ // indicates that the capacity is unknown.
+ CapacityBytes int64 `json:",omitempty"`
+
+ // VolumeContext is the context originating from the CSI storage plugin
+ // when the Volume is created.
+ VolumeContext map[string]string `json:",omitempty"`
+
+ // VolumeID is the ID of the Volume as seen by the CSI storage plugin. It
+ // is distinct from the Volume's Swarm ID, which is the ID the Docker
+ // Engine uses to refer to the Volume. If this field is blank, the Volume
+ // has not been successfully created yet.
+ VolumeID string `json:",omitempty"`
+
+ // AccessibleTopology is the topology this volume is actually accessible
+ // from.
+ AccessibleTopology []Topology `json:",omitempty"`
+}
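
For orientation, here is a minimal sketch of how the topology and capacity types above compose. The region/zone segments and the 1 GiB/10 GiB bounds are illustrative values only; the full `ClusterVolumeSpec` that ties these to an `AccessMode` is defined earlier in this file and not shown in this hunk.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/volume"
)

func main() {
	// Ask for a volume between 1 GiB and 10 GiB that must live in one of
	// two zones of region R1, preferring Z2 (all values are placeholders).
	capacity := volume.CapacityRange{
		RequiredBytes: 1 << 30,  // 0 would mean "no minimum"
		LimitBytes:    10 << 30, // 0 would mean "no maximum"
	}
	topology := volume.TopologyRequirement{
		Requisite: []volume.Topology{
			{Segments: map[string]string{"region": "R1", "zone": "Z2"}},
			{Segments: map[string]string{"region": "R1", "zone": "Z3"}},
		},
		Preferred: []volume.Topology{
			{Segments: map[string]string{"region": "R1", "zone": "Z2"}},
		},
	}
	fmt.Printf("capacity: %+v\n", capacity)
	fmt.Printf("topology: %+v\n", topology)
	fmt.Println("scope:", volume.ScopeSingleNode, "sharing:", volume.SharingOneWriter)
}
```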
diff --git a/vendor/github.com/docker/docker/api/types/volume/create_options.go b/vendor/github.com/docker/docker/api/types/volume/create_options.go
new file mode 100644
index 0000000..37c41a6
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/create_options.go
@@ -0,0 +1,29 @@
+package volume
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// CreateOptions VolumeConfig
+//
+// Volume configuration
+// swagger:model CreateOptions
+type CreateOptions struct {
+
+ // cluster volume spec
+ ClusterVolumeSpec *ClusterVolumeSpec `json:"ClusterVolumeSpec,omitempty"`
+
+ // Name of the volume driver to use.
+ Driver string `json:"Driver,omitempty"`
+
+ // A mapping of driver options and values. These options are
+ // passed directly to the driver and are driver specific.
+ //
+ DriverOpts map[string]string `json:"DriverOpts,omitempty"`
+
+ // User-defined key/value metadata.
+ Labels map[string]string `json:"Labels,omitempty"`
+
+ // The new volume's name. If not specified, Docker generates a name.
+ //
+ Name string `json:"Name,omitempty"`
+}
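
As a usage sketch (not part of the vendored code), `CreateOptions` is the payload `VolumeCreate` accepts; the volume name and label below are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Create a named volume using the local driver.
	vol, err := cli.VolumeCreate(context.Background(), volume.CreateOptions{
		Name:   "example-data", // placeholder name
		Driver: "local",
		Labels: map[string]string{"env": "dev"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("created volume:", vol.Name, "at", vol.Mountpoint)
}
```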
diff --git a/vendor/github.com/docker/docker/api/types/volume/list_response.go b/vendor/github.com/docker/docker/api/types/volume/list_response.go
new file mode 100644
index 0000000..ca5192a
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/list_response.go
@@ -0,0 +1,18 @@
+package volume
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ListResponse VolumeListResponse
+//
+// Volume list response
+// swagger:model ListResponse
+type ListResponse struct {
+
+ // List of volumes
+ Volumes []*Volume `json:"Volumes"`
+
+ // Warnings that occurred when fetching the list of volumes.
+ //
+ Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/volume/options.go b/vendor/github.com/docker/docker/api/types/volume/options.go
new file mode 100644
index 0000000..0b9645e
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/options.go
@@ -0,0 +1,15 @@
+package volume // import "github.com/docker/docker/api/types/volume"
+
+import "github.com/docker/docker/api/types/filters"
+
+// ListOptions holds parameters to list volumes.
+type ListOptions struct {
+ Filters filters.Args
+}
+
+// PruneReport contains the response for Engine API:
+// POST "/volumes/prune"
+type PruneReport struct {
+ VolumesDeleted []string
+ SpaceReclaimed uint64
+}
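
A small sketch of `ListOptions` and `PruneReport` in use, assuming a reachable daemon; the label filter is a placeholder:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	ctx := context.Background()

	// List only volumes carrying a specific label.
	resp, err := cli.VolumeList(ctx, volume.ListOptions{
		Filters: filters.NewArgs(filters.Arg("label", "env=dev")),
	})
	if err != nil {
		panic(err)
	}
	for _, v := range resp.Volumes {
		fmt.Println(v.Name, v.Driver)
	}

	// Prune unused volumes; the result mirrors PruneReport above.
	report, err := cli.VolumesPrune(ctx, filters.NewArgs())
	if err != nil {
		panic(err)
	}
	fmt.Printf("deleted %d volumes, reclaimed %d bytes\n",
		len(report.VolumesDeleted), report.SpaceReclaimed)
}
```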
diff --git a/vendor/github.com/docker/docker/api/types/volume/volume.go b/vendor/github.com/docker/docker/api/types/volume/volume.go
new file mode 100644
index 0000000..ea7d555
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/volume.go
@@ -0,0 +1,75 @@
+package volume
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Volume volume
+// swagger:model Volume
+type Volume struct {
+
+ // cluster volume
+ ClusterVolume *ClusterVolume `json:"ClusterVolume,omitempty"`
+
+ // Date/Time the volume was created.
+ CreatedAt string `json:"CreatedAt,omitempty"`
+
+ // Name of the volume driver used by the volume.
+ // Required: true
+ Driver string `json:"Driver"`
+
+ // User-defined key/value metadata.
+ // Required: true
+ Labels map[string]string `json:"Labels"`
+
+ // Mount path of the volume on the host.
+ // Required: true
+ Mountpoint string `json:"Mountpoint"`
+
+ // Name of the volume.
+ // Required: true
+ Name string `json:"Name"`
+
+ // The driver specific options used when creating the volume.
+ //
+ // Required: true
+ Options map[string]string `json:"Options"`
+
+ // The level at which the volume exists. Either `global` for cluster-wide,
+ // or `local` for machine level.
+ //
+ // Required: true
+ Scope string `json:"Scope"`
+
+ // Low-level details about the volume, provided by the volume driver.
+ // Details are returned as a map with key/value pairs:
+ // `{"key":"value","key2":"value2"}`.
+ //
+ // The `Status` field is optional, and is omitted if the volume driver
+ // does not support this feature.
+ //
+ Status map[string]interface{} `json:"Status,omitempty"`
+
+ // usage data
+ UsageData *UsageData `json:"UsageData,omitempty"`
+}
+
+// UsageData Usage details about the volume. This information is used by the
+// `GET /system/df` endpoint, and omitted in other endpoints.
+//
+// swagger:model UsageData
+type UsageData struct {
+
+ // The number of containers referencing this volume. This field
+ // is set to `-1` if the reference-count is not available.
+ //
+ // Required: true
+ RefCount int64 `json:"RefCount"`
+
+ // Amount of disk space used by the volume (in bytes). This information
+ // is only available for volumes created with the `"local"` volume
+ // driver. For volumes created with other volume drivers, this field
+ // is set to `-1` ("not available")
+ //
+ // Required: true
+ Size int64 `json:"Size"`
+}
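
For illustration, a sketch that inspects the `Volume` model above. Note that `UsageData` is only populated by `GET /system/df`, so a plain inspect leaves it nil; "example-data" is a placeholder name:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Inspect a volume by name.
	vol, err := cli.VolumeInspect(context.Background(), "example-data")
	if err != nil {
		panic(err)
	}
	fmt.Println("driver:", vol.Driver, "scope:", vol.Scope, "mountpoint:", vol.Mountpoint)

	// UsageData stays nil outside the /system/df endpoint.
	if vol.UsageData != nil {
		fmt.Println("refcount:", vol.UsageData.RefCount, "size:", vol.UsageData.Size)
	}
}
```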
diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_update.go b/vendor/github.com/docker/docker/api/types/volume/volume_update.go
new file mode 100644
index 0000000..f958f80
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/volume_update.go
@@ -0,0 +1,7 @@
+package volume // import "github.com/docker/docker/api/types/volume"
+
+// UpdateOptions is configuration to update a Volume with.
+type UpdateOptions struct {
+ // Spec is the ClusterVolumeSpec to update the volume to.
+ Spec *ClusterVolumeSpec `json:"Spec,omitempty"`
+}
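
A hedged sketch of `UpdateOptions` in use. It assumes the `ClusterVolume` type defined earlier in cluster_volume.go exposes `ID`, a `Version` (via an embedded swarm `Meta`), and `Spec`; updates only apply to cluster volumes, and the volume name is a placeholder:

```go
package main

import (
	"context"

	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	ctx := context.Background()

	// Fetch the current object first so we can pass its version for
	// optimistic concurrency.
	vol, err := cli.VolumeInspect(ctx, "example-data")
	if err != nil {
		panic(err)
	}
	if vol.ClusterVolume == nil {
		panic("not a cluster volume")
	}

	spec := vol.ClusterVolume.Spec // assumed field, defined earlier in this file
	err = cli.VolumeUpdate(ctx, vol.ClusterVolume.ID, vol.ClusterVolume.Version,
		volume.UpdateOptions{Spec: &spec})
	if err != nil {
		panic(err)
	}
}
```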
diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md
new file mode 100644
index 0000000..f8af3ab
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/README.md
@@ -0,0 +1,38 @@
+# Go client for the Docker Engine API
+
+The `docker` command uses this package to communicate with the daemon. It can
+also be used by your own Go applications to do anything the command-line
+interface does – running containers, pulling images, managing swarms, etc.
+
+For example, to list all containers (the equivalent of `docker ps --all`):
+
+```go
+package main
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/client"
+)
+
+func main() {
+ apiClient, err := client.NewClientWithOpts(client.FromEnv)
+ if err != nil {
+ panic(err)
+ }
+ defer apiClient.Close()
+
+ containers, err := apiClient.ContainerList(context.Background(), container.ListOptions{All: true})
+ if err != nil {
+ panic(err)
+ }
+
+ for _, ctr := range containers {
+ fmt.Printf("%s %s (status: %s)\n", ctr.ID, ctr.Image, ctr.Status)
+ }
+}
+```
+
+[Full documentation is available on pkg.go.dev.](https://pkg.go.dev/github.com/docker/docker/client)
diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go
new file mode 100644
index 0000000..51a73cd
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/build_cancel.go
@@ -0,0 +1,16 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+)
+
+// BuildCancel requests the daemon to cancel the ongoing build request.
+func (cli *Client) BuildCancel(ctx context.Context, id string) error {
+ query := url.Values{}
+ query.Set("id", id)
+
+ resp, err := cli.post(ctx, "/build/cancel", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go
new file mode 100644
index 0000000..92b47d1
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/build_prune.go
@@ -0,0 +1,56 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/pkg/errors"
+)
+
+// BuildCachePrune requests the daemon to delete unused cache data
+func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) {
+ if err := cli.NewVersionError(ctx, "1.31", "build prune"); err != nil {
+ return nil, err
+ }
+
+ query := url.Values{}
+ if opts.All {
+ query.Set("all", "1")
+ }
+
+ if opts.KeepStorage != 0 {
+ query.Set("keep-storage", strconv.Itoa(int(opts.KeepStorage)))
+ }
+ if opts.ReservedSpace != 0 {
+ query.Set("reserved-space", strconv.Itoa(int(opts.ReservedSpace)))
+ }
+ if opts.MaxUsedSpace != 0 {
+ query.Set("max-used-space", strconv.Itoa(int(opts.MaxUsedSpace)))
+ }
+ if opts.MinFreeSpace != 0 {
+ query.Set("min-free-space", strconv.Itoa(int(opts.MinFreeSpace)))
+ }
+ f, err := filters.ToJSON(opts.Filters)
+ if err != nil {
+ return nil, errors.Wrap(err, "prune could not marshal filters option")
+ }
+ query.Set("filters", f)
+
+ resp, err := cli.post(ctx, "/build/prune", query, nil, nil)
+ defer ensureReaderClosed(resp)
+
+ if err != nil {
+ return nil, err
+ }
+
+ report := types.BuildCachePruneReport{}
+ if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
+ return nil, errors.Wrap(err, "error retrieving disk usage")
+ }
+
+ return &report, nil
+}
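
A usage sketch for `BuildCachePrune`, assuming the daemon's builder supports the `until` filter; the 24h window is arbitrary:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Remove all build cache entries not used within the last 24 hours.
	report, err := cli.BuildCachePrune(context.Background(), types.BuildCachePruneOptions{
		All:     true,
		Filters: filters.NewArgs(filters.Arg("until", "24h")),
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("reclaimed %d bytes (%d entries)\n",
		report.SpaceReclaimed, len(report.CachesDeleted))
}
```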
diff --git a/vendor/github.com/docker/docker/client/checkpoint.go b/vendor/github.com/docker/docker/client/checkpoint.go
new file mode 100644
index 0000000..f690f7c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/checkpoint.go
@@ -0,0 +1,18 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+
+ "github.com/docker/docker/api/types/checkpoint"
+)
+
+// CheckpointAPIClient defines API client methods for the checkpoints.
+//
+// Experimental: checkpoint and restore is still an experimental feature,
+// and only available if the daemon is running with experimental features
+// enabled.
+type CheckpointAPIClient interface {
+ CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error
+ CheckpointDelete(ctx context.Context, container string, options checkpoint.DeleteOptions) error
+ CheckpointList(ctx context.Context, container string, options checkpoint.ListOptions) ([]checkpoint.Summary, error)
+}
diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go
new file mode 100644
index 0000000..7b06fee
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/checkpoint_create.go
@@ -0,0 +1,19 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+
+ "github.com/docker/docker/api/types/checkpoint"
+)
+
+// CheckpointCreate creates a checkpoint from the given container with the given name
+func (cli *Client) CheckpointCreate(ctx context.Context, containerID string, options checkpoint.CreateOptions) error {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return err
+ }
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/checkpoints", nil, options, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go
new file mode 100644
index 0000000..d15162e
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go
@@ -0,0 +1,25 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/docker/docker/api/types/checkpoint"
+)
+
+// CheckpointDelete deletes the checkpoint with the given name from the given container
+func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options checkpoint.DeleteOptions) error {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return err
+ }
+
+ query := url.Values{}
+ if options.CheckpointDir != "" {
+ query.Set("dir", options.CheckpointDir)
+ }
+
+ resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go
new file mode 100644
index 0000000..9e7963f
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/checkpoint_list.go
@@ -0,0 +1,28 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types/checkpoint"
+)
+
+// CheckpointList returns the checkpoints of the given container in the docker host
+func (cli *Client) CheckpointList(ctx context.Context, container string, options checkpoint.ListOptions) ([]checkpoint.Summary, error) {
+ var checkpoints []checkpoint.Summary
+
+ query := url.Values{}
+ if options.CheckpointDir != "" {
+ query.Set("dir", options.CheckpointDir)
+ }
+
+ resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return checkpoints, err
+ }
+
+ err = json.NewDecoder(resp.Body).Decode(&checkpoints)
+ return checkpoints, err
+}
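
Putting the three checkpoint calls together, a sketch of the create-then-list flow. Checkpointing is experimental and needs a daemon started with experimental features enabled (plus CRIU on the host); the container and checkpoint IDs are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/checkpoint"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	ctx := context.Background()

	const containerID = "my-container" // placeholder

	// Checkpoint the container without stopping it (Exit: false).
	err = cli.CheckpointCreate(ctx, containerID, checkpoint.CreateOptions{
		CheckpointID: "cp1",
		Exit:         false,
	})
	if err != nil {
		panic(err)
	}

	// List the checkpoints recorded for the container.
	summaries, err := cli.CheckpointList(ctx, containerID, checkpoint.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, s := range summaries {
		fmt.Println("checkpoint:", s.Name)
	}
}
```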
diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go
new file mode 100644
index 0000000..cd47f05
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/client.go
@@ -0,0 +1,474 @@
+/*
+Package client is a Go client for the Docker Engine API.
+
+For more information about the Engine API, see the documentation:
+https://docs.docker.com/reference/api/engine/
+
+# Usage
+
+You use the library by constructing a client object using [NewClientWithOpts]
+and calling methods on it. The client can be configured from environment
+variables by passing the [FromEnv] option, or configured manually by passing any
+of the other available [Opts].
+
+For example, to list running containers (the equivalent of "docker ps"):
+
+ package main
+
+ import (
+ "context"
+ "fmt"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/client"
+ )
+
+ func main() {
+ cli, err := client.NewClientWithOpts(client.FromEnv)
+ if err != nil {
+ panic(err)
+ }
+
+ containers, err := cli.ContainerList(context.Background(), container.ListOptions{})
+ if err != nil {
+ panic(err)
+ }
+
+ for _, ctr := range containers {
+ fmt.Printf("%s %s\n", ctr.ID, ctr.Image)
+ }
+ }
+*/
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/docker/docker/api"
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/versions"
+ "github.com/docker/go-connections/sockets"
+ "github.com/pkg/errors"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+)
+
+// DummyHost is a hostname used for local communication.
+//
+// It acts as a valid formatted hostname for local connections (such as "unix://"
+// or "npipe://") which do not require a hostname. It should never be resolved,
+// but uses the special-purpose ".localhost" TLD (as defined in [RFC 2606, Section 2]
+// and [RFC 6761, Section 6.3]).
+//
+// [RFC 7230, Section 5.4] defines that an empty header must be used for such
+// cases:
+//
+// If the authority component is missing or undefined for the target URI,
+// then a client MUST send a Host header field with an empty field-value.
+//
+// However, [Go stdlib] enforces the semantics of HTTP(S) over TCP, does not
+// allow an empty header to be used, and requires req.URL.Scheme to be either
+// "http" or "https".
+//
+// For further details, refer to:
+//
+// - https://github.com/docker/engine-api/issues/189
+// - https://github.com/golang/go/issues/13624
+// - https://github.com/golang/go/issues/61076
+// - https://github.com/moby/moby/issues/45935
+//
+// [RFC 2606, Section 2]: https://www.rfc-editor.org/rfc/rfc2606.html#section-2
+// [RFC 6761, Section 6.3]: https://www.rfc-editor.org/rfc/rfc6761#section-6.3
+// [RFC 7230, Section 5.4]: https://datatracker.ietf.org/doc/html/rfc7230#section-5.4
+// [Go stdlib]: https://github.com/golang/go/blob/6244b1946bc2101b01955468f1be502dbadd6807/src/net/http/transport.go#L558-L569
+const DummyHost = "api.moby.localhost"
+
+// fallbackAPIVersion is the version to fallback to if API-version negotiation
+// fails. This version is the highest version of the API before API-version
+// negotiation was introduced. If negotiation fails (or no API version was
+// included in the API response), we assume the API server uses the most
+// recent version before negotiation was introduced.
+const fallbackAPIVersion = "1.24"
+
+// Ensure that Client always implements APIClient.
+var _ APIClient = &Client{}
+
+// Client is the API client that performs all operations
+// against a docker server.
+type Client struct {
+ // scheme sets the scheme for the client
+ scheme string
+ // host holds the server address to connect to
+ host string
+ // proto holds the client protocol i.e. unix.
+ proto string
+ // addr holds the client address.
+ addr string
+ // basePath holds the path to prepend to the requests.
+ basePath string
+ // client used to send and receive http requests.
+ client *http.Client
+ // version of the server to talk to.
+ version string
+ // userAgent is the User-Agent header to use for HTTP requests. It takes
+ // precedence over User-Agent headers set in customHTTPHeaders, and other
+ // header variables. When set to an empty string, the User-Agent header
+ // is removed, and no header is sent.
+ userAgent *string
+ // custom HTTP headers configured by users.
+ customHTTPHeaders map[string]string
+ // manualOverride is set to true when the version was set by users.
+ manualOverride bool
+
+ // negotiateVersion indicates if the client should automatically negotiate
+ // the API version to use when making requests. API version negotiation is
+ // performed on the first request, after which negotiated is set to "true"
+ // so that subsequent requests do not re-negotiate.
+ negotiateVersion bool
+
+ // negotiated indicates that API version negotiation took place
+ negotiated atomic.Bool
+
+ // negotiateLock is used to single-flight the version negotiation process
+ negotiateLock sync.Mutex
+
+ traceOpts []otelhttp.Option
+
+ // When the client transport is an *http.Transport (default) we need to do some extra things (like closing idle connections).
+ // Store the original transport as the http.Client transport will be wrapped with tracing libs.
+ baseTransport *http.Transport
+}
+
+// ErrRedirect is the error returned by checkRedirect when the request is non-GET.
+var ErrRedirect = errors.New("unexpected redirect in response")
+
+// CheckRedirect specifies the policy for dealing with redirect responses. It
+// can be set on [http.Client.CheckRedirect] to prevent HTTP redirects for
+// non-GET requests. It returns an [ErrRedirect] for non-GET requests, otherwise
+// returns a [http.ErrUseLastResponse], which is special-cased by http.Client
+// to use the last response.
+//
+// Go 1.8 changed behavior for HTTP redirects (specifically 301, 307, and 308)
+// in the client. The client (and by extension API client) can be made to send
+// a request like "POST /containers//start" where what would normally be in the
+// name section of the URL is empty. This triggers an HTTP 301 from the daemon.
+//
+// In go 1.8 this 301 is converted to a GET request, and ends up getting
+// a 404 from the daemon. This behavior change manifests in the client in that
+// before, the 301 was not followed and the client did not generate an error,
+// but now results in a message like "Error response from daemon: page not found".
+func CheckRedirect(_ *http.Request, via []*http.Request) error {
+ if via[0].Method == http.MethodGet {
+ return http.ErrUseLastResponse
+ }
+ return ErrRedirect
+}
+
+// NewClientWithOpts initializes a new API client with a default HTTPClient, and
+// default API host and version. It also initializes the custom HTTP headers to
+// add to each request.
+//
+// It takes an optional list of [Opt] functional arguments, which are applied in
+// the order they're provided, which allows modifying the defaults when creating
+// the client. For example, the following initializes a client that configures
+// itself with values from environment variables ([FromEnv]), and has automatic
+// API version negotiation enabled ([WithAPIVersionNegotiation]).
+//
+// cli, err := client.NewClientWithOpts(
+// client.FromEnv,
+// client.WithAPIVersionNegotiation(),
+// )
+func NewClientWithOpts(ops ...Opt) (*Client, error) {
+ hostURL, err := ParseHostURL(DefaultDockerHost)
+ if err != nil {
+ return nil, err
+ }
+
+ client, err := defaultHTTPClient(hostURL)
+ if err != nil {
+ return nil, err
+ }
+ c := &Client{
+ host: DefaultDockerHost,
+ version: api.DefaultVersion,
+ client: client,
+ proto: hostURL.Scheme,
+ addr: hostURL.Host,
+
+ traceOpts: []otelhttp.Option{
+ otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string {
+ return req.Method + " " + req.URL.Path
+ }),
+ },
+ }
+
+ for _, op := range ops {
+ if err := op(c); err != nil {
+ return nil, err
+ }
+ }
+
+ if tr, ok := c.client.Transport.(*http.Transport); ok {
+ // Store the base transport before we wrap it in tracing libs below
+ // This is used, as an example, to close idle connections when the client is closed
+ c.baseTransport = tr
+ }
+
+ if c.scheme == "" {
+ // TODO(stevvooe): This isn't really the right way to write clients in Go.
+ // `NewClient` should probably only take an `*http.Client` and work from there.
+ // Unfortunately, the model of having a host-ish/url-thingy as the connection
+ // string has us confusing protocol and transport layers. We continue doing
+ // this to avoid breaking existing clients but this should be addressed.
+ if c.tlsConfig() != nil {
+ c.scheme = "https"
+ } else {
+ c.scheme = "http"
+ }
+ }
+
+ c.client.Transport = otelhttp.NewTransport(c.client.Transport, c.traceOpts...)
+
+ return c, nil
+}
+
+func (cli *Client) tlsConfig() *tls.Config {
+ if cli.baseTransport == nil {
+ return nil
+ }
+ return cli.baseTransport.TLSClientConfig
+}
+
+func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) {
+ transport := &http.Transport{}
+ // Necessary to prevent long-lived processes using the
+ // client from leaking connections due to idle connections
+ // not being released.
+ // TODO: see if we can also address this from the server side,
+ // or in go-connections.
+ // see: https://github.com/moby/moby/issues/45539
+ transport.MaxIdleConns = 6
+ transport.IdleConnTimeout = 30 * time.Second
+ err := sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host)
+ if err != nil {
+ return nil, err
+ }
+ return &http.Client{
+ Transport: transport,
+ CheckRedirect: CheckRedirect,
+ }, nil
+}
+
+// Close the transport used by the client
+func (cli *Client) Close() error {
+ if cli.baseTransport != nil {
+ cli.baseTransport.CloseIdleConnections()
+ return nil
+ }
+ return nil
+}
+
+// checkVersion manually triggers API version negotiation (if configured).
+// This allows version-dependent code to use the same version that will be
+// negotiated when making the actual requests, for cases where the
+// negotiation cannot be performed lazily.
+func (cli *Client) checkVersion(ctx context.Context) error {
+ if !cli.manualOverride && cli.negotiateVersion && !cli.negotiated.Load() {
+ // Ensure exclusive write access to version and negotiated fields
+ cli.negotiateLock.Lock()
+ defer cli.negotiateLock.Unlock()
+
+ // May have been set during last execution of critical zone
+ if cli.negotiated.Load() {
+ return nil
+ }
+
+ ping, err := cli.Ping(ctx)
+ if err != nil {
+ return err
+ }
+ cli.negotiateAPIVersionPing(ping)
+ }
+ return nil
+}
+
+// getAPIPath returns the versioned request path to call the API.
+// It appends the query parameters to the path if they are not empty.
+func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string {
+ var apiPath string
+ _ = cli.checkVersion(ctx)
+ if cli.version != "" {
+ apiPath = path.Join(cli.basePath, "/v"+strings.TrimPrefix(cli.version, "v"), p)
+ } else {
+ apiPath = path.Join(cli.basePath, p)
+ }
+ return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String()
+}
+
+// ClientVersion returns the API version used by this client.
+func (cli *Client) ClientVersion() string {
+ return cli.version
+}
+
+// NegotiateAPIVersion queries the API and updates the version to match the API
+// version. NegotiateAPIVersion downgrades the client's API version to match the
+// APIVersion if the ping version is lower than the default version. If the API
+// version reported by the server is higher than the maximum version supported
+// by the client, it uses the client's maximum version.
+//
+// If a manual override is in place, either through the "DOCKER_API_VERSION"
+// ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized
+// with a fixed version ([WithVersion]), no negotiation is performed.
+//
+// If the API server's ping response does not contain an API version, or if the
+// client did not get a successful ping response, it assumes it is connected with
+// an old daemon that does not support API version negotiation, in which case it
+// downgrades to the latest version of the API before version negotiation was
+// added (1.24).
+func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
+ if !cli.manualOverride {
+ // Avoid concurrent modification of version-related fields
+ cli.negotiateLock.Lock()
+ defer cli.negotiateLock.Unlock()
+
+ ping, err := cli.Ping(ctx)
+ if err != nil {
+ // FIXME(thaJeztah): Ping returns an error when failing to connect to the API; we should not swallow the error here, but instead return it.
+ return
+ }
+ cli.negotiateAPIVersionPing(ping)
+ }
+}
+
+// NegotiateAPIVersionPing downgrades the client's API version to match the
+// APIVersion in the ping response. If the API version in pingResponse is higher
+// than the maximum version supported by the client, it uses the client's maximum
+// version.
+//
+// If a manual override is in place, either through the "DOCKER_API_VERSION"
+// ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized
+// with a fixed version ([WithVersion]), no negotiation is performed.
+//
+// If the API server's ping response does not contain an API version, we assume
+// we are connected with an old daemon without API version negotiation support,
+// and downgrade to the latest version of the API before version negotiation was
+// added (1.24).
+func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) {
+ if !cli.manualOverride {
+ // Avoid concurrent modification of version-related fields
+ cli.negotiateLock.Lock()
+ defer cli.negotiateLock.Unlock()
+
+ cli.negotiateAPIVersionPing(pingResponse)
+ }
+}
+
+// negotiateAPIVersionPing queries the API and updates the version to match the
+// API version from the ping response.
+func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) {
+ // default to the latest version before versioning headers existed
+ if pingResponse.APIVersion == "" {
+ pingResponse.APIVersion = fallbackAPIVersion
+ }
+
+ // if the client is not initialized with a version, start with the latest supported version
+ if cli.version == "" {
+ cli.version = api.DefaultVersion
+ }
+
+ // if server version is lower than the client version, downgrade
+ if versions.LessThan(pingResponse.APIVersion, cli.version) {
+ cli.version = pingResponse.APIVersion
+ }
+
+ // Store the results, so that automatic API version negotiation (if enabled)
+ // won't be performed on the next request.
+ if cli.negotiateVersion {
+ cli.negotiated.Store(true)
+ }
+}
+
+// DaemonHost returns the host address used by the client
+func (cli *Client) DaemonHost() string {
+ return cli.host
+}
+
+// HTTPClient returns a copy of the HTTP client bound to the server
+func (cli *Client) HTTPClient() *http.Client {
+ c := *cli.client
+ return &c
+}
+
+// ParseHostURL parses a url string, validates the string is a host url, and
+// returns the parsed URL
+func ParseHostURL(host string) (*url.URL, error) {
+ proto, addr, ok := strings.Cut(host, "://")
+ if !ok || addr == "" {
+ return nil, errors.Errorf("unable to parse docker host `%s`", host)
+ }
+
+ var basePath string
+ if proto == "tcp" {
+ parsed, err := url.Parse("tcp://" + addr)
+ if err != nil {
+ return nil, err
+ }
+ addr = parsed.Host
+ basePath = parsed.Path
+ }
+ return &url.URL{
+ Scheme: proto,
+ Host: addr,
+ Path: basePath,
+ }, nil
+}
+
+func (cli *Client) dialerFromTransport() func(context.Context, string, string) (net.Conn, error) {
+ if cli.baseTransport == nil || cli.baseTransport.DialContext == nil {
+ return nil
+ }
+
+ if cli.baseTransport.TLSClientConfig != nil {
+ // When using a TLS config we don't use the configured dialer, but a fallback dialer instead.
+ // Note: it seems like this should use the normal dialer and wrap the returned net.Conn in a tls.Conn.
+ // It is unclear why it doesn't do that, but such a change is entirely unrelated to the change in this commit.
+ return nil
+ }
+ return cli.baseTransport.DialContext
+}
+
+// Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header,
+// that can be used for proxying the daemon connection. It is used by
+// ["docker dial-stdio"].
+//
+// ["docker dial-stdio"]: https://github.com/docker/cli/pull/1014
+func (cli *Client) Dialer() func(context.Context) (net.Conn, error) {
+ return cli.dialer()
+}
+
+func (cli *Client) dialer() func(context.Context) (net.Conn, error) {
+ return func(ctx context.Context) (net.Conn, error) {
+ if dialFn := cli.dialerFromTransport(); dialFn != nil {
+ return dialFn(ctx, cli.proto, cli.addr)
+ }
+ switch cli.proto {
+ case "unix":
+ return net.Dial(cli.proto, cli.addr)
+ case "npipe":
+ return sockets.DialPipe(cli.addr, 32*time.Second)
+ default:
+ if tlsConfig := cli.tlsConfig(); tlsConfig != nil {
+ return tls.Dial(cli.proto, cli.addr, tlsConfig)
+ }
+ return net.Dial(cli.proto, cli.addr)
+ }
+ }
+}
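
A short sketch of constructing a client with version negotiation enabled and observing the negotiated version, which exercises the `Ping`/`negotiateAPIVersionPing` path described above:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// FromEnv reads DOCKER_HOST and friends; WithAPIVersionNegotiation
	// defers the Ping-based downgrade described above to the first request.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ping, err := cli.Ping(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("server API version:", ping.APIVersion)
	fmt.Println("negotiated client version:", cli.ClientVersion())
}
```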
diff --git a/vendor/github.com/docker/docker/client/client_deprecated.go b/vendor/github.com/docker/docker/client/client_deprecated.go
new file mode 100644
index 0000000..9e366ce
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/client_deprecated.go
@@ -0,0 +1,27 @@
+package client
+
+import "net/http"
+
+// NewClient initializes a new API client for the given host and API version.
+// It uses the given http client as transport.
+// It also initializes the custom http headers to add to each request.
+//
+// It won't send any version information if the version number is empty. It is
+// highly recommended that you set a version or your client may break if the
+// server is upgraded.
+//
+// Deprecated: use [NewClientWithOpts] passing the [WithHost], [WithVersion],
+// [WithHTTPClient] and [WithHTTPHeaders] options. We recommend enabling API
+// version negotiation by passing the [WithAPIVersionNegotiation] option instead
+// of WithVersion.
+func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
+ return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders))
+}
+
+// NewEnvClient initializes a new API client based on environment variables.
+// See FromEnv for a list of supported environment variables.
+//
+// Deprecated: use [NewClientWithOpts] passing the [FromEnv] option.
+func NewEnvClient() (*Client, error) {
+ return NewClientWithOpts(FromEnv)
+}
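
For callers migrating off these deprecated constructors, a sketch of the equivalent `NewClientWithOpts` call; the custom header is a placeholder and `http.DefaultClient` stands in for whatever transport the old code passed:

```go
package main

import (
	"net/http"

	"github.com/docker/docker/client"
)

func main() {
	// Deprecated form:
	//   cli, err := client.NewClient(host, version, httpClient, headers)
	// Equivalent with functional options, using negotiation instead of a
	// pinned version:
	cli, err := client.NewClientWithOpts(
		client.WithHost("unix:///var/run/docker.sock"),
		client.WithHTTPClient(http.DefaultClient),
		client.WithHTTPHeaders(map[string]string{"X-Custom": "1"}),
		client.WithAPIVersionNegotiation(),
	)
	if err != nil {
		panic(err)
	}
	defer cli.Close()
}
```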
diff --git a/vendor/github.com/docker/docker/client/client_interfaces.go b/vendor/github.com/docker/docker/client/client_interfaces.go
new file mode 100644
index 0000000..f70d8ff
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/client_interfaces.go
@@ -0,0 +1,236 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "io"
+ "net"
+ "net/http"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/events"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/registry"
+ "github.com/docker/docker/api/types/swarm"
+ "github.com/docker/docker/api/types/system"
+ "github.com/docker/docker/api/types/volume"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// CommonAPIClient is the common methods between stable and experimental versions of APIClient.
+//
+// Deprecated: use [APIClient] instead. This type will be an alias for [APIClient] in the next release, and removed after.
+type CommonAPIClient = stableAPIClient
+
+// APIClient is an interface that clients that talk with a docker server must implement.
+type APIClient interface {
+ stableAPIClient
+ CheckpointAPIClient // CheckpointAPIClient is still experimental.
+}
+
+type stableAPIClient interface {
+ ConfigAPIClient
+ ContainerAPIClient
+ DistributionAPIClient
+ ImageAPIClient
+ NetworkAPIClient
+ PluginAPIClient
+ SystemAPIClient
+ VolumeAPIClient
+ ClientVersion() string
+ DaemonHost() string
+ HTTPClient() *http.Client
+ ServerVersion(ctx context.Context) (types.Version, error)
+ NegotiateAPIVersion(ctx context.Context)
+ NegotiateAPIVersionPing(types.Ping)
+ HijackDialer
+ Dialer() func(context.Context) (net.Conn, error)
+ Close() error
+ SwarmManagementAPIClient
+}
+
+// SwarmManagementAPIClient defines all methods for managing Swarm-specific
+// objects.
+type SwarmManagementAPIClient interface {
+ SwarmAPIClient
+ NodeAPIClient
+ ServiceAPIClient
+ SecretAPIClient
+ ConfigAPIClient
+}
+
+// HijackDialer defines methods for a hijack dialer.
+type HijackDialer interface {
+ DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error)
+}
+
+// ContainerAPIClient defines API client methods for the containers
+type ContainerAPIClient interface {
+ ContainerAttach(ctx context.Context, container string, options container.AttachOptions) (types.HijackedResponse, error)
+ ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (container.CommitResponse, error)
+ ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error)
+ ContainerDiff(ctx context.Context, container string) ([]container.FilesystemChange, error)
+ ContainerExecAttach(ctx context.Context, execID string, options container.ExecAttachOptions) (types.HijackedResponse, error)
+ ContainerExecCreate(ctx context.Context, container string, options container.ExecOptions) (container.ExecCreateResponse, error)
+ ContainerExecInspect(ctx context.Context, execID string) (container.ExecInspect, error)
+ ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error
+ ContainerExecStart(ctx context.Context, execID string, options container.ExecStartOptions) error
+ ContainerExport(ctx context.Context, container string) (io.ReadCloser, error)
+ ContainerInspect(ctx context.Context, container string) (container.InspectResponse, error)
+ ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (container.InspectResponse, []byte, error)
+ ContainerKill(ctx context.Context, container, signal string) error
+ ContainerList(ctx context.Context, options container.ListOptions) ([]container.Summary, error)
+ ContainerLogs(ctx context.Context, container string, options container.LogsOptions) (io.ReadCloser, error)
+ ContainerPause(ctx context.Context, container string) error
+ ContainerRemove(ctx context.Context, container string, options container.RemoveOptions) error
+ ContainerRename(ctx context.Context, container, newContainerName string) error
+ ContainerResize(ctx context.Context, container string, options container.ResizeOptions) error
+ ContainerRestart(ctx context.Context, container string, options container.StopOptions) error
+ ContainerStatPath(ctx context.Context, container, path string) (container.PathStat, error)
+ ContainerStats(ctx context.Context, container string, stream bool) (container.StatsResponseReader, error)
+ ContainerStatsOneShot(ctx context.Context, container string) (container.StatsResponseReader, error)
+ ContainerStart(ctx context.Context, container string, options container.StartOptions) error
+ ContainerStop(ctx context.Context, container string, options container.StopOptions) error
+ ContainerTop(ctx context.Context, container string, arguments []string) (container.TopResponse, error)
+ ContainerUnpause(ctx context.Context, container string) error
+ ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.UpdateResponse, error)
+ ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error)
+ CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, container.PathStat, error)
+ CopyToContainer(ctx context.Context, container, path string, content io.Reader, options container.CopyToContainerOptions) error
+ ContainersPrune(ctx context.Context, pruneFilters filters.Args) (container.PruneReport, error)
+}
+
+// DistributionAPIClient defines API client methods for the registry
+type DistributionAPIClient interface {
+ DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error)
+}
+
+// ImageAPIClient defines API client methods for the images
+type ImageAPIClient interface {
+ ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
+ BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error)
+ BuildCancel(ctx context.Context, id string) error
+ ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error)
+ ImageImport(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error)
+
+ ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error)
+ ImagePull(ctx context.Context, ref string, options image.PullOptions) (io.ReadCloser, error)
+ ImagePush(ctx context.Context, ref string, options image.PushOptions) (io.ReadCloser, error)
+ ImageRemove(ctx context.Context, image string, options image.RemoveOptions) ([]image.DeleteResponse, error)
+ ImageSearch(ctx context.Context, term string, options registry.SearchOptions) ([]registry.SearchResult, error)
+ ImageTag(ctx context.Context, image, ref string) error
+ ImagesPrune(ctx context.Context, pruneFilter filters.Args) (image.PruneReport, error)
+
+ ImageInspect(ctx context.Context, image string, _ ...ImageInspectOption) (image.InspectResponse, error)
+ ImageHistory(ctx context.Context, image string, _ ...ImageHistoryOption) ([]image.HistoryResponseItem, error)
+ ImageLoad(ctx context.Context, input io.Reader, _ ...ImageLoadOption) (image.LoadResponse, error)
+ ImageSave(ctx context.Context, images []string, _ ...ImageSaveOption) (io.ReadCloser, error)
+
+ ImageAPIClientDeprecated
+}
+
+// ImageAPIClientDeprecated defines deprecated methods of the ImageAPIClient.
+type ImageAPIClientDeprecated interface {
+ // ImageInspectWithRaw returns the image information and its raw representation.
+ //
+ // Deprecated: Use [Client.ImageInspect] instead. Raw response can be obtained using the [ImageInspectWithRawResponse] option.
+ ImageInspectWithRaw(ctx context.Context, image string) (image.InspectResponse, []byte, error)
+}
+
+// NetworkAPIClient defines API client methods for the networks
+type NetworkAPIClient interface {
+ NetworkConnect(ctx context.Context, network, container string, config *network.EndpointSettings) error
+ NetworkCreate(ctx context.Context, name string, options network.CreateOptions) (network.CreateResponse, error)
+ NetworkDisconnect(ctx context.Context, network, container string, force bool) error
+ NetworkInspect(ctx context.Context, network string, options network.InspectOptions) (network.Inspect, error)
+ NetworkInspectWithRaw(ctx context.Context, network string, options network.InspectOptions) (network.Inspect, []byte, error)
+ NetworkList(ctx context.Context, options network.ListOptions) ([]network.Summary, error)
+ NetworkRemove(ctx context.Context, network string) error
+ NetworksPrune(ctx context.Context, pruneFilter filters.Args) (network.PruneReport, error)
+}
+
+// NodeAPIClient defines API client methods for the nodes
+type NodeAPIClient interface {
+ NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error)
+ NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
+ NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error
+ NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error
+}
+
+// PluginAPIClient defines API client methods for the plugins
+type PluginAPIClient interface {
+ PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error)
+ PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error
+ PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error
+ PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error
+ PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error)
+ PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error)
+ PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error)
+ PluginSet(ctx context.Context, name string, args []string) error
+ PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error)
+ PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error
+}
+
+// ServiceAPIClient defines API client methods for the services
+type ServiceAPIClient interface {
+ ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (swarm.ServiceCreateResponse, error)
+ ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error)
+ ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
+ ServiceRemove(ctx context.Context, serviceID string) error
+ ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error)
+ ServiceLogs(ctx context.Context, serviceID string, options container.LogsOptions) (io.ReadCloser, error)
+ TaskLogs(ctx context.Context, taskID string, options container.LogsOptions) (io.ReadCloser, error)
+ TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error)
+ TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
+}
+
+// SwarmAPIClient defines API client methods for the swarm
+type SwarmAPIClient interface {
+ SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error)
+ SwarmJoin(ctx context.Context, req swarm.JoinRequest) error
+ SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error)
+ SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error
+ SwarmLeave(ctx context.Context, force bool) error
+ SwarmInspect(ctx context.Context) (swarm.Swarm, error)
+ SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error
+}
+
+// SystemAPIClient defines API client methods for the system
+type SystemAPIClient interface {
+ Events(ctx context.Context, options events.ListOptions) (<-chan events.Message, <-chan error)
+ Info(ctx context.Context) (system.Info, error)
+ RegistryLogin(ctx context.Context, auth registry.AuthConfig) (registry.AuthenticateOKBody, error)
+ DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error)
+ Ping(ctx context.Context) (types.Ping, error)
+}
+
+// VolumeAPIClient defines API client methods for the volumes
+type VolumeAPIClient interface {
+ VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error)
+ VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error)
+ VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error)
+ VolumeList(ctx context.Context, options volume.ListOptions) (volume.ListResponse, error)
+ VolumeRemove(ctx context.Context, volumeID string, force bool) error
+ VolumesPrune(ctx context.Context, pruneFilter filters.Args) (volume.PruneReport, error)
+ VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error
+}
+
+// SecretAPIClient defines API client methods for secrets
+type SecretAPIClient interface {
+ SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error)
+ SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error)
+ SecretRemove(ctx context.Context, id string) error
+ SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error)
+ SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error
+}
+
+// ConfigAPIClient defines API client methods for configs
+type ConfigAPIClient interface {
+ ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error)
+ ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error)
+ ConfigRemove(ctx context.Context, id string) error
+ ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error)
+ ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error
+}
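
Because these interfaces are exported, callers can depend on a narrow slice of the client rather than the full `APIClient`, which keeps tests small. A sketch, with `countVolumes` as a hypothetical helper:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
)

// countVolumes depends only on the narrow VolumeAPIClient interface, so a
// test can substitute a fake that returns canned ListResponse values.
func countVolumes(ctx context.Context, c client.VolumeAPIClient) (int, error) {
	resp, err := c.VolumeList(ctx, volume.ListOptions{})
	if err != nil {
		return 0, err
	}
	return len(resp.Volumes), nil
}

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	n, err := countVolumes(context.Background(), cli)
	if err != nil {
		panic(err)
	}
	fmt.Println("volumes:", n)
}
```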
diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go
new file mode 100644
index 0000000..9fe78ea
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/client_unix.go
@@ -0,0 +1,7 @@
+//go:build !windows
+
+package client // import "github.com/docker/docker/client"
+
+// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST
+// (EnvOverrideHost) environment variable is unset or empty.
+const DefaultDockerHost = "unix:///var/run/docker.sock"
diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go
new file mode 100644
index 0000000..56572d1
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/client_windows.go
@@ -0,0 +1,5 @@
+package client // import "github.com/docker/docker/client"
+
+// DefaultDockerHost defines the OS-specific default host used if the DOCKER_HOST
+// (EnvOverrideHost) environment variable is unset or empty.
+const DefaultDockerHost = "npipe:////./pipe/docker_engine"
diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go
new file mode 100644
index 0000000..c7ea6d2
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_create.go
@@ -0,0 +1,25 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// ConfigCreate creates a new config.
+func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) {
+ var response types.ConfigCreateResponse
+ if err := cli.NewVersionError(ctx, "1.30", "config create"); err != nil {
+ return response, err
+ }
+ resp, err := cli.post(ctx, "/configs/create", nil, config, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.Body).Decode(&response)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go
new file mode 100644
index 0000000..679a42c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_inspect.go
@@ -0,0 +1,37 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "io"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// ConfigInspectWithRaw returns the config information with raw data.
+func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) {
+ id, err := trimID("contig", id)
+ if err != nil {
+ return swarm.Config{}, nil, err
+ }
+ if err := cli.NewVersionError(ctx, "1.30", "config inspect"); err != nil {
+ return swarm.Config{}, nil, err
+ }
+ resp, err := cli.get(ctx, "/configs/"+id, nil, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return swarm.Config{}, nil, err
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return swarm.Config{}, nil, err
+ }
+
+ var config swarm.Config
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&config)
+
+ return config, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go
new file mode 100644
index 0000000..7e4a8ea
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_list.go
@@ -0,0 +1,38 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// ConfigList returns the list of configs.
+func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) {
+ if err := cli.NewVersionError(ctx, "1.30", "config list"); err != nil {
+ return nil, err
+ }
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToJSON(options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/configs", query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ var configs []swarm.Config
+ err = json.NewDecoder(resp.Body).Decode(&configs)
+ return configs, err
+}
diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go
new file mode 100644
index 0000000..a2955c6
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_remove.go
@@ -0,0 +1,17 @@
+package client // import "github.com/docker/docker/client"
+
+import "context"
+
+// ConfigRemove removes a config.
+func (cli *Client) ConfigRemove(ctx context.Context, id string) error {
+ id, err := trimID("config", id)
+ if err != nil {
+ return err
+ }
+ if err := cli.NewVersionError(ctx, "1.30", "config remove"); err != nil {
+ return err
+ }
+ resp, err := cli.delete(ctx, "/configs/"+id, nil, nil)
+ defer ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go
new file mode 100644
index 0000000..ddb219c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_update.go
@@ -0,0 +1,24 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// ConfigUpdate attempts to update a config.
+func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error {
+ id, err := trimID("config", id)
+ if err != nil {
+ return err
+ }
+ if err := cli.NewVersionError(ctx, "1.30", "config update"); err != nil {
+ return err
+ }
+ query := url.Values{}
+ query.Set("version", version.String())
+ resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go
new file mode 100644
index 0000000..2e7a13e
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_attach.go
@@ -0,0 +1,65 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+)
+
+// ContainerAttach attaches a connection to a container in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+//
+// The stream format on the response will be in one of two formats:
+//
+// If the container is using a TTY, there is only a single stream (stdout), and
+// data is copied directly from the container output stream, no extra
+// multiplexing or headers.
+//
+// If the container is *not* using a TTY, streams for stdout and stderr are
+// multiplexed.
+// The format of the multiplexed stream is as follows:
+//
+// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
+//
+// STREAM_TYPE can be 1 for stdout and 2 for stderr
+//
+// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
+// This is the size of OUTPUT.
+//
+// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
+// stream.
+func (cli *Client) ContainerAttach(ctx context.Context, containerID string, options container.AttachOptions) (types.HijackedResponse, error) {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return types.HijackedResponse{}, err
+ }
+
+ query := url.Values{}
+ if options.Stream {
+ query.Set("stream", "1")
+ }
+ if options.Stdin {
+ query.Set("stdin", "1")
+ }
+ if options.Stdout {
+ query.Set("stdout", "1")
+ }
+ if options.Stderr {
+ query.Set("stderr", "1")
+ }
+ if options.DetachKeys != "" {
+ query.Set("detachKeys", options.DetachKeys)
+ }
+ if options.Logs {
+ query.Set("logs", "1")
+ }
+
+ return cli.postHijacked(ctx, "/containers/"+containerID+"/attach", query, nil, http.Header{
+ "Content-Type": {"text/plain"},
+ })
+}
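
A minimal usage sketch (not part of the vendored code; "my-container" is a placeholder ID and the container is assumed to run without a TTY), showing how the multiplexed stream described above is demultiplexed with stdcopy.StdCopy:

package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Attach to stdout/stderr of a (placeholder) running container.
	hj, err := cli.ContainerAttach(context.Background(), "my-container", container.AttachOptions{
		Stream: true,
		Stdout: true,
		Stderr: true,
	})
	if err != nil {
		panic(err)
	}
	defer hj.Close()

	// Split the stdout/stderr frames of the multiplexed stream.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, hj.Reader); err != nil {
		panic(err)
	}
}
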
diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go
new file mode 100644
index 0000000..9b46a1f
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_commit.go
@@ -0,0 +1,60 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "net/url"
+
+ "github.com/distribution/reference"
+ "github.com/docker/docker/api/types/container"
+)
+
+// ContainerCommit applies changes to a container and creates a new tagged image.
+func (cli *Client) ContainerCommit(ctx context.Context, containerID string, options container.CommitOptions) (container.CommitResponse, error) {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return container.CommitResponse{}, err
+ }
+
+ var repository, tag string
+ if options.Reference != "" {
+ ref, err := reference.ParseNormalizedNamed(options.Reference)
+ if err != nil {
+ return container.CommitResponse{}, err
+ }
+
+ if _, isCanonical := ref.(reference.Canonical); isCanonical {
+ return container.CommitResponse{}, errors.New("refusing to create a tag with a digest reference")
+ }
+ ref = reference.TagNameOnly(ref)
+
+ if tagged, ok := ref.(reference.Tagged); ok {
+ tag = tagged.Tag()
+ }
+ repository = reference.FamiliarName(ref)
+ }
+
+ query := url.Values{}
+ query.Set("container", containerID)
+ query.Set("repo", repository)
+ query.Set("tag", tag)
+ query.Set("comment", options.Comment)
+ query.Set("author", options.Author)
+ for _, change := range options.Changes {
+ query.Add("changes", change)
+ }
+ if !options.Pause {
+ query.Set("pause", "0")
+ }
+
+ var response container.CommitResponse
+ resp, err := cli.post(ctx, "/commit", query, options.Config, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.Body).Decode(&response)
+ return response, err
+}
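
A sketch of calling ContainerCommit (not part of the vendored code; the function name, container ID, and "example/app:snapshot" reference are hypothetical). Per the code above, a digest reference is refused and an untagged name is defaulted to :latest by reference.TagNameOnly:

package example

import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// commitContainer commits a container's filesystem to a new tagged image
// and returns the new image ID.
func commitContainer(ctx context.Context, cli *client.Client, id string) (string, error) {
	resp, err := cli.ContainerCommit(ctx, id, container.CommitOptions{
		Reference: "example/app:snapshot", // must not carry a digest
		Comment:   "state after setup",
		Pause:     true,
	})
	if err != nil {
		return "", err
	}
	return resp.ID, nil
}
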
diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go
new file mode 100644
index 0000000..39584d3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_copy.go
@@ -0,0 +1,104 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/api/types/container"
+)
+
+// ContainerStatPath returns stat information about a path inside the container filesystem.
+func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (container.PathStat, error) {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return container.PathStat{}, err
+ }
+
+ query := url.Values{}
+ query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
+
+ resp, err := cli.head(ctx, "/containers/"+containerID+"/archive", query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return container.PathStat{}, err
+ }
+ return getContainerPathStatFromHeader(resp.Header)
+}
+
+// CopyToContainer copies content into the container filesystem.
+// Note that `content` must be a Reader for a TAR archive
+func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options container.CopyToContainerOptions) error {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return err
+ }
+
+ query := url.Values{}
+ query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API.
+ // Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
+ if !options.AllowOverwriteDirWithFile {
+ query.Set("noOverwriteDirNonDir", "true")
+ }
+
+ if options.CopyUIDGID {
+ query.Set("copyUIDGID", "true")
+ }
+
+ response, err := cli.putRaw(ctx, "/containers/"+containerID+"/archive", query, content, nil)
+ defer ensureReaderClosed(response)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// CopyFromContainer retrieves content from the container and returns it as a Reader
+// for a TAR archive, so it can be processed on the host. It's up to the caller to close the reader.
+func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, container.PathStat, error) {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return nil, container.PathStat{}, err
+ }
+
+ query := make(url.Values, 1)
+ query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.
+
+ resp, err := cli.get(ctx, "/containers/"+containerID+"/archive", query, nil)
+ if err != nil {
+ return nil, container.PathStat{}, err
+ }
+
+ // In order to get the copy behavior right, we need to know information
+ // about both the source and the destination. The response headers include
+ // stat info about the source that we can use in deciding exactly how to
+ // copy it locally. Along with the stat info about the local destination,
+ // we have everything we need to handle the multiple possibilities there
+ // can be when copying a file/dir from one location to another file/dir.
+ stat, err := getContainerPathStatFromHeader(resp.Header)
+ if err != nil {
+ return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err)
+ }
+ return resp.Body, stat, err
+}
+
+func getContainerPathStatFromHeader(header http.Header) (container.PathStat, error) {
+ var stat container.PathStat
+
+ encodedStat := header.Get("X-Docker-Container-Path-Stat")
+ statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))
+
+ err := json.NewDecoder(statDecoder).Decode(&stat)
+ if err != nil {
+ err = fmt.Errorf("unable to decode container path stat header: %s", err)
+ }
+
+ return stat, err
+}
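
Since CopyFromContainer hands back a raw TAR stream, the caller is expected to unpack it with archive/tar. A minimal sketch (not part of the vendored code; the helper name is hypothetical) that lists the archive's entry names:

package example

import (
	"archive/tar"
	"context"
	"io"

	"github.com/docker/docker/client"
)

// listArchive reads the TAR stream returned by CopyFromContainer and
// collects the entry names. srcPath is a path inside the container.
func listArchive(ctx context.Context, cli *client.Client, id, srcPath string) ([]string, error) {
	rdr, _, err := cli.CopyFromContainer(ctx, id, srcPath)
	if err != nil {
		return nil, err
	}
	defer rdr.Close()

	var names []string
	tr := tar.NewReader(rdr)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return names, nil
		}
		if err != nil {
			return nil, err
		}
		names = append(names, hdr.Name)
	}
}
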
diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go
new file mode 100644
index 0000000..9bb106f
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_create.go
@@ -0,0 +1,168 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "net/url"
+ "path"
+ "sort"
+ "strings"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/versions"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ContainerCreate creates a new container based on the given configuration.
+// It can be associated with a name, but it's not mandatory.
+func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) {
+ var response container.CreateResponse
+
+ // Make sure we negotiated (if the client is configured to do so),
+ // as code below contains API-version specific handling of options.
+ //
+ // Normally, version-negotiation (if enabled) would not happen until
+ // the API request is made.
+ if err := cli.checkVersion(ctx); err != nil {
+ return response, err
+ }
+
+ if err := cli.NewVersionError(ctx, "1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
+ return response, err
+ }
+ if err := cli.NewVersionError(ctx, "1.41", "specify container image platform"); platform != nil && err != nil {
+ return response, err
+ }
+ if err := cli.NewVersionError(ctx, "1.44", "specify health-check start interval"); config != nil && config.Healthcheck != nil && config.Healthcheck.StartInterval != 0 && err != nil {
+ return response, err
+ }
+ if err := cli.NewVersionError(ctx, "1.44", "specify mac-address per network"); hasEndpointSpecificMacAddress(networkingConfig) && err != nil {
+ return response, err
+ }
+
+ if hostConfig != nil {
+ if versions.LessThan(cli.ClientVersion(), "1.25") {
+ // When using API 1.24 and under, the client is responsible for removing the container
+ hostConfig.AutoRemove = false
+ }
+ if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") || versions.LessThan(cli.ClientVersion(), "1.40") {
+ // KernelMemory was added in API 1.40, and deprecated in API 1.42
+ hostConfig.KernelMemory = 0
+ }
+ if platform != nil && platform.OS == "linux" && versions.LessThan(cli.ClientVersion(), "1.42") {
+ // When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize
+ hostConfig.ConsoleSize = [2]uint{0, 0}
+ }
+ if versions.LessThan(cli.ClientVersion(), "1.44") {
+ for _, m := range hostConfig.Mounts {
+ if m.BindOptions != nil {
+ // ReadOnlyNonRecursive can be safely ignored when API < 1.44
+ if m.BindOptions.ReadOnlyForceRecursive {
+ return response, errors.New("bind-recursive=readonly requires API v1.44 or later")
+ }
+ if m.BindOptions.NonRecursive && versions.LessThan(cli.ClientVersion(), "1.40") {
+ return response, errors.New("bind-recursive=disabled requires API v1.40 or later")
+ }
+ }
+ }
+ }
+
+ hostConfig.CapAdd = normalizeCapabilities(hostConfig.CapAdd)
+ hostConfig.CapDrop = normalizeCapabilities(hostConfig.CapDrop)
+ }
+
+ // Since API 1.44, the container-wide MacAddress is deprecated and will trigger a WARNING if it's specified.
+ if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.44") {
+ config.MacAddress = "" //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
+ }
+
+ query := url.Values{}
+ if p := formatPlatform(platform); p != "" {
+ query.Set("platform", p)
+ }
+
+ if containerName != "" {
+ query.Set("name", containerName)
+ }
+
+ body := container.CreateRequest{
+ Config: config,
+ HostConfig: hostConfig,
+ NetworkingConfig: networkingConfig,
+ }
+
+ resp, err := cli.post(ctx, "/containers/create", query, body, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.Body).Decode(&response)
+ return response, err
+}
+
+// formatPlatform returns a formatted string representing the platform (e.g. linux/arm/v7).
+//
+// Similar to containerd's platforms.Format(), but allows components to be
+// omitted (e.g. pass "architecture" only, without "os"):
+// https://github.com/containerd/containerd/blob/v1.5.2/platforms/platforms.go#L243-L263
+func formatPlatform(platform *ocispec.Platform) string {
+ if platform == nil {
+ return ""
+ }
+ return path.Join(platform.OS, platform.Architecture, platform.Variant)
+}
+
+// hasEndpointSpecificMacAddress checks whether one of the endpoints in networkingConfig has a MacAddress defined.
+func hasEndpointSpecificMacAddress(networkingConfig *network.NetworkingConfig) bool {
+ if networkingConfig == nil {
+ return false
+ }
+ for _, endpoint := range networkingConfig.EndpointsConfig {
+ if endpoint.MacAddress != "" {
+ return true
+ }
+ }
+ return false
+}
+
+// allCapabilities is a magic value for "all capabilities"
+const allCapabilities = "ALL"
+
+// normalizeCapabilities normalizes capabilities to their canonical form,
+// removes duplicates, and sorts the results.
+//
+// It is similar to [github.com/docker/docker/oci/caps.NormalizeLegacyCapabilities],
+// but performs no validation based on supported capabilities.
+func normalizeCapabilities(caps []string) []string {
+ var normalized []string
+
+ unique := make(map[string]struct{})
+ for _, c := range caps {
+ c = normalizeCap(c)
+ if _, ok := unique[c]; ok {
+ continue
+ }
+ unique[c] = struct{}{}
+ normalized = append(normalized, c)
+ }
+
+ sort.Strings(normalized)
+ return normalized
+}
+
+// normalizeCap normalizes a capability to its canonical format by upper-casing
+// and adding a "CAP_" prefix (if not yet present). It also accepts the "ALL"
+// magic-value.
+func normalizeCap(cap string) string {
+ cap = strings.ToUpper(cap)
+ if cap == allCapabilities {
+ return cap
+ }
+ if !strings.HasPrefix(cap, "CAP_") {
+ cap = "CAP_" + cap
+ }
+ return cap
+}
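
A sketch of the create-then-start flow (not part of the vendored code; the function name, "alpine:latest" image, and container name are placeholders). Note how ContainerCreate normalizes HostConfig.CapAdd before sending the request, as implemented above:

package example

import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// runContainer creates a named container from an image and starts it.
func runContainer(ctx context.Context, cli *client.Client) (string, error) {
	created, err := cli.ContainerCreate(ctx,
		&container.Config{Image: "alpine:latest", Cmd: []string{"sleep", "60"}},
		// Both entries normalize and deduplicate to a single "CAP_NET_ADMIN".
		&container.HostConfig{CapAdd: []string{"net_admin", "NET_ADMIN"}},
		nil, // networking config
		nil, // platform
		"example-container")
	if err != nil {
		return "", err
	}
	if err := cli.ContainerStart(ctx, created.ID, container.StartOptions{}); err != nil {
		return "", err
	}
	return created.ID, nil
}
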
diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go
new file mode 100644
index 0000000..5240189
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_diff.go
@@ -0,0 +1,30 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types/container"
+)
+
+// ContainerDiff shows differences in a container filesystem since it was started.
+func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.FilesystemChange, error) {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ var changes []container.FilesystemChange
+ err = json.NewDecoder(resp.Body).Decode(&changes)
+ if err != nil {
+ return nil, err
+ }
+ return changes, nil
+}
diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go
new file mode 100644
index 0000000..a39ec71
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_exec.go
@@ -0,0 +1,81 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/versions"
+)
+
+// ContainerExecCreate creates a new exec configuration to run an exec process.
+func (cli *Client) ContainerExecCreate(ctx context.Context, containerID string, options container.ExecOptions) (container.ExecCreateResponse, error) {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return container.ExecCreateResponse{}, err
+ }
+
+ // Make sure we negotiated (if the client is configured to do so),
+ // as code below contains API-version specific handling of options.
+ //
+ // Normally, version-negotiation (if enabled) would not happen until
+ // the API request is made.
+ if err := cli.checkVersion(ctx); err != nil {
+ return container.ExecCreateResponse{}, err
+ }
+
+ if err := cli.NewVersionError(ctx, "1.25", "env"); len(options.Env) != 0 && err != nil {
+ return container.ExecCreateResponse{}, err
+ }
+ if versions.LessThan(cli.ClientVersion(), "1.42") {
+ options.ConsoleSize = nil
+ }
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/exec", nil, options, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return container.ExecCreateResponse{}, err
+ }
+
+ var response container.ExecCreateResponse
+ err = json.NewDecoder(resp.Body).Decode(&response)
+ return response, err
+}
+
+// ContainerExecStart starts an exec process already created in the docker host.
+func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config container.ExecStartOptions) error {
+ if versions.LessThan(cli.ClientVersion(), "1.42") {
+ config.ConsoleSize = nil
+ }
+ resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
+ ensureReaderClosed(resp)
+ return err
+}
+
+// ContainerExecAttach attaches a connection to an exec process in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config container.ExecAttachOptions) (types.HijackedResponse, error) {
+ if versions.LessThan(cli.ClientVersion(), "1.42") {
+ config.ConsoleSize = nil
+ }
+ return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, http.Header{
+ "Content-Type": {"application/json"},
+ })
+}
+
+// ContainerExecInspect returns information about a specific exec process on the docker host.
+func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (container.ExecInspect, error) {
+ var response container.ExecInspect
+ resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.Body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
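
The usual exec flow combines the four calls above: create, attach (which also starts the process), drain the output, then inspect for the exit code. A sketch under those assumptions (not part of the vendored code; the helper name and command are hypothetical):

package example

import (
	"context"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

// execInContainer runs a command in a container and returns its exit code.
func execInContainer(ctx context.Context, cli *client.Client, id string) (int, error) {
	created, err := cli.ContainerExecCreate(ctx, id, container.ExecOptions{
		Cmd:          []string{"ls", "-l", "/"},
		AttachStdout: true,
		AttachStderr: true,
	})
	if err != nil {
		return -1, err
	}

	// Attaching to the exec instance also starts it.
	hj, err := cli.ContainerExecAttach(ctx, created.ID, container.ExecAttachOptions{})
	if err != nil {
		return -1, err
	}
	defer hj.Close()
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, hj.Reader); err != nil {
		return -1, err
	}

	inspect, err := cli.ContainerExecInspect(ctx, created.ID)
	if err != nil {
		return -1, err
	}
	return inspect.ExitCode, nil
}
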
diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go
new file mode 100644
index 0000000..360d527
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_export.go
@@ -0,0 +1,24 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "io"
+ "net/url"
+)
+
+// ContainerExport retrieves the raw contents of a container
+// and returns them as an io.ReadCloser. It's up to the caller
+// to close the stream.
+func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return resp.Body, nil
+}
diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go
new file mode 100644
index 0000000..6000318
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_inspect.go
@@ -0,0 +1,57 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "io"
+ "net/url"
+
+ "github.com/docker/docker/api/types/container"
+)
+
+// ContainerInspect returns the container information.
+func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (container.InspectResponse, error) {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return container.InspectResponse{}, err
+ }
+
+ resp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return container.InspectResponse{}, err
+ }
+
+ var response container.InspectResponse
+ err = json.NewDecoder(resp.Body).Decode(&response)
+ return response, err
+}
+
+// ContainerInspectWithRaw returns the container information and its raw representation.
+func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (container.InspectResponse, []byte, error) {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return container.InspectResponse{}, nil, err
+ }
+
+ query := url.Values{}
+ if getSize {
+ query.Set("size", "1")
+ }
+ resp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return container.InspectResponse{}, nil, err
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return container.InspectResponse{}, nil, err
+ }
+
+ var response container.InspectResponse
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go
new file mode 100644
index 0000000..22767ae
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_kill.go
@@ -0,0 +1,23 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+)
+
+// ContainerKill terminates the container process but does not remove the container from the docker host.
+func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return err
+ }
+
+ query := url.Values{}
+ if signal != "" {
+ query.Set("signal", signal)
+ }
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go
new file mode 100644
index 0000000..510bcdf
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_list.go
@@ -0,0 +1,56 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+)
+
+// ContainerList returns the list of containers in the docker host.
+func (cli *Client) ContainerList(ctx context.Context, options container.ListOptions) ([]container.Summary, error) {
+ query := url.Values{}
+
+ if options.All {
+ query.Set("all", "1")
+ }
+
+ if options.Limit > 0 {
+ query.Set("limit", strconv.Itoa(options.Limit))
+ }
+
+ if options.Since != "" {
+ query.Set("since", options.Since)
+ }
+
+ if options.Before != "" {
+ query.Set("before", options.Before)
+ }
+
+ if options.Size {
+ query.Set("size", "1")
+ }
+
+ if options.Filters.Len() > 0 {
+ //nolint:staticcheck // ignore SA1019 for old code
+ filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/containers/json", query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ var containers []container.Summary
+ err = json.NewDecoder(resp.Body).Decode(&containers)
+ return containers, err
+}
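
A short sketch of listing with a filter (not part of the vendored code; the helper name is hypothetical), using the filters.Args machinery that ContainerList serializes into the "filters" query parameter:

package example

import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

// runningContainers lists only containers whose status is "running".
func runningContainers(ctx context.Context, cli *client.Client) ([]container.Summary, error) {
	return cli.ContainerList(ctx, container.ListOptions{
		All:     false,
		Filters: filters.NewArgs(filters.Arg("status", "running")),
	})
}
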
diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go
new file mode 100644
index 0000000..ae30f8d
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_logs.go
@@ -0,0 +1,85 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "io"
+ "net/url"
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ timetypes "github.com/docker/docker/api/types/time"
+ "github.com/pkg/errors"
+)
+
+// ContainerLogs returns the logs generated by a container in an io.ReadCloser.
+// It's up to the caller to close the stream.
+//
+// The stream format on the response will be in one of two formats:
+//
+// If the container is using a TTY, there is only a single stream (stdout), and
+// data is copied directly from the container output stream, no extra
+// multiplexing or headers.
+//
+// If the container is *not* using a TTY, streams for stdout and stderr are
+// multiplexed.
+// The format of the multiplexed stream is as follows:
+//
+// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
+//
+// STREAM_TYPE can be 1 for stdout and 2 for stderr
+//
+// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
+// This is the size of OUTPUT.
+//
+// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
+// stream.
+func (cli *Client) ContainerLogs(ctx context.Context, containerID string, options container.LogsOptions) (io.ReadCloser, error) {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return nil, err
+ }
+
+ query := url.Values{}
+ if options.ShowStdout {
+ query.Set("stdout", "1")
+ }
+
+ if options.ShowStderr {
+ query.Set("stderr", "1")
+ }
+
+ if options.Since != "" {
+ ts, err := timetypes.GetTimestamp(options.Since, time.Now())
+ if err != nil {
+ return nil, errors.Wrap(err, `invalid value for "since"`)
+ }
+ query.Set("since", ts)
+ }
+
+ if options.Until != "" {
+ ts, err := timetypes.GetTimestamp(options.Until, time.Now())
+ if err != nil {
+ return nil, errors.Wrap(err, `invalid value for "until"`)
+ }
+ query.Set("until", ts)
+ }
+
+ if options.Timestamps {
+ query.Set("timestamps", "1")
+ }
+
+ if options.Details {
+ query.Set("details", "1")
+ }
+
+ if options.Follow {
+ query.Set("follow", "1")
+ }
+ query.Set("tail", options.Tail)
+
+ resp, err := cli.get(ctx, "/containers/"+containerID+"/logs", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.Body, nil
+}
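
A sketch of fetching and demultiplexing logs (not part of the vendored code; the helper name is hypothetical and the container is assumed to run without a TTY). Per timetypes.GetTimestamp, Since/Until accept relative durations such as "10m" as well as timestamps:

package example

import (
	"context"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

// tailLogs prints the last 100 log lines from the past ten minutes.
func tailLogs(ctx context.Context, cli *client.Client, id string) error {
	rdr, err := cli.ContainerLogs(ctx, id, container.LogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Timestamps: true,
		Tail:       "100",
		Since:      "10m", // relative duration; timestamps are also accepted
	})
	if err != nil {
		return err
	}
	defer rdr.Close()
	// Demultiplex the stdout/stderr frames described in the doc comment above.
	_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, rdr)
	return err
}
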
diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go
new file mode 100644
index 0000000..5cc2984
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_pause.go
@@ -0,0 +1,15 @@
+package client // import "github.com/docker/docker/client"
+
+import "context"
+
+// ContainerPause pauses the main process of a given container without terminating it.
+func (cli *Client) ContainerPause(ctx context.Context, containerID string) error {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return err
+ }
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go
new file mode 100644
index 0000000..3176be5
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_prune.go
@@ -0,0 +1,35 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+)
+
+// ContainersPrune requests the daemon to delete unused data
+func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (container.PruneReport, error) {
+ if err := cli.NewVersionError(ctx, "1.25", "container prune"); err != nil {
+ return container.PruneReport{}, err
+ }
+
+ query, err := getFiltersQuery(pruneFilters)
+ if err != nil {
+ return container.PruneReport{}, err
+ }
+
+ resp, err := cli.post(ctx, "/containers/prune", query, nil, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return container.PruneReport{}, err
+ }
+
+ var report container.PruneReport
+ if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
+ return container.PruneReport{}, fmt.Errorf("Error retrieving disk usage: %v", err)
+ }
+
+ return report, nil
+}
diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go
new file mode 100644
index 0000000..6661351
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_remove.go
@@ -0,0 +1,32 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/docker/docker/api/types/container"
+)
+
+// ContainerRemove kills and removes a container from the docker host.
+func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options container.RemoveOptions) error {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return err
+ }
+
+ query := url.Values{}
+ if options.RemoveVolumes {
+ query.Set("v", "1")
+ }
+ if options.RemoveLinks {
+ query.Set("link", "1")
+ }
+
+ if options.Force {
+ query.Set("force", "1")
+ }
+
+ resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
+ defer ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go
new file mode 100644
index 0000000..0a09231
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_rename.go
@@ -0,0 +1,20 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+)
+
+// ContainerRename changes the name of a given container.
+func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return err
+ }
+
+ query := url.Values{}
+ query.Set("name", newContainerName)
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go
new file mode 100644
index 0000000..725c08a
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_resize.go
@@ -0,0 +1,38 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/container"
+)
+
+// ContainerResize changes the size of the tty for a container.
+func (cli *Client) ContainerResize(ctx context.Context, containerID string, options container.ResizeOptions) error {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return err
+ }
+ return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width)
+}
+
+// ContainerExecResize changes the size of the tty for an exec process running inside a container.
+func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error {
+ execID, err := trimID("exec", execID)
+ if err != nil {
+ return err
+ }
+ return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width)
+}
+
+func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error {
+ // FIXME(thaJeztah): the API / backend accepts uint32, but container.ResizeOptions uses uint.
+ query := url.Values{}
+ query.Set("h", strconv.FormatUint(uint64(height), 10))
+ query.Set("w", strconv.FormatUint(uint64(width), 10))
+
+ resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go
new file mode 100644
index 0000000..50559ba
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_restart.go
@@ -0,0 +1,41 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/versions"
+)
+
+// ContainerRestart stops and starts a container again.
+// It makes the daemon wait for the container to be up again for
+// a specific amount of time, given the timeout.
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options container.StopOptions) error {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return err
+ }
+
+ query := url.Values{}
+ if options.Timeout != nil {
+ query.Set("t", strconv.Itoa(*options.Timeout))
+ }
+ if options.Signal != "" {
+ // Make sure we negotiated (if the client is configured to do so),
+ // as code below contains API-version specific handling of options.
+ //
+ // Normally, version-negotiation (if enabled) would not happen until
+ // the API request is made.
+ if err := cli.checkVersion(ctx); err != nil {
+ return err
+ }
+ if versions.GreaterThanOrEqualTo(cli.version, "1.42") {
+ query.Set("signal", options.Signal)
+ }
+ }
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go
new file mode 100644
index 0000000..b81ed3e
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_start.go
@@ -0,0 +1,28 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/docker/docker/api/types/container"
+)
+
+// ContainerStart sends a request to the docker daemon to start a container.
+func (cli *Client) ContainerStart(ctx context.Context, containerID string, options container.StartOptions) error {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return err
+ }
+
+ query := url.Values{}
+ if len(options.CheckpointID) != 0 {
+ query.Set("checkpoint", options.CheckpointID)
+ }
+ if len(options.CheckpointDir) != 0 {
+ query.Set("checkpoint-dir", options.CheckpointDir)
+ }
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go
new file mode 100644
index 0000000..a66b90c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_stats.go
@@ -0,0 +1,56 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/docker/docker/api/types/container"
+)
+
+// ContainerStats returns near realtime stats for a given container.
+// It's up to the caller to close the io.ReadCloser returned.
+func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (container.StatsResponseReader, error) {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return container.StatsResponseReader{}, err
+ }
+
+ query := url.Values{}
+ query.Set("stream", "0")
+ if stream {
+ query.Set("stream", "1")
+ }
+
+ resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
+ if err != nil {
+ return container.StatsResponseReader{}, err
+ }
+
+ return container.StatsResponseReader{
+ Body: resp.Body,
+ OSType: getDockerOS(resp.Header.Get("Server")),
+ }, nil
+}
+
+// ContainerStatsOneShot gets a single stat entry from a container.
+// It differs from `ContainerStats` in that the API does not wait to prime the stats.
+func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (container.StatsResponseReader, error) {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return container.StatsResponseReader{}, err
+ }
+
+ query := url.Values{}
+ query.Set("stream", "0")
+ query.Set("one-shot", "1")
+
+ resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
+ if err != nil {
+ return container.StatsResponseReader{}, err
+ }
+
+ return container.StatsResponseReader{
+ Body: resp.Body,
+ OSType: getDockerOS(resp.Header.Get("Server")),
+ }, nil
+}
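
A sketch of taking a single sample (not part of the vendored code; the helper name is hypothetical, and it assumes the stats payload decodes into container.StatsResponse from this vendored api/types/container package):

package example

import (
	"context"
	"encoding/json"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// sampleStats takes one non-priming stats sample and decodes it.
func sampleStats(ctx context.Context, cli *client.Client, id string) (container.StatsResponse, error) {
	var stats container.StatsResponse
	rdr, err := cli.ContainerStatsOneShot(ctx, id)
	if err != nil {
		return stats, err
	}
	defer rdr.Body.Close()
	err = json.NewDecoder(rdr.Body).Decode(&stats)
	return stats, err
}
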
diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go
new file mode 100644
index 0000000..eb0129c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_stop.go
@@ -0,0 +1,45 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/versions"
+)
+
+// ContainerStop stops a container. In case the container fails to stop
+// gracefully within a time frame specified by the timeout argument,
+// it is forcefully terminated (killed).
+//
+// If the timeout is nil, the container's StopTimeout value is used, if set,
+// otherwise the engine default. A negative timeout value can be specified,
+// meaning no timeout, i.e. no forceful termination is performed.
+func (cli *Client) ContainerStop(ctx context.Context, containerID string, options container.StopOptions) error {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return err
+ }
+
+ query := url.Values{}
+ if options.Timeout != nil {
+ query.Set("t", strconv.Itoa(*options.Timeout))
+ }
+ if options.Signal != "" {
+ // Make sure we negotiated (if the client is configured to do so),
+ // as code below contains API-version specific handling of options.
+ //
+ // Normally, version-negotiation (if enabled) would not happen until
+ // the API request is made.
+ if err := cli.checkVersion(ctx); err != nil {
+ return err
+ }
+ if versions.GreaterThanOrEqualTo(cli.version, "1.42") {
+ query.Set("signal", options.Signal)
+ }
+ }
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
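
A sketch of the timeout semantics described above (not part of the vendored code; the helper name is hypothetical): a non-nil Timeout bounds the graceful stop, nil falls back to the container's StopTimeout or the engine default, and a negative value means no forceful kill:

package example

import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// stopGracefully gives the container ten seconds to exit before it is killed.
func stopGracefully(ctx context.Context, cli *client.Client, id string) error {
	timeout := 10 // seconds; a negative value would disable forceful termination
	return cli.ContainerStop(ctx, id, container.StopOptions{Timeout: &timeout})
}
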
diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go
new file mode 100644
index 0000000..12c8b78
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_top.go
@@ -0,0 +1,33 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+ "strings"
+
+ "github.com/docker/docker/api/types/container"
+)
+
+// ContainerTop shows process information from within a container.
+func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.TopResponse, error) {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return container.TopResponse{}, err
+ }
+
+ query := url.Values{}
+ if len(arguments) > 0 {
+ query.Set("ps_args", strings.Join(arguments, " "))
+ }
+
+ resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return container.TopResponse{}, err
+ }
+
+ var response container.TopResponse
+ err = json.NewDecoder(resp.Body).Decode(&response)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go
new file mode 100644
index 0000000..f602549
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_unpause.go
@@ -0,0 +1,15 @@
+package client // import "github.com/docker/docker/client"
+
+import "context"
+
+// ContainerUnpause resumes the process execution within a container
+func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return err
+ }
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go
new file mode 100644
index 0000000..7f0cf62
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_update.go
@@ -0,0 +1,26 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/docker/docker/api/types/container"
+)
+
+// ContainerUpdate updates the resources of a container.
+func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.UpdateResponse, error) {
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ return container.UpdateResponse{}, err
+ }
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return container.UpdateResponse{}, err
+ }
+
+ var response container.UpdateResponse
+ err = json.NewDecoder(resp.Body).Decode(&response)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go
new file mode 100644
index 0000000..bda4a9e
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_wait.go
@@ -0,0 +1,122 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "io"
+ "net/url"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/versions"
+)
+
+const containerWaitErrorMsgLimit = 2 * 1024 /* Max: 2KiB */
+
+// ContainerWait waits until the specified container is in a certain state
+// indicated by the given condition, either "not-running" (default),
+// "next-exit", or "removed".
+//
+// If this client's API version is before 1.30, condition is ignored and
+// ContainerWait will return immediately with the two channels, as the server
+// will wait as if the condition were "not-running".
+//
+// If this client's API version is at least 1.30, ContainerWait blocks until
+// the request has been acknowledged by the server (with a response header),
+// then returns two channels on which the caller can wait for the exit status
+// of the container or an error if there was a problem either beginning the
+// wait request or in getting the response. This allows the caller to
+// synchronize ContainerWait with other calls, such as specifying a
+// "next-exit" condition before issuing a ContainerStart request.
+func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) {
+ resultC := make(chan container.WaitResponse)
+ errC := make(chan error, 1)
+
+ containerID, err := trimID("container", containerID)
+ if err != nil {
+ errC <- err
+ return resultC, errC
+ }
+
+ // Make sure we negotiated (if the client is configured to do so),
+ // as code below contains API-version specific handling of options.
+ //
+ // Normally, version-negotiation (if enabled) would not happen until
+ // the API request is made.
+ if err := cli.checkVersion(ctx); err != nil {
+ errC <- err
+ return resultC, errC
+ }
+ if versions.LessThan(cli.ClientVersion(), "1.30") {
+ return cli.legacyContainerWait(ctx, containerID)
+ }
+
+ query := url.Values{}
+ if condition != "" {
+ query.Set("condition", string(condition))
+ }
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil)
+ if err != nil {
+ defer ensureReaderClosed(resp)
+ errC <- err
+ return resultC, errC
+ }
+
+ go func() {
+ defer ensureReaderClosed(resp)
+
+ responseText := bytes.NewBuffer(nil)
+ stream := io.TeeReader(resp.Body, responseText)
+
+ var res container.WaitResponse
+ if err := json.NewDecoder(stream).Decode(&res); err != nil {
+ // NOTE(nicks): The /wait API does not work well with HTTP proxies.
+ // At any time, the proxy could cut off the response stream.
+ //
+ // But because the HTTP status has already been written, the proxy's
+ // only option is to write a plaintext error message.
+ //
+ // If there's a JSON parsing error, read the real error message
+ // off the body and send it to the client.
+ if errors.As(err, new(*json.SyntaxError)) {
+ _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit))
+ errC <- errors.New(responseText.String())
+ } else {
+ errC <- err
+ }
+ return
+ }
+
+ resultC <- res
+ }()
+
+ return resultC, errC
+}
+
+// legacyContainerWait returns immediately and doesn't have an option to wait
+// until the container is removed.
+func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.WaitResponse, <-chan error) {
+ resultC := make(chan container.WaitResponse)
+ errC := make(chan error)
+
+ go func() {
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil)
+ if err != nil {
+ errC <- err
+ return
+ }
+ defer ensureReaderClosed(resp)
+
+ var res container.WaitResponse
+ if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
+ errC <- err
+ return
+ }
+
+ resultC <- res
+ }()
+
+ return resultC, errC
+}
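
A sketch of the synchronization pattern the doc comment describes (not part of the vendored code; the helper name is hypothetical): on API 1.30+, ContainerWait returns once the server has acknowledged the request, so a "next-exit" wait can be registered before issuing ContainerStart:

package example

import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// runAndWait starts a container and blocks until it exits, returning its status code.
func runAndWait(ctx context.Context, cli *client.Client, id string) (int64, error) {
	// Register the wait before starting, so the exit cannot be missed.
	waitC, errC := cli.ContainerWait(ctx, id, container.WaitConditionNextExit)

	if err := cli.ContainerStart(ctx, id, container.StartOptions{}); err != nil {
		return -1, err
	}

	select {
	case res := <-waitC:
		return res.StatusCode, nil
	case err := <-errC:
		return -1, err
	}
}
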
diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go
new file mode 100644
index 0000000..ed78812
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/disk_usage.go
@@ -0,0 +1,33 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+)
+
+// DiskUsage requests the current data usage from the daemon
+func (cli *Client) DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) {
+ var query url.Values
+ if len(options.Types) > 0 {
+ query = url.Values{}
+ for _, t := range options.Types {
+ query.Add("type", string(t))
+ }
+ }
+
+ resp, err := cli.get(ctx, "/system/df", query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return types.DiskUsage{}, err
+ }
+
+ var du types.DiskUsage
+ if err := json.NewDecoder(resp.Body).Decode(&du); err != nil {
+ return types.DiskUsage{}, fmt.Errorf("Error retrieving disk usage: %v", err)
+ }
+ return du, nil
+}
diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go
new file mode 100644
index 0000000..b8654b2
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/distribution_inspect.go
@@ -0,0 +1,39 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types/registry"
+)
+
+// DistributionInspect returns the image digest with the full manifest.
+func (cli *Client) DistributionInspect(ctx context.Context, imageRef, encodedRegistryAuth string) (registry.DistributionInspect, error) {
+ if imageRef == "" {
+ return registry.DistributionInspect{}, objectNotFoundError{object: "distribution", id: imageRef}
+ }
+
+ if err := cli.NewVersionError(ctx, "1.30", "distribution inspect"); err != nil {
+ return registry.DistributionInspect{}, err
+ }
+
+ var headers http.Header
+ if encodedRegistryAuth != "" {
+ headers = http.Header{
+ registry.AuthHeader: {encodedRegistryAuth},
+ }
+ }
+
+ // Contact the registry to retrieve digest and platform information
+ resp, err := cli.get(ctx, "/distribution/"+imageRef+"/json", url.Values{}, headers)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return registry.DistributionInspect{}, err
+ }
+
+ var distributionInspect registry.DistributionInspect
+ err = json.NewDecoder(resp.Body).Decode(&distributionInspect)
+ return distributionInspect, err
+}
diff --git a/vendor/github.com/docker/docker/client/envvars.go b/vendor/github.com/docker/docker/client/envvars.go
new file mode 100644
index 0000000..61dd45c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/envvars.go
@@ -0,0 +1,90 @@
+package client // import "github.com/docker/docker/client"
+
+const (
+ // EnvOverrideHost is the name of the environment variable that can be used
+ // to override the default host to connect to (DefaultDockerHost).
+ //
+ // This env-var is read by FromEnv and WithHostFromEnv and when set to a
+ // non-empty value, takes precedence over the default host (which is platform
+ // specific), or any host already set.
+ EnvOverrideHost = "DOCKER_HOST"
+
+ // EnvOverrideAPIVersion is the name of the environment variable that can
+ // be used to override the API version to use. Value should be
+ // formatted as MAJOR.MINOR, for example, "1.19".
+ //
+ // This env-var is read by FromEnv and WithVersionFromEnv and when set to a
+ // non-empty value, takes precedence over API version negotiation.
+ //
+ // This environment variable should be used for debugging purposes only, as
+ // it can set the client to use an incompatible (or invalid) API version.
+ EnvOverrideAPIVersion = "DOCKER_API_VERSION"
+
+ // EnvOverrideCertPath is the name of the environment variable that can be
+ // used to specify the directory from which to load the TLS certificates
+ // (ca.pem, cert.pem, key.pem) from. These certificates are used to configure
+ // the Client for a TCP connection protected by TLS client authentication.
+ //
+ // TLS certificate verification is enabled by default if the Client is configured
+ // to use a TLS connection. Refer to EnvTLSVerify below to learn how to
+ // disable verification for testing purposes.
+ //
+ // WARNING: Access to the remote API is equivalent to root access to the
+ // host where the daemon runs. Do not expose the API without protection,
+ // and only if needed. Make sure you are familiar with the "daemon attack
+ // surface" (https://docs.docker.com/go/attack-surface/).
+ //
+ // For local access to the API, it is recommended to connect with the daemon
+ // using the default local socket connection (on Linux), or the named pipe
+ // (on Windows).
+ //
+ // If you need to access the API of a remote daemon, consider using an SSH
+ // (ssh://) connection, which is easier to set up, and requires no additional
+ // configuration if the host is accessible using ssh.
+ //
+ // If you cannot use the alternatives above, and you must expose the API over
+ // a TCP connection, refer to https://docs.docker.com/engine/security/protect-access/
+ // to learn how to configure the daemon and client to use a TCP connection
+ // with TLS client authentication. Make sure you know the differences between
+ // a regular TLS connection and a TLS connection protected by TLS client
+ // authentication, and verify that the API cannot be accessed by other clients.
+ EnvOverrideCertPath = "DOCKER_CERT_PATH"
+
+ // EnvTLSVerify is the name of the environment variable that can be used to
+ // enable or disable TLS certificate verification. When set to a non-empty
+ // value, TLS certificate verification is enabled, and the client is configured
+ // to use a TLS connection, using certificates from the default directories
+ // (within `~/.docker`); refer to EnvOverrideCertPath above for additional
+ // details.
+ //
+ // WARNING: Access to the remote API is equivalent to root access to the
+ // host where the daemon runs. Do not expose the API without protection,
+ // and only if needed. Make sure you are familiar with the "daemon attack
+ // surface" (https://docs.docker.com/go/attack-surface/).
+ //
+ // Before setting up your client and daemon to use a TCP connection with TLS
+ // client authentication, consider using one of the alternatives mentioned
+ // in EnvOverrideCertPath above.
+ //
+ // Disabling TLS certificate verification (for testing purposes)
+ //
+ // TLS certificate verification is enabled by default if the Client is configured
+ // to use a TLS connection, and it is highly recommended to keep verification
+ // enabled to prevent machine-in-the-middle attacks. Refer to the documentation
+ // at https://docs.docker.com/engine/security/protect-access/ and pages linked
+ // from that page to learn how to configure the daemon and client to use a
+ // TCP connection with TLS client authentication enabled.
+ //
+ // Set the "DOCKER_TLS_VERIFY" environment to an empty string ("") to
+ // disable TLS certificate verification. Disabling verification is insecure,
+ // so should only be done for testing purposes. From the Go documentation
+ // (https://pkg.go.dev/crypto/tls#Config):
+ //
+ // InsecureSkipVerify controls whether a client verifies the server's
+ // certificate chain and host name. If InsecureSkipVerify is true, crypto/tls
+ // accepts any certificate presented by the server and any host name in that
+ // certificate. In this mode, TLS is susceptible to machine-in-the-middle
+ // attacks unless custom verification is used. This should be used only for
+ // testing or in combination with VerifyConnection or VerifyPeerCertificate.
+ EnvTLSVerify = "DOCKER_TLS_VERIFY"
+)
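A minimal sketch of overriding the connection target through the constants above. The socket path is an assumed Linux default, and NewClientWithOpts, WithAPIVersionNegotiation, and Ping are assumed from elsewhere in this client package (outside this hunk).

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/docker/docker/client"
)

func main() {
	// Using the constant instead of the literal variable name avoids typos;
	// the socket path here is a placeholder default.
	os.Setenv(client.EnvOverrideHost, "unix:///var/run/docker.sock")

	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ping, err := cli.Ping(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("API version in use:", ping.APIVersion)
}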
diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go
new file mode 100644
index 0000000..609f92c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/errors.go
@@ -0,0 +1,85 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/docker/docker/api/types/versions"
+ "github.com/docker/docker/errdefs"
+)
+
+// errConnectionFailed is the error returned when connecting to the Docker daemon fails.
+type errConnectionFailed struct {
+ error
+}
+
+// Error returns a string representation of an errConnectionFailed
+func (e errConnectionFailed) Error() string {
+ return e.error.Error()
+}
+
+func (e errConnectionFailed) Unwrap() error {
+ return e.error
+}
+
+// IsErrConnectionFailed returns true if the error is caused by a failed connection.
+func IsErrConnectionFailed(err error) bool {
+ return errors.As(err, &errConnectionFailed{})
+}
+
+// ErrorConnectionFailed returns an error that includes the host in its message
+// when the connection to the docker daemon fails.
+//
+// Deprecated: this function was only used internally, and will be removed in the next release.
+func ErrorConnectionFailed(host string) error {
+ return connectionFailed(host)
+}
+
+// connectionFailed returns an error that includes the host in its message when
+// the connection to the docker daemon fails.
+func connectionFailed(host string) error {
+ var err error
+ if host == "" {
+ err = errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?")
+ } else {
+ err = fmt.Errorf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", host)
+ }
+ return errConnectionFailed{error: err}
+}
+
+// IsErrNotFound returns true if the error is a NotFound error, which is returned
+// by the API when some object is not found. It is an alias for [errdefs.IsNotFound].
+func IsErrNotFound(err error) bool {
+ return errdefs.IsNotFound(err)
+}
+
+type objectNotFoundError struct {
+ object string
+ id string
+}
+
+func (e objectNotFoundError) NotFound() {}
+
+func (e objectNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such %s: %s", e.object, e.id)
+}
+
+// NewVersionError returns an error if the API version used by the client is
+// lower than the required APIVersion.
+//
+// It performs API-version negotiation if the Client is configured with this
+// option, otherwise it assumes the latest API version is used.
+func (cli *Client) NewVersionError(ctx context.Context, APIrequired, feature string) error {
+ // Make sure we negotiated (if the client is configured to do so),
+ // as code below contains API-version specific handling of options.
+ //
+ // Normally, version-negotiation (if enabled) would not happen until
+ // the API request is made.
+ if err := cli.checkVersion(ctx); err != nil {
+ return err
+ }
+ if cli.version != "" && versions.LessThan(cli.version, APIrequired) {
+ return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version)
+ }
+ return nil
+}
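A short sketch of how callers are expected to branch on the helpers above; the container name is a placeholder, and ContainerInspect is assumed from the same client package.

package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

// classify demonstrates the intended use of the error helpers.
func classify(ctx context.Context, cli *client.Client) {
	_, err := cli.ContainerInspect(ctx, "no-such-container")
	switch {
	case err == nil:
		fmt.Println("container exists")
	case client.IsErrNotFound(err):
		fmt.Println("container does not exist")
	case client.IsErrConnectionFailed(err):
		fmt.Println("daemon unreachable:", err)
	default:
		fmt.Println("unexpected error:", err)
	}
}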
diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go
new file mode 100644
index 0000000..c71d2a0
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/events.go
@@ -0,0 +1,100 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+ "time"
+
+ "github.com/docker/docker/api/types/events"
+ "github.com/docker/docker/api/types/filters"
+ timetypes "github.com/docker/docker/api/types/time"
+)
+
+// Events returns a stream of events in the daemon. It's up to the caller to close the stream
+// by cancelling the context. Once the stream has been completely read, an io.EOF error will
+// be sent over the error channel. If an error is sent, all processing will be stopped. It's up
+// to the caller to reopen the stream in the event of an error by reinvoking this method.
+func (cli *Client) Events(ctx context.Context, options events.ListOptions) (<-chan events.Message, <-chan error) {
+ messages := make(chan events.Message)
+ errs := make(chan error, 1)
+
+ started := make(chan struct{})
+ go func() {
+ defer close(errs)
+
+ query, err := buildEventsQueryParams(cli.version, options)
+ if err != nil {
+ close(started)
+ errs <- err
+ return
+ }
+
+ resp, err := cli.get(ctx, "/events", query, nil)
+ if err != nil {
+ close(started)
+ errs <- err
+ return
+ }
+ defer resp.Body.Close()
+
+ decoder := json.NewDecoder(resp.Body)
+
+ close(started)
+ for {
+ select {
+ case <-ctx.Done():
+ errs <- ctx.Err()
+ return
+ default:
+ var event events.Message
+ if err := decoder.Decode(&event); err != nil {
+ errs <- err
+ return
+ }
+
+ select {
+ case messages <- event:
+ case <-ctx.Done():
+ errs <- ctx.Err()
+ return
+ }
+ }
+ }
+ }()
+ <-started
+
+ return messages, errs
+}
+
+func buildEventsQueryParams(cliVersion string, options events.ListOptions) (url.Values, error) {
+ query := url.Values{}
+ ref := time.Now()
+
+ if options.Since != "" {
+ ts, err := timetypes.GetTimestamp(options.Since, ref)
+ if err != nil {
+ return nil, err
+ }
+ query.Set("since", ts)
+ }
+
+ if options.Until != "" {
+ ts, err := timetypes.GetTimestamp(options.Until, ref)
+ if err != nil {
+ return nil, err
+ }
+ query.Set("until", ts)
+ }
+
+ if options.Filters.Len() > 0 {
+ //nolint:staticcheck // ignore SA1019 for old code
+ filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters)
+ if err != nil {
+ return nil, err
+ }
+ query.Set("filters", filterJSON)
+ }
+
+ return query, nil
+}
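A usage sketch of the Events stream described above; the filter value is illustrative, and cancelling the context is how the stream is closed.

package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

// watchContainers prints container events until the context is cancelled.
func watchContainers(ctx context.Context, cli *client.Client) error {
	msgs, errs := cli.Events(ctx, events.ListOptions{
		Filters: filters.NewArgs(filters.Arg("type", "container")),
	})
	for {
		select {
		case msg := <-msgs:
			fmt.Println(msg.Type, msg.Action, msg.Actor.ID)
		case err := <-errs:
			// io.EOF (or the context's error) marks the end of the stream.
			return err
		}
	}
}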
diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go
new file mode 100644
index 0000000..2c78fad
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/hijack.go
@@ -0,0 +1,139 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/versions"
+ "github.com/pkg/errors"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+)
+
+// postHijacked sends a POST request and hijacks the connection.
+func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) {
+ bodyEncoded, err := encodeData(body)
+ if err != nil {
+ return types.HijackedResponse{}, err
+ }
+ req, err := cli.buildRequest(ctx, http.MethodPost, cli.getAPIPath(ctx, path, query), bodyEncoded, headers)
+ if err != nil {
+ return types.HijackedResponse{}, err
+ }
+ conn, mediaType, err := setupHijackConn(cli.dialer(), req, "tcp")
+ if err != nil {
+ return types.HijackedResponse{}, err
+ }
+
+ if versions.LessThan(cli.ClientVersion(), "1.42") {
+ // Prior to 1.42, Content-Type is always set to raw-stream and not relevant
+ mediaType = ""
+ }
+
+ return types.NewHijackedResponse(conn, mediaType), nil
+}
+
+// DialHijack returns a hijacked connection with negotiated protocol proto.
+func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) {
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
+ if err != nil {
+ return nil, err
+ }
+ req = cli.addHeaders(req, meta)
+
+ conn, _, err := setupHijackConn(cli.Dialer(), req, proto)
+ return conn, err
+}
+
+func setupHijackConn(dialer func(context.Context) (net.Conn, error), req *http.Request, proto string) (_ net.Conn, _ string, retErr error) {
+ ctx := req.Context()
+ req.Header.Set("Connection", "Upgrade")
+ req.Header.Set("Upgrade", proto)
+
+ conn, err := dialer(ctx)
+ if err != nil {
+ return nil, "", errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
+ }
+ defer func() {
+ if retErr != nil {
+ conn.Close()
+ }
+ }()
+
+ // When we set up a TCP connection for hijack, there could be long periods
+ // of inactivity (a long-running command with no output) that in certain
+ // network setups may cause a connection timeout (ETIMEDOUT), leaving the
+ // client in an unknown state. Setting TCP KeepAlive on the socket connection
+ // will prevent that timeout unless the socket connection truly is broken.
+ if tcpConn, ok := conn.(*net.TCPConn); ok {
+ _ = tcpConn.SetKeepAlive(true)
+ _ = tcpConn.SetKeepAlivePeriod(30 * time.Second)
+ }
+
+ hc := &hijackedConn{conn, bufio.NewReader(conn)}
+
+ // Server hijacks the connection, error 'connection closed' expected
+ resp, err := otelhttp.NewTransport(hc).RoundTrip(req)
+ if err != nil {
+ return nil, "", err
+ }
+ if resp.StatusCode != http.StatusSwitchingProtocols {
+ _ = resp.Body.Close()
+ return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode)
+ }
+
+ if hc.r.Buffered() > 0 {
+ // If there is buffered content, wrap the connection. We return an
+ // object that implements CloseWrite if the underlying connection
+ // implements it.
+ if _, ok := hc.Conn.(types.CloseWriter); ok {
+ conn = &hijackedConnCloseWriter{hc}
+ } else {
+ conn = hc
+ }
+ } else {
+ hc.r.Reset(nil)
+ }
+
+ return conn, resp.Header.Get("Content-Type"), nil
+}
+
+// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case
+// that a) there was already buffered data in the http layer when Hijack() was
+// called, and b) the underlying net.Conn does *not* implement CloseWrite().
+// hijackedConn does not implement CloseWrite() either.
+type hijackedConn struct {
+ net.Conn
+ r *bufio.Reader
+}
+
+func (c *hijackedConn) RoundTrip(req *http.Request) (*http.Response, error) {
+ if err := req.Write(c.Conn); err != nil {
+ return nil, err
+ }
+ return http.ReadResponse(c.r, req)
+}
+
+func (c *hijackedConn) Read(b []byte) (int, error) {
+ return c.r.Read(b)
+}
+
+// hijackedConnCloseWriter is a hijackedConn which additionally implements
+// CloseWrite(). It is returned by setupHijackConn in the case that a) there
+// was already buffered data in the http layer when Hijack() was called, and b)
+// the underlying net.Conn *does* implement CloseWrite().
+type hijackedConnCloseWriter struct {
+ *hijackedConn
+}
+
+var _ types.CloseWriter = &hijackedConnCloseWriter{}
+
+func (c *hijackedConnCloseWriter) CloseWrite() error {
+ conn := c.Conn.(types.CloseWriter)
+ return conn.CloseWrite()
+}
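The hijack machinery above underpins the attach and exec streams. A sketch, assuming ContainerAttach with container.AttachOptions and the stdcopy demultiplexer behave as in the upstream packages; the container ID is a caller-supplied placeholder.

package example

import (
	"context"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

// attach streams a container's output over an upgraded (hijacked) connection.
func attach(ctx context.Context, cli *client.Client, containerID string) error {
	resp, err := cli.ContainerAttach(ctx, containerID, container.AttachOptions{
		Stream: true,
		Stdout: true,
		Stderr: true,
	})
	if err != nil {
		return err
	}
	defer resp.Close()

	// Without a TTY the stream is multiplexed; stdcopy splits it back out.
	_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader)
	return err
}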
diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go
new file mode 100644
index 0000000..6e2a406
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_build.go
@@ -0,0 +1,182 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/network"
+)
+
+// ImageBuild sends a request to the daemon to build images.
+// The Body in the response implements io.ReadCloser, and it's up to the caller
+// to close it.
+func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
+ query, err := cli.imageBuildOptionsToQuery(ctx, options)
+ if err != nil {
+ return types.ImageBuildResponse{}, err
+ }
+
+ buf, err := json.Marshal(options.AuthConfigs)
+ if err != nil {
+ return types.ImageBuildResponse{}, err
+ }
+
+ headers := http.Header{}
+ headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
+ headers.Set("Content-Type", "application/x-tar")
+
+ resp, err := cli.postRaw(ctx, "/build", query, buildContext, headers)
+ if err != nil {
+ return types.ImageBuildResponse{}, err
+ }
+
+ return types.ImageBuildResponse{
+ Body: resp.Body,
+ OSType: getDockerOS(resp.Header.Get("Server")),
+ }, nil
+}
+
+func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options types.ImageBuildOptions) (url.Values, error) {
+ query := url.Values{}
+ if len(options.Tags) > 0 {
+ query["t"] = options.Tags
+ }
+ if len(options.SecurityOpt) > 0 {
+ query["securityopt"] = options.SecurityOpt
+ }
+ if len(options.ExtraHosts) > 0 {
+ query["extrahosts"] = options.ExtraHosts
+ }
+ if options.SuppressOutput {
+ query.Set("q", "1")
+ }
+ if options.RemoteContext != "" {
+ query.Set("remote", options.RemoteContext)
+ }
+ if options.NoCache {
+ query.Set("nocache", "1")
+ }
+ if !options.Remove {
+ // only send value when opting out because the daemon's default is
+ // to remove intermediate containers after a successful build.
+ //
+ // TODO(thaJeztah): deprecate "Remove" option, and provide a "NoRemove" or "Keep" option instead.
+ query.Set("rm", "0")
+ }
+
+ if options.ForceRemove {
+ query.Set("forcerm", "1")
+ }
+
+ if options.PullParent {
+ query.Set("pull", "1")
+ }
+
+ if options.Squash {
+ if err := cli.NewVersionError(ctx, "1.25", "squash"); err != nil {
+ return query, err
+ }
+ query.Set("squash", "1")
+ }
+
+ if !container.Isolation.IsDefault(options.Isolation) {
+ query.Set("isolation", string(options.Isolation))
+ }
+
+ if options.CPUSetCPUs != "" {
+ query.Set("cpusetcpus", options.CPUSetCPUs)
+ }
+ if options.NetworkMode != "" && options.NetworkMode != network.NetworkDefault {
+ query.Set("networkmode", options.NetworkMode)
+ }
+ if options.CPUSetMems != "" {
+ query.Set("cpusetmems", options.CPUSetMems)
+ }
+ if options.CPUShares != 0 {
+ query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10))
+ }
+ if options.CPUQuota != 0 {
+ query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10))
+ }
+ if options.CPUPeriod != 0 {
+ query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10))
+ }
+ if options.Memory != 0 {
+ query.Set("memory", strconv.FormatInt(options.Memory, 10))
+ }
+ if options.MemorySwap != 0 {
+ query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10))
+ }
+ if options.CgroupParent != "" {
+ query.Set("cgroupparent", options.CgroupParent)
+ }
+ if options.ShmSize != 0 {
+ query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10))
+ }
+ if options.Dockerfile != "" {
+ query.Set("dockerfile", options.Dockerfile)
+ }
+ if options.Target != "" {
+ query.Set("target", options.Target)
+ }
+ if len(options.Ulimits) != 0 {
+ ulimitsJSON, err := json.Marshal(options.Ulimits)
+ if err != nil {
+ return query, err
+ }
+ query.Set("ulimits", string(ulimitsJSON))
+ }
+ if len(options.BuildArgs) != 0 {
+ buildArgsJSON, err := json.Marshal(options.BuildArgs)
+ if err != nil {
+ return query, err
+ }
+ query.Set("buildargs", string(buildArgsJSON))
+ }
+ if len(options.Labels) != 0 {
+ labelsJSON, err := json.Marshal(options.Labels)
+ if err != nil {
+ return query, err
+ }
+ query.Set("labels", string(labelsJSON))
+ }
+ if len(options.CacheFrom) != 0 {
+ cacheFromJSON, err := json.Marshal(options.CacheFrom)
+ if err != nil {
+ return query, err
+ }
+ query.Set("cachefrom", string(cacheFromJSON))
+ }
+ if options.SessionID != "" {
+ query.Set("session", options.SessionID)
+ }
+ if options.Platform != "" {
+ if err := cli.NewVersionError(ctx, "1.32", "platform"); err != nil {
+ return query, err
+ }
+ query.Set("platform", strings.ToLower(options.Platform))
+ }
+ if options.BuildID != "" {
+ query.Set("buildid", options.BuildID)
+ }
+ if options.Version != "" {
+ query.Set("version", string(options.Version))
+ }
+
+ if options.Outputs != nil {
+ outputsJSON, err := json.Marshal(options.Outputs)
+ if err != nil {
+ return query, err
+ }
+ query.Set("outputs", string(outputsJSON))
+ }
+ return query, nil
+}
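A minimal sketch of driving ImageBuild with an in-memory tar build context; the tag is hypothetical, and the JSON progress stream is simply copied to stdout.

package example

import (
	"archive/tar"
	"bytes"
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// buildHello builds a trivial image from a tar stream containing one Dockerfile.
func buildHello(ctx context.Context, cli *client.Client) error {
	dockerfile := []byte("FROM alpine:latest\nCMD [\"echo\", \"hello\"]\n")

	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0o644, Size: int64(len(dockerfile))}); err != nil {
		return err
	}
	if _, err := tw.Write(dockerfile); err != nil {
		return err
	}
	if err := tw.Close(); err != nil {
		return err
	}

	resp, err := cli.ImageBuild(ctx, &buf, types.ImageBuildOptions{
		Tags:       []string{"example/hello:latest"}, // hypothetical tag
		Dockerfile: "Dockerfile",
		Remove:     true,
	})
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// The body is a JSON progress stream and must be drained.
	_, err = io.Copy(os.Stdout, resp.Body)
	return err
}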
diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go
new file mode 100644
index 0000000..0357051
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_create.go
@@ -0,0 +1,40 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/distribution/reference"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/registry"
+)
+
+// ImageCreate creates a new image based on the given parent reference and options.
+// It returns the JSON content in the response body.
+func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error) {
+ ref, err := reference.ParseNormalizedNamed(parentReference)
+ if err != nil {
+ return nil, err
+ }
+
+ query := url.Values{}
+ query.Set("fromImage", reference.FamiliarName(ref))
+ query.Set("tag", getAPITagFromNamedRef(ref))
+ if options.Platform != "" {
+ query.Set("platform", strings.ToLower(options.Platform))
+ }
+ resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
+ if err != nil {
+ return nil, err
+ }
+ return resp.Body, nil
+}
+
+func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (*http.Response, error) {
+ return cli.post(ctx, "/images/create", query, nil, http.Header{
+ registry.AuthHeader: {registryAuth},
+ })
+}
diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go
new file mode 100644
index 0000000..49381fb
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_history.go
@@ -0,0 +1,56 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "github.com/docker/docker/api/types/image"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageHistoryWithPlatform sets the platform for the image history operation.
+func ImageHistoryWithPlatform(platform ocispec.Platform) ImageHistoryOption {
+ return imageHistoryOptionFunc(func(opt *imageHistoryOpts) error {
+ if opt.apiOptions.Platform != nil {
+ return fmt.Errorf("platform already set to %s", *opt.apiOptions.Platform)
+ }
+ opt.apiOptions.Platform = &platform
+ return nil
+ })
+}
+
+// ImageHistory returns the changes in an image in history format.
+func (cli *Client) ImageHistory(ctx context.Context, imageID string, historyOpts ...ImageHistoryOption) ([]image.HistoryResponseItem, error) {
+ query := url.Values{}
+
+ var opts imageHistoryOpts
+ for _, o := range historyOpts {
+ if err := o.Apply(&opts); err != nil {
+ return nil, err
+ }
+ }
+
+ if opts.apiOptions.Platform != nil {
+ if err := cli.NewVersionError(ctx, "1.48", "platform"); err != nil {
+ return nil, err
+ }
+
+ p, err := encodePlatform(opts.apiOptions.Platform)
+ if err != nil {
+ return nil, err
+ }
+ query.Set("platform", p)
+ }
+
+ resp, err := cli.get(ctx, "/images/"+imageID+"/history", query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ var history []image.HistoryResponseItem
+ err = json.NewDecoder(resp.Body).Decode(&history)
+ return history, err
+}
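A sketch of the platform-scoped history call, assuming a daemon that supports API 1.48 (per the version gate above); the image name and platform are illustrative.

package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// printHistory lists the layers of one platform of a multi-platform image.
func printHistory(ctx context.Context, cli *client.Client) error {
	history, err := cli.ImageHistory(ctx, "alpine:latest",
		client.ImageHistoryWithPlatform(ocispec.Platform{OS: "linux", Architecture: "amd64"}))
	if err != nil {
		return err
	}
	for _, layer := range history {
		fmt.Printf("%s\t%d\t%s\n", layer.ID, layer.Size, layer.CreatedBy)
	}
	return nil
}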
diff --git a/vendor/github.com/docker/docker/client/image_history_opts.go b/vendor/github.com/docker/docker/client/image_history_opts.go
new file mode 100644
index 0000000..6d3494d
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_history_opts.go
@@ -0,0 +1,19 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types/image"
+)
+
+// ImageHistoryOption is a type representing functional options for the image history operation.
+type ImageHistoryOption interface {
+ Apply(*imageHistoryOpts) error
+}
+type imageHistoryOptionFunc func(opt *imageHistoryOpts) error
+
+func (f imageHistoryOptionFunc) Apply(o *imageHistoryOpts) error {
+ return f(o)
+}
+
+type imageHistoryOpts struct {
+ apiOptions image.HistoryOptions
+}
diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go
new file mode 100644
index 0000000..5849d85
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_import.go
@@ -0,0 +1,48 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "io"
+ "net/url"
+ "strings"
+
+ "github.com/distribution/reference"
+ "github.com/docker/docker/api/types/image"
+)
+
+// ImageImport creates a new image from the given import source and options.
+// It returns the JSON content in the response body.
+func (cli *Client) ImageImport(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) {
+ if ref != "" {
+ // Check if the given image name can be resolved
+ if _, err := reference.ParseNormalizedNamed(ref); err != nil {
+ return nil, err
+ }
+ }
+
+ query := url.Values{}
+ if source.SourceName != "" {
+ query.Set("fromSrc", source.SourceName)
+ }
+ if ref != "" {
+ query.Set("repo", ref)
+ }
+ if options.Tag != "" {
+ query.Set("tag", options.Tag)
+ }
+ if options.Message != "" {
+ query.Set("message", options.Message)
+ }
+ if options.Platform != "" {
+ query.Set("platform", strings.ToLower(options.Platform))
+ }
+ for _, change := range options.Changes {
+ query.Add("changes", change)
+ }
+
+ resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.Body, nil
+}
diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go
new file mode 100644
index 0000000..1161195
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_inspect.go
@@ -0,0 +1,65 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/url"
+
+ "github.com/docker/docker/api/types/image"
+)
+
+// ImageInspect returns the image information.
+func (cli *Client) ImageInspect(ctx context.Context, imageID string, inspectOpts ...ImageInspectOption) (image.InspectResponse, error) {
+ if imageID == "" {
+ return image.InspectResponse{}, objectNotFoundError{object: "image", id: imageID}
+ }
+
+ var opts imageInspectOpts
+ for _, opt := range inspectOpts {
+ if err := opt.Apply(&opts); err != nil {
+ return image.InspectResponse{}, fmt.Errorf("error applying image inspect option: %w", err)
+ }
+ }
+
+ query := url.Values{}
+ if opts.apiOptions.Manifests {
+ if err := cli.NewVersionError(ctx, "1.48", "manifests"); err != nil {
+ return image.InspectResponse{}, err
+ }
+ query.Set("manifests", "1")
+ }
+
+ resp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return image.InspectResponse{}, err
+ }
+
+ buf := opts.raw
+ if buf == nil {
+ buf = &bytes.Buffer{}
+ }
+
+ if _, err := io.Copy(buf, resp.Body); err != nil {
+ return image.InspectResponse{}, err
+ }
+
+ var response image.InspectResponse
+ err = json.Unmarshal(buf.Bytes(), &response)
+ return response, err
+}
+
+// ImageInspectWithRaw returns the image information and its raw representation.
+//
+// Deprecated: Use [Client.ImageInspect] instead. Raw response can be obtained using the [ImageInspectWithRawResponse] option.
+func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (image.InspectResponse, []byte, error) {
+ var buf bytes.Buffer
+ resp, err := cli.ImageInspect(ctx, imageID, ImageInspectWithRawResponse(&buf))
+ if err != nil {
+ return image.InspectResponse{}, nil, err
+ }
+ return resp, buf.Bytes(), err
+}
diff --git a/vendor/github.com/docker/docker/client/image_inspect_opts.go b/vendor/github.com/docker/docker/client/image_inspect_opts.go
new file mode 100644
index 0000000..2607f36
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_inspect_opts.go
@@ -0,0 +1,50 @@
+package client
+
+import (
+ "bytes"
+
+ "github.com/docker/docker/api/types/image"
+)
+
+// ImageInspectOption is a type representing functional options for the image inspect operation.
+type ImageInspectOption interface {
+ Apply(*imageInspectOpts) error
+}
+type imageInspectOptionFunc func(opt *imageInspectOpts) error
+
+func (f imageInspectOptionFunc) Apply(o *imageInspectOpts) error {
+ return f(o)
+}
+
+// ImageInspectWithRawResponse instructs the client to additionally store the
+// raw inspect response in the provided buffer.
+func ImageInspectWithRawResponse(raw *bytes.Buffer) ImageInspectOption {
+ return imageInspectOptionFunc(func(opts *imageInspectOpts) error {
+ opts.raw = raw
+ return nil
+ })
+}
+
+// ImageInspectWithManifests sets manifests API option for the image inspect operation.
+// This option is only available for API version 1.48 and up.
+// With this option set, the image inspect operation response will have the
+// [image.InspectResponse.Manifests] field populated if the server is multi-platform capable.
+func ImageInspectWithManifests(manifests bool) ImageInspectOption {
+ return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error {
+ clientOpts.apiOptions.Manifests = manifests
+ return nil
+ })
+}
+
+// ImageInspectWithAPIOpts sets the API options for the image inspect operation.
+func ImageInspectWithAPIOpts(opts image.InspectOptions) ImageInspectOption {
+ return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error {
+ clientOpts.apiOptions = opts
+ return nil
+ })
+}
+
+type imageInspectOpts struct {
+ raw *bytes.Buffer
+ apiOptions image.InspectOptions
+}
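A sketch combining the options above: capture the raw JSON while decoding, and opt into manifests (API 1.48+). The image name is a placeholder.

package example

import (
	"bytes"
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

// inspectWithRaw decodes the inspect response while also keeping the raw JSON.
func inspectWithRaw(ctx context.Context, cli *client.Client) error {
	var raw bytes.Buffer
	resp, err := cli.ImageInspect(ctx, "alpine:latest",
		client.ImageInspectWithRawResponse(&raw),
		client.ImageInspectWithManifests(true), // requires API 1.48+
	)
	if err != nil {
		return err
	}
	fmt.Println(resp.ID, raw.Len(), "bytes of raw JSON")
	return nil
}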
diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go
new file mode 100644
index 0000000..e1911eb
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_list.go
@@ -0,0 +1,67 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/versions"
+)
+
+// ImageList returns a list of images in the docker host.
+//
+// Experimental: Setting the [options.Manifests] field will populate
+// [image.Summary.Manifests] with information about image manifests.
+// This is experimental and might change in the future without any backward
+// compatibility.
+func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) {
+ var images []image.Summary
+
+ // Make sure we negotiated (if the client is configured to do so),
+ // as code below contains API-version specific handling of options.
+ //
+ // Normally, version-negotiation (if enabled) would not happen until
+ // the API request is made.
+ if err := cli.checkVersion(ctx); err != nil {
+ return images, err
+ }
+
+ query := url.Values{}
+
+ optionFilters := options.Filters
+ referenceFilters := optionFilters.Get("reference")
+ if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 {
+ query.Set("filter", referenceFilters[0])
+ for _, filterValue := range referenceFilters {
+ optionFilters.Del("reference", filterValue)
+ }
+ }
+ if optionFilters.Len() > 0 {
+ //nolint:staticcheck // ignore SA1019 for old code
+ filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters)
+ if err != nil {
+ return images, err
+ }
+ query.Set("filters", filterJSON)
+ }
+ if options.All {
+ query.Set("all", "1")
+ }
+ if options.SharedSize && versions.GreaterThanOrEqualTo(cli.version, "1.42") {
+ query.Set("shared-size", "1")
+ }
+ if options.Manifests && versions.GreaterThanOrEqualTo(cli.version, "1.47") {
+ query.Set("manifests", "1")
+ }
+
+ resp, err := cli.get(ctx, "/images/json", query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return images, err
+ }
+
+ err = json.NewDecoder(resp.Body).Decode(&images)
+ return images, err
+}
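A sketch of listing with a reference filter; the pattern and fields printed are illustrative.

package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/client"
)

// listAlpine lists images whose reference matches a pattern.
func listAlpine(ctx context.Context, cli *client.Client) error {
	images, err := cli.ImageList(ctx, image.ListOptions{
		All:     true,
		Filters: filters.NewArgs(filters.Arg("reference", "alpine*")),
	})
	if err != nil {
		return err
	}
	for _, summary := range images {
		fmt.Println(summary.ID, summary.RepoTags)
	}
	return nil
}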
diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go
new file mode 100644
index 0000000..d83877d
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_load.go
@@ -0,0 +1,54 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types/image"
+)
+
+// ImageLoad loads an image in the docker host from the client host.
+// It's up to the caller to close the io.ReadCloser in the
+// [image.LoadResponse] returned by this function.
+//
+// Platform is an optional parameter that specifies the platform to load from
+// the provided multi-platform image. This only has an effect if the input image
+// is a multi-platform image.
+func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, loadOpts ...ImageLoadOption) (image.LoadResponse, error) {
+ var opts imageLoadOpts
+ for _, opt := range loadOpts {
+ if err := opt.Apply(&opts); err != nil {
+ return image.LoadResponse{}, err
+ }
+ }
+
+ query := url.Values{}
+ query.Set("quiet", "0")
+ if opts.apiOptions.Quiet {
+ query.Set("quiet", "1")
+ }
+ if len(opts.apiOptions.Platforms) > 0 {
+ if err := cli.NewVersionError(ctx, "1.48", "platform"); err != nil {
+ return image.LoadResponse{}, err
+ }
+
+ p, err := encodePlatforms(opts.apiOptions.Platforms...)
+ if err != nil {
+ return image.LoadResponse{}, err
+ }
+ query["platform"] = p
+ }
+
+ resp, err := cli.postRaw(ctx, "/images/load", query, input, http.Header{
+ "Content-Type": {"application/x-tar"},
+ })
+ if err != nil {
+ return image.LoadResponse{}, err
+ }
+ return image.LoadResponse{
+ Body: resp.Body,
+ JSON: resp.Header.Get("Content-Type") == "application/json",
+ }, nil
+}
diff --git a/vendor/github.com/docker/docker/client/image_load_opts.go b/vendor/github.com/docker/docker/client/image_load_opts.go
new file mode 100644
index 0000000..ebcedd4
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_load_opts.go
@@ -0,0 +1,41 @@
+package client
+
+import (
+ "fmt"
+
+ "github.com/docker/docker/api/types/image"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageLoadOption is a type representing functional options for the image load operation.
+type ImageLoadOption interface {
+ Apply(*imageLoadOpts) error
+}
+type imageLoadOptionFunc func(opt *imageLoadOpts) error
+
+func (f imageLoadOptionFunc) Apply(o *imageLoadOpts) error {
+ return f(o)
+}
+
+type imageLoadOpts struct {
+ apiOptions image.LoadOptions
+}
+
+// ImageLoadWithQuiet sets the quiet option for the image load operation.
+func ImageLoadWithQuiet(quiet bool) ImageLoadOption {
+ return imageLoadOptionFunc(func(opt *imageLoadOpts) error {
+ opt.apiOptions.Quiet = quiet
+ return nil
+ })
+}
+
+// ImageLoadWithPlatforms sets the platforms to be loaded from the image.
+func ImageLoadWithPlatforms(platforms ...ocispec.Platform) ImageLoadOption {
+ return imageLoadOptionFunc(func(opt *imageLoadOpts) error {
+ if opt.apiOptions.Platforms != nil {
+ return fmt.Errorf("platforms already set to %v", opt.apiOptions.Platforms)
+ }
+ opt.apiOptions.Platforms = platforms
+ return nil
+ })
+}
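A sketch of loading an image tarball with the quiet option; the path is a hypothetical archive, for example one produced by ImageSave or docker save.

package example

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/client"
)

// loadArchive loads an image tarball into the daemon.
func loadArchive(ctx context.Context, cli *client.Client, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	resp, err := cli.ImageLoad(ctx, f, client.ImageLoadWithQuiet(true))
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	_, err = io.Copy(os.Stdout, resp.Body)
	return err
}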
diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go
new file mode 100644
index 0000000..7c354d7
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_prune.go
@@ -0,0 +1,35 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/image"
+)
+
+// ImagesPrune requests the daemon to delete unused data.
+func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (image.PruneReport, error) {
+ if err := cli.NewVersionError(ctx, "1.25", "image prune"); err != nil {
+ return image.PruneReport{}, err
+ }
+
+ query, err := getFiltersQuery(pruneFilters)
+ if err != nil {
+ return image.PruneReport{}, err
+ }
+
+ resp, err := cli.post(ctx, "/images/prune", query, nil, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return image.PruneReport{}, err
+ }
+
+ var report image.PruneReport
+ if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
+ return image.PruneReport{}, fmt.Errorf("Error retrieving disk usage: %v", err)
+ }
+
+ return report, nil
+}
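A sketch pruning dangling (untagged) images via the filters API; "dangling" is a daemon-side filter key.

package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

// pruneDangling removes dangling images and reports what was reclaimed.
func pruneDangling(ctx context.Context, cli *client.Client) error {
	report, err := cli.ImagesPrune(ctx, filters.NewArgs(filters.Arg("dangling", "true")))
	if err != nil {
		return err
	}
	fmt.Printf("deleted %d images, reclaimed %d bytes\n",
		len(report.ImagesDeleted), report.SpaceReclaimed)
	return nil
}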
diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go
new file mode 100644
index 0000000..4286942
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_pull.go
@@ -0,0 +1,64 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "io"
+ "net/url"
+ "strings"
+
+ "github.com/distribution/reference"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/errdefs"
+)
+
+// ImagePull requests the docker host to pull an image from a remote registry.
+// It executes the privileged function if the operation is unauthorized,
+// and then retries once.
+// It's up to the caller to handle the io.ReadCloser and close it properly.
+//
+// FIXME(vdemeester): this is currently used in a few ways in docker/docker:
+// - if not in trusted content, ref is used to pass the whole reference, and tag is empty
+// - if in trusted content, ref is used to pass the reference name, and tag for the digest
+func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.PullOptions) (io.ReadCloser, error) {
+ ref, err := reference.ParseNormalizedNamed(refStr)
+ if err != nil {
+ return nil, err
+ }
+
+ query := url.Values{}
+ query.Set("fromImage", reference.FamiliarName(ref))
+ if !options.All {
+ query.Set("tag", getAPITagFromNamedRef(ref))
+ }
+ if options.Platform != "" {
+ query.Set("platform", strings.ToLower(options.Platform))
+ }
+
+ resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
+ if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
+ newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx)
+ if privilegeErr != nil {
+ return nil, privilegeErr
+ }
+ resp, err = cli.tryImageCreate(ctx, query, newAuthHeader)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return resp.Body, nil
+}
+
+// getAPITagFromNamedRef returns a tag from the specified reference.
+// This function is necessary as long as the docker "server" api expects
+// digests to be sent as tags and makes a distinction between the name
+// and tag/digest part of a reference.
+func getAPITagFromNamedRef(ref reference.Named) string {
+ if digested, ok := ref.(reference.Digested); ok {
+ return digested.Digest().String()
+ }
+ ref = reference.TagNameOnly(ref)
+ if tagged, ok := ref.(reference.Tagged); ok {
+ return tagged.Tag()
+ }
+ return ""
+}
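A sketch of a pull; note that the progress stream must be drained for the pull to run to completion.

package example

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/client"
)

// pull pulls an image and copies the JSON progress stream to stdout.
func pull(ctx context.Context, cli *client.Client, ref string) error {
	rc, err := cli.ImagePull(ctx, ref, image.PullOptions{})
	if err != nil {
		return err
	}
	defer rc.Close()

	_, err = io.Copy(os.Stdout, rc)
	return err
}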
diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go
new file mode 100644
index 0000000..b340bc4
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_push.go
@@ -0,0 +1,73 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/distribution/reference"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/registry"
+ "github.com/docker/docker/errdefs"
+)
+
+// ImagePush requests the docker host to push an image to a remote registry.
+// It executes the privileged function if the operation is unauthorized,
+// and then retries once.
+// It's up to the caller to handle the io.ReadCloser and close it properly.
+func (cli *Client) ImagePush(ctx context.Context, image string, options image.PushOptions) (io.ReadCloser, error) {
+ ref, err := reference.ParseNormalizedNamed(image)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, isCanonical := ref.(reference.Canonical); isCanonical {
+ return nil, errors.New("cannot push a digest reference")
+ }
+
+ name := reference.FamiliarName(ref)
+ query := url.Values{}
+ if !options.All {
+ ref = reference.TagNameOnly(ref)
+ if tagged, ok := ref.(reference.Tagged); ok {
+ query.Set("tag", tagged.Tag())
+ }
+ }
+
+ if options.Platform != nil {
+ if err := cli.NewVersionError(ctx, "1.46", "platform"); err != nil {
+ return nil, err
+ }
+
+ p := *options.Platform
+ pJson, err := json.Marshal(p)
+ if err != nil {
+ return nil, fmt.Errorf("invalid platform: %v", err)
+ }
+
+ query.Set("platform", string(pJson))
+ }
+
+ resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth)
+ if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
+ newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx)
+ if privilegeErr != nil {
+ return nil, privilegeErr
+ }
+ resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return resp.Body, nil
+}
+
+func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (*http.Response, error) {
+ return cli.post(ctx, "/images/"+imageID+"/push", query, nil, http.Header{
+ registry.AuthHeader: {registryAuth},
+ })
+}
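A sketch of a push with inline credentials. The registry, repository, and credentials are placeholders, and registry.EncodeAuthConfig is assumed to be available in this version of the api/types/registry package.

package example

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/api/types/registry"
	"github.com/docker/docker/client"
)

// push pushes a tag, sending credentials as the base64-encoded auth header.
func push(ctx context.Context, cli *client.Client) error {
	authStr, err := registry.EncodeAuthConfig(registry.AuthConfig{
		Username: "user",   // placeholder
		Password: "secret", // placeholder
	})
	if err != nil {
		return err
	}

	rc, err := cli.ImagePush(ctx, "registry.example.com/app:latest",
		image.PushOptions{RegistryAuth: authStr})
	if err != nil {
		return err
	}
	defer rc.Close()

	_, err = io.Copy(os.Stdout, rc)
	return err
}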
diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go
new file mode 100644
index 0000000..b0c87ca
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_remove.go
@@ -0,0 +1,31 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types/image"
+)
+
+// ImageRemove removes an image from the docker host.
+func (cli *Client) ImageRemove(ctx context.Context, imageID string, options image.RemoveOptions) ([]image.DeleteResponse, error) {
+ query := url.Values{}
+
+ if options.Force {
+ query.Set("force", "1")
+ }
+ if !options.PruneChildren {
+ query.Set("noprune", "1")
+ }
+
+ resp, err := cli.delete(ctx, "/images/"+imageID, query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ var dels []image.DeleteResponse
+ err = json.NewDecoder(resp.Body).Decode(&dels)
+ return dels, err
+}
diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go
new file mode 100644
index 0000000..0aa7177
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_save.go
@@ -0,0 +1,41 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "io"
+ "net/url"
+)
+
+// ImageSave retrieves one or more images from the docker host as an io.ReadCloser.
+//
+// Platforms is an optional parameter that specifies the platforms to save from the image.
+// This only has an effect if the input image is a multi-platform image.
+func (cli *Client) ImageSave(ctx context.Context, imageIDs []string, saveOpts ...ImageSaveOption) (io.ReadCloser, error) {
+ var opts imageSaveOpts
+ for _, opt := range saveOpts {
+ if err := opt.Apply(&opts); err != nil {
+ return nil, err
+ }
+ }
+
+ query := url.Values{
+ "names": imageIDs,
+ }
+
+ if len(opts.apiOptions.Platforms) > 0 {
+ if err := cli.NewVersionError(ctx, "1.48", "platform"); err != nil {
+ return nil, err
+ }
+ p, err := encodePlatforms(opts.apiOptions.Platforms...)
+ if err != nil {
+ return nil, err
+ }
+ query["platform"] = p
+ }
+
+ resp, err := cli.get(ctx, "/images/get", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.Body, nil
+}
diff --git a/vendor/github.com/docker/docker/client/image_save_opts.go b/vendor/github.com/docker/docker/client/image_save_opts.go
new file mode 100644
index 0000000..acd8f28
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_save_opts.go
@@ -0,0 +1,33 @@
+package client
+
+import (
+ "fmt"
+
+ "github.com/docker/docker/api/types/image"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
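+// ImageSaveOption is a type representing functional options for the image save operation.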
+type ImageSaveOption interface {
+ Apply(*imageSaveOpts) error
+}
+
+type imageSaveOptionFunc func(opt *imageSaveOpts) error
+
+func (f imageSaveOptionFunc) Apply(o *imageSaveOpts) error {
+ return f(o)
+}
+
+// ImageSaveWithPlatforms sets the platforms to be saved from the image.
+func ImageSaveWithPlatforms(platforms ...ocispec.Platform) ImageSaveOption {
+ return imageSaveOptionFunc(func(opt *imageSaveOpts) error {
+ if opt.apiOptions.Platforms != nil {
+ return fmt.Errorf("platforms already set to %v", opt.apiOptions.Platforms)
+ }
+ opt.apiOptions.Platforms = platforms
+ return nil
+ })
+}
+
+type imageSaveOpts struct {
+ apiOptions image.SaveOptions
+}
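A sketch of a platform-filtered save (API 1.48+, per the version gate in ImageSave) written to a local tar file; the image name and output path are illustrative.

package example

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/client"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// saveArm64 writes the linux/arm64 variant of an image to a tarball.
func saveArm64(ctx context.Context, cli *client.Client) error {
	rc, err := cli.ImageSave(ctx, []string{"alpine:latest"},
		client.ImageSaveWithPlatforms(ocispec.Platform{OS: "linux", Architecture: "arm64"}))
	if err != nil {
		return err
	}
	defer rc.Close()

	out, err := os.Create("alpine-arm64.tar")
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, rc)
	return err
}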
diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go
new file mode 100644
index 0000000..0a7b5ec
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_search.go
@@ -0,0 +1,54 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/registry"
+ "github.com/docker/docker/errdefs"
+)
+
+// ImageSearch makes the docker host search by a term in a remote registry.
+// The list of results is not sorted in any fashion.
+func (cli *Client) ImageSearch(ctx context.Context, term string, options registry.SearchOptions) ([]registry.SearchResult, error) {
+ var results []registry.SearchResult
+ query := url.Values{}
+ query.Set("term", term)
+ if options.Limit > 0 {
+ query.Set("limit", strconv.Itoa(options.Limit))
+ }
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToJSON(options.Filters)
+ if err != nil {
+ return results, err
+ }
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth)
+ defer ensureReaderClosed(resp)
+ if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
+ newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx)
+ if privilegeErr != nil {
+ return results, privilegeErr
+ }
+ resp, err = cli.tryImageSearch(ctx, query, newAuthHeader)
+ }
+ if err != nil {
+ return results, err
+ }
+
+ err = json.NewDecoder(resp.Body).Decode(&results)
+ return results, err
+}
+
+func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (*http.Response, error) {
+ return cli.get(ctx, "/images/search", query, http.Header{
+ registry.AuthHeader: {registryAuth},
+ })
+}
diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go
new file mode 100644
index 0000000..ea6b4a1
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_tag.go
@@ -0,0 +1,37 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/distribution/reference"
+ "github.com/pkg/errors"
+)
+
+// ImageTag tags an image in the docker host
+func (cli *Client) ImageTag(ctx context.Context, source, target string) error {
+ if _, err := reference.ParseAnyReference(source); err != nil {
+ return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source)
+ }
+
+ ref, err := reference.ParseNormalizedNamed(target)
+ if err != nil {
+ return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target)
+ }
+
+ if _, isCanonical := ref.(reference.Canonical); isCanonical {
+ return errors.New("refusing to create a tag with a digest reference")
+ }
+
+ ref = reference.TagNameOnly(ref)
+
+ query := url.Values{}
+ query.Set("repo", reference.FamiliarName(ref))
+ if tagged, ok := ref.(reference.Tagged); ok {
+ query.Set("tag", tagged.Tag())
+ }
+
+ resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go
new file mode 100644
index 0000000..6396f4b
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/info.go
@@ -0,0 +1,26 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "github.com/docker/docker/api/types/system"
+)
+
+// Info returns information about the docker server.
+func (cli *Client) Info(ctx context.Context) (system.Info, error) {
+ var info system.Info
+ resp, err := cli.get(ctx, "/info", url.Values{}, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return info, err
+ }
+
+ if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
+ return info, fmt.Errorf("Error reading remote info: %v", err)
+ }
+
+ return info, nil
+}
diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go
new file mode 100644
index 0000000..d3572c1
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/login.go
@@ -0,0 +1,24 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types/registry"
+)
+
+// RegistryLogin authenticates the docker server with a given docker registry.
+// It returns an unauthorized error when the authentication fails.
+func (cli *Client) RegistryLogin(ctx context.Context, auth registry.AuthConfig) (registry.AuthenticateOKBody, error) {
+ resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil)
+ defer ensureReaderClosed(resp)
+
+ if err != nil {
+ return registry.AuthenticateOKBody{}, err
+ }
+
+ var response registry.AuthenticateOKBody
+ err = json.NewDecoder(resp.Body).Decode(&response)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go
new file mode 100644
index 0000000..fa7cc34
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_connect.go
@@ -0,0 +1,28 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+
+ "github.com/docker/docker/api/types/network"
+)
+
+// NetworkConnect connects a container to an existing network in the docker host.
+func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error {
+ networkID, err := trimID("network", networkID)
+ if err != nil {
+ return err
+ }
+
+ containerID, err = trimID("container", containerID)
+ if err != nil {
+ return err
+ }
+
+ nc := network.ConnectOptions{
+ Container: containerID,
+ EndpointConfig: config,
+ }
+ resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go
new file mode 100644
index 0000000..eef9514
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_create.go
@@ -0,0 +1,40 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/versions"
+)
+
+// NetworkCreate creates a new network in the docker host.
+func (cli *Client) NetworkCreate(ctx context.Context, name string, options network.CreateOptions) (network.CreateResponse, error) {
+ // Make sure we negotiated (if the client is configured to do so),
+ // as code below contains API-version specific handling of options.
+ //
+ // Normally, version-negotiation (if enabled) would not happen until
+ // the API request is made.
+ if err := cli.checkVersion(ctx); err != nil {
+ return network.CreateResponse{}, err
+ }
+
+ networkCreateRequest := network.CreateRequest{
+ CreateOptions: options,
+ Name: name,
+ }
+ if versions.LessThan(cli.version, "1.44") {
+ enabled := true
+ networkCreateRequest.CheckDuplicate = &enabled //nolint:staticcheck // ignore SA1019: CheckDuplicate is deprecated since API v1.44.
+ }
+
+ resp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return network.CreateResponse{}, err
+ }
+
+ var response network.CreateResponse
+ err = json.NewDecoder(resp.Body).Decode(&response)
+ return response, err
+}
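A sketch of creating a bridge network; the network name and label are placeholders.

package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
)

// createBridge creates a labelled bridge network and prints any warning.
func createBridge(ctx context.Context, cli *client.Client) error {
	resp, err := cli.NetworkCreate(ctx, "app-net", network.CreateOptions{
		Driver: "bridge",
		Labels: map[string]string{"com.example.purpose": "demo"},
	})
	if err != nil {
		return err
	}
	fmt.Println("created network", resp.ID, resp.Warning)
	return nil
}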
diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go
new file mode 100644
index 0000000..d8051df
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_disconnect.go
@@ -0,0 +1,28 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+
+ "github.com/docker/docker/api/types/network"
+)
+
+// NetworkDisconnect disconnects a container from an existing network in the docker host.
+func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error {
+ networkID, err := trimID("network", networkID)
+ if err != nil {
+ return err
+ }
+
+ containerID, err = trimID("container", containerID)
+ if err != nil {
+ return err
+ }
+
+ nd := network.DisconnectOptions{
+ Container: containerID,
+ Force: force,
+ }
+ resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go
new file mode 100644
index 0000000..1387c08
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_inspect.go
@@ -0,0 +1,47 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "io"
+ "net/url"
+
+ "github.com/docker/docker/api/types/network"
+)
+
+// NetworkInspect returns the information for a specific network configured in the docker host.
+func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options network.InspectOptions) (network.Inspect, error) {
+ networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options)
+ return networkResource, err
+}
+
+// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation.
+func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options network.InspectOptions) (network.Inspect, []byte, error) {
+ networkID, err := trimID("network", networkID)
+ if err != nil {
+ return network.Inspect{}, nil, err
+ }
+ query := url.Values{}
+ if options.Verbose {
+ query.Set("verbose", "true")
+ }
+ if options.Scope != "" {
+ query.Set("scope", options.Scope)
+ }
+
+ resp, err := cli.get(ctx, "/networks/"+networkID, query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return network.Inspect{}, nil, err
+ }
+
+ raw, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return network.Inspect{}, nil, err
+ }
+
+ var nw network.Inspect
+ err = json.NewDecoder(bytes.NewReader(raw)).Decode(&nw)
+ return nw, raw, err
+}
diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go
new file mode 100644
index 0000000..e1b4fca
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_list.go
@@ -0,0 +1,32 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/network"
+)
+
+// NetworkList returns the list of networks configured in the docker host.
+func (cli *Client) NetworkList(ctx context.Context, options network.ListOptions) ([]network.Summary, error) {
+ query := url.Values{}
+ if options.Filters.Len() > 0 {
+ //nolint:staticcheck // ignore SA1019 for old code
+ filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+ var networkResources []network.Summary
+ resp, err := cli.get(ctx, "/networks", query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return networkResources, err
+ }
+ err = json.NewDecoder(resp.Body).Decode(&networkResources)
+ return networkResources, err
+}
diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go
new file mode 100644
index 0000000..90d3679
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_prune.go
@@ -0,0 +1,35 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/network"
+)
+
+// NetworksPrune requests the daemon to delete unused networks.
+func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (network.PruneReport, error) {
+ if err := cli.NewVersionError(ctx, "1.25", "network prune"); err != nil {
+ return network.PruneReport{}, err
+ }
+
+ query, err := getFiltersQuery(pruneFilters)
+ if err != nil {
+ return network.PruneReport{}, err
+ }
+
+ resp, err := cli.post(ctx, "/networks/prune", query, nil, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return network.PruneReport{}, err
+ }
+
+ var report network.PruneReport
+ if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
+ return network.PruneReport{}, fmt.Errorf("Error retrieving network prune report: %v", err)
+ }
+
+ return report, nil
+}
diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go
new file mode 100644
index 0000000..89fdaaf
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_remove.go
@@ -0,0 +1,14 @@
+package client // import "github.com/docker/docker/client"
+
+import "context"
+
+// NetworkRemove removes an existing network from the docker host.
+func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error {
+ networkID, err := trimID("network", networkID)
+ if err != nil {
+ return err
+ }
+ resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil)
+ defer ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go
new file mode 100644
index 0000000..5d3343d
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/node_inspect.go
@@ -0,0 +1,33 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "io"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// NodeInspectWithRaw returns the node information.
+func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) {
+ nodeID, err := trimID("node", nodeID)
+ if err != nil {
+ return swarm.Node{}, nil, err
+ }
+ resp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return swarm.Node{}, nil, err
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return swarm.Node{}, nil, err
+ }
+
+ var response swarm.Node
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go
new file mode 100644
index 0000000..2534f4a
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/node_list.go
@@ -0,0 +1,35 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// NodeList returns the list of nodes.
+func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) {
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToJSON(options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/nodes", query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ var nodes []swarm.Node
+ err = json.NewDecoder(resp.Body).Decode(&nodes)
+ return nodes, err
+}
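
A sketch of listing only manager nodes via the "role" filter; client setup mirrors the other sketches, and all values are illustrative:

    package main

    import (
        "context"
        "fmt"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/api/types/filters"
        "github.com/docker/docker/client"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        // List only manager nodes; "role" is one of the filters the nodes API accepts.
        nodes, err := cli.NodeList(context.Background(), types.NodeListOptions{
            Filters: filters.NewArgs(filters.Arg("role", "manager")),
        })
        if err != nil {
            panic(err)
        }
        for _, n := range nodes {
            fmt.Println(n.ID, n.Description.Hostname, n.Status.State)
        }
    }
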
diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go
new file mode 100644
index 0000000..81f8fed
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/node_remove.go
@@ -0,0 +1,25 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+)
+
+// NodeRemove removes a Node.
+func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error {
+ nodeID, err := trimID("node", nodeID)
+ if err != nil {
+ return err
+ }
+
+ query := url.Values{}
+ if options.Force {
+ query.Set("force", "1")
+ }
+
+ resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil)
+ defer ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go
new file mode 100644
index 0000000..10e2186
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/node_update.go
@@ -0,0 +1,22 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// NodeUpdate updates a Node.
+func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error {
+ nodeID, err := trimID("node", nodeID)
+ if err != nil {
+ return err
+ }
+
+ query := url.Values{}
+ query.Set("version", version.String())
+ resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil)
+ ensureReaderClosed(resp)
+ return err
+}
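
NodeUpdate is a read-modify-write call: the required version comes from the node's embedded Meta, as returned by NodeInspectWithRaw. A sketch of draining a node (illustrative; the node ID is taken from the command line):

    package main

    import (
        "context"
        "os"

        "github.com/docker/docker/api/types/swarm"
        "github.com/docker/docker/client"
    )

    func main() {
        ctx := context.Background()
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        // Inspect first to obtain the current spec and version (os.Args[1] is the node ID).
        node, _, err := cli.NodeInspectWithRaw(ctx, os.Args[1])
        if err != nil {
            panic(err)
        }
        node.Spec.Availability = swarm.NodeAvailabilityDrain
        if err := cli.NodeUpdate(ctx, node.ID, node.Version, node.Spec); err != nil {
            panic(err)
        }
    }
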
diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go
new file mode 100644
index 0000000..6f68fc2
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/options.go
@@ -0,0 +1,240 @@
+package client
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/docker/go-connections/sockets"
+ "github.com/docker/go-connections/tlsconfig"
+ "github.com/pkg/errors"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Opt is a configuration option to initialize a [Client].
+type Opt func(*Client) error
+
+// FromEnv configures the client with values from environment variables. It
+// is the equivalent of using the [WithTLSClientConfigFromEnv], [WithHostFromEnv],
+// and [WithVersionFromEnv] options.
+//
+// FromEnv uses the following environment variables:
+//
+// - DOCKER_HOST ([EnvOverrideHost]) to set the URL to the docker server.
+// - DOCKER_API_VERSION ([EnvOverrideAPIVersion]) to set the version of the
+// API to use, leave empty for latest.
+// - DOCKER_CERT_PATH ([EnvOverrideCertPath]) to specify the directory from
+// which to load the TLS certificates ("ca.pem", "cert.pem", "key.pem").
+// - DOCKER_TLS_VERIFY ([EnvTLSVerify]) to enable or disable TLS verification
+// (off by default).
+func FromEnv(c *Client) error {
+ ops := []Opt{
+ WithTLSClientConfigFromEnv(),
+ WithHostFromEnv(),
+ WithVersionFromEnv(),
+ }
+ for _, op := range ops {
+ if err := op(c); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// WithDialContext applies the dialer to the client transport. This can be
+// used to set the Timeout and KeepAlive settings of the client. It returns
+// an error if the client does not have a [http.Transport] configured.
+func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt {
+ return func(c *Client) error {
+ if transport, ok := c.client.Transport.(*http.Transport); ok {
+ transport.DialContext = dialContext
+ return nil
+ }
+ return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport)
+ }
+}
+
+// WithHost overrides the client host with the specified one.
+func WithHost(host string) Opt {
+ return func(c *Client) error {
+ hostURL, err := ParseHostURL(host)
+ if err != nil {
+ return err
+ }
+ c.host = host
+ c.proto = hostURL.Scheme
+ c.addr = hostURL.Host
+ c.basePath = hostURL.Path
+ if transport, ok := c.client.Transport.(*http.Transport); ok {
+ return sockets.ConfigureTransport(transport, c.proto, c.addr)
+ }
+ return errors.Errorf("cannot apply host to transport: %T", c.client.Transport)
+ }
+}
+
+// WithHostFromEnv overrides the client host with the host specified in the
+// DOCKER_HOST ([EnvOverrideHost]) environment variable. If DOCKER_HOST is not set,
+// or set to an empty value, the host is not modified.
+func WithHostFromEnv() Opt {
+ return func(c *Client) error {
+ if host := os.Getenv(EnvOverrideHost); host != "" {
+ return WithHost(host)(c)
+ }
+ return nil
+ }
+}
+
+// WithHTTPClient overrides the client's HTTP client with the specified one.
+func WithHTTPClient(client *http.Client) Opt {
+ return func(c *Client) error {
+ if client != nil {
+ c.client = client
+ }
+ return nil
+ }
+}
+
+// WithTimeout configures the time limit for requests made by the HTTP client.
+func WithTimeout(timeout time.Duration) Opt {
+ return func(c *Client) error {
+ c.client.Timeout = timeout
+ return nil
+ }
+}
+
+// WithUserAgent configures the User-Agent header to use for HTTP requests.
+// It overrides any User-Agent set in headers. When set to an empty string,
+// the User-Agent header is removed, and no header is sent.
+func WithUserAgent(ua string) Opt {
+ return func(c *Client) error {
+ c.userAgent = &ua
+ return nil
+ }
+}
+
+// WithHTTPHeaders appends custom HTTP headers to the client's default headers.
+// It does not allow for built-in headers (such as "User-Agent", if set) to
+// be overridden. Also see [WithUserAgent].
+func WithHTTPHeaders(headers map[string]string) Opt {
+ return func(c *Client) error {
+ c.customHTTPHeaders = headers
+ return nil
+ }
+}
+
+// WithScheme overrides the client scheme with the specified one.
+func WithScheme(scheme string) Opt {
+ return func(c *Client) error {
+ c.scheme = scheme
+ return nil
+ }
+}
+
+// WithTLSClientConfig applies a TLS config to the client transport.
+func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt {
+ return func(c *Client) error {
+ transport, ok := c.client.Transport.(*http.Transport)
+ if !ok {
+ return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport)
+ }
+ config, err := tlsconfig.Client(tlsconfig.Options{
+ CAFile: cacertPath,
+ CertFile: certPath,
+ KeyFile: keyPath,
+ ExclusiveRootPools: true,
+ })
+ if err != nil {
+ return errors.Wrap(err, "failed to create tls config")
+ }
+ transport.TLSClientConfig = config
+ return nil
+ }
+}
+
+// WithTLSClientConfigFromEnv configures the client's TLS settings with the
+// settings in the DOCKER_CERT_PATH ([EnvOverrideCertPath]) and DOCKER_TLS_VERIFY
+// ([EnvTLSVerify]) environment variables. If DOCKER_CERT_PATH is not set or empty,
+// TLS configuration is not modified.
+//
+// WithTLSClientConfigFromEnv uses the following environment variables:
+//
+// - DOCKER_CERT_PATH ([EnvOverrideCertPath]) to specify the directory from
+// which to load the TLS certificates ("ca.pem", "cert.pem", "key.pem").
+// - DOCKER_TLS_VERIFY ([EnvTLSVerify]) to enable or disable TLS verification
+// (off by default).
+func WithTLSClientConfigFromEnv() Opt {
+ return func(c *Client) error {
+ dockerCertPath := os.Getenv(EnvOverrideCertPath)
+ if dockerCertPath == "" {
+ return nil
+ }
+ tlsc, err := tlsconfig.Client(tlsconfig.Options{
+ CAFile: filepath.Join(dockerCertPath, "ca.pem"),
+ CertFile: filepath.Join(dockerCertPath, "cert.pem"),
+ KeyFile: filepath.Join(dockerCertPath, "key.pem"),
+ InsecureSkipVerify: os.Getenv(EnvTLSVerify) == "",
+ })
+ if err != nil {
+ return err
+ }
+
+ c.client = &http.Client{
+ Transport: &http.Transport{TLSClientConfig: tlsc},
+ CheckRedirect: CheckRedirect,
+ }
+ return nil
+ }
+}
+
+// WithVersion overrides the client version with the specified one. If an empty
+// version is provided, the value is ignored to allow version negotiation
+// (see [WithAPIVersionNegotiation]).
+func WithVersion(version string) Opt {
+ return func(c *Client) error {
+ if v := strings.TrimPrefix(version, "v"); v != "" {
+ c.version = v
+ c.manualOverride = true
+ }
+ return nil
+ }
+}
+
+// WithVersionFromEnv overrides the client version with the version specified in
+// the DOCKER_API_VERSION ([EnvOverrideAPIVersion]) environment variable.
+// If DOCKER_API_VERSION is not set, or set to an empty value, the version
+// is not modified.
+func WithVersionFromEnv() Opt {
+ return func(c *Client) error {
+ return WithVersion(os.Getenv(EnvOverrideAPIVersion))(c)
+ }
+}
+
+// WithAPIVersionNegotiation enables automatic API version negotiation for the client.
+// With this option enabled, the client automatically negotiates the API version
+// to use when making requests. API version negotiation is performed on the first
+// request; subsequent requests do not re-negotiate.
+func WithAPIVersionNegotiation() Opt {
+ return func(c *Client) error {
+ c.negotiateVersion = true
+ return nil
+ }
+}
+
+// WithTraceProvider sets the trace provider for the client.
+// If this is not set then the global trace provider will be used.
+func WithTraceProvider(provider trace.TracerProvider) Opt {
+ return WithTraceOptions(otelhttp.WithTracerProvider(provider))
+}
+
+// WithTraceOptions sets tracing span options for the client.
+func WithTraceOptions(opts ...otelhttp.Option) Opt {
+ return func(c *Client) error {
+ c.traceOpts = append(c.traceOpts, opts...)
+ return nil
+ }
+}
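
Options are applied in order by NewClientWithOpts. A sketch of composing them explicitly instead of via FromEnv; every host, path, version, and header value below is a placeholder:

    package main

    import (
        "fmt"

        "github.com/docker/docker/client"
    )

    func main() {
        cli, err := client.NewClientWithOpts(
            client.WithHost("tcp://10.0.0.5:2376"),
            client.WithTLSClientConfig("/certs/ca.pem", "/certs/cert.pem", "/certs/key.pem"),
            client.WithVersion("1.45"),
            client.WithHTTPHeaders(map[string]string{"X-Custom-Header": "value"}),
        )
        if err != nil {
            panic(err)
        }
        defer cli.Close()
        fmt.Println("client API version:", cli.ClientVersion())
    }
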
diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go
new file mode 100644
index 0000000..c7645e5
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/ping.go
@@ -0,0 +1,80 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/http"
+ "path"
+ "strings"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// Ping pings the server and returns the value of the "Docker-Experimental",
+// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use
+// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported
+// by the daemon. It ignores internal server errors returned by the API, which
+// may be returned if the daemon is in an unhealthy state, but returns errors
+// for other non-success status codes, failing to connect to the API, or failing
+// to parse the API response.
+func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
+ var ping types.Ping
+
+ // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest()
+ // because ping requests are used during API version negotiation, so we want
+ // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping
+ req, err := cli.buildRequest(ctx, http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil)
+ if err != nil {
+ return ping, err
+ }
+ resp, err := cli.doRequest(req)
+ if err != nil {
+ if IsErrConnectionFailed(err) {
+ return ping, err
+ }
+ // We managed to connect, but got some error; continue and try GET request.
+ } else {
+ defer ensureReaderClosed(resp)
+ switch resp.StatusCode {
+ case http.StatusOK, http.StatusInternalServerError:
+ // Server handled the request, so parse the response
+ return parsePingResponse(cli, resp)
+ }
+ }
+
+ // HEAD failed; fallback to GET.
+ req.Method = http.MethodGet
+ resp, err = cli.doRequest(req)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return ping, err
+ }
+ return parsePingResponse(cli, resp)
+}
+
+func parsePingResponse(cli *Client, resp *http.Response) (types.Ping, error) {
+ if resp == nil {
+ return types.Ping{}, nil
+ }
+
+ var ping types.Ping
+ if resp.Header == nil {
+ return ping, cli.checkResponseErr(resp)
+ }
+ ping.APIVersion = resp.Header.Get("Api-Version")
+ ping.OSType = resp.Header.Get("Ostype")
+ if resp.Header.Get("Docker-Experimental") == "true" {
+ ping.Experimental = true
+ }
+ if bv := resp.Header.Get("Builder-Version"); bv != "" {
+ ping.BuilderVersion = types.BuilderVersion(bv)
+ }
+ if si := resp.Header.Get("Swarm"); si != "" {
+ state, role, _ := strings.Cut(si, "/")
+ ping.SwarmStatus = &swarm.Status{
+ NodeState: swarm.LocalNodeState(state),
+ ControlAvailable: role == "manager",
+ }
+ }
+ return ping, cli.checkResponseErr(resp)
+}
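
A sketch of using Ping to probe the daemon; it works with or without version negotiation, since it always hits the unversioned /_ping endpoint:

    package main

    import (
        "context"
        "fmt"

        "github.com/docker/docker/client"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        ping, err := cli.Ping(context.Background())
        if err != nil {
            panic(err)
        }
        fmt.Printf("API %s, OS %s, experimental=%v, builder=%s\n",
            ping.APIVersion, ping.OSType, ping.Experimental, ping.BuilderVersion)
        if ping.SwarmStatus != nil {
            fmt.Println("swarm node state:", ping.SwarmStatus.NodeState)
        }
    }
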
diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go
new file mode 100644
index 0000000..b95dbaf
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_create.go
@@ -0,0 +1,23 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+)
+
+// PluginCreate creates a plugin
+func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error {
+ headers := http.Header(make(map[string][]string))
+ headers.Set("Content-Type", "application/x-tar")
+
+ query := url.Values{}
+ query.Set("name", createOptions.RepoName)
+
+ resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go
new file mode 100644
index 0000000..9fabe77
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_disable.go
@@ -0,0 +1,23 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+)
+
+// PluginDisable disables a plugin
+func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error {
+ name, err := trimID("plugin", name)
+ if err != nil {
+ return err
+ }
+ query := url.Values{}
+ if options.Force {
+ query.Set("force", "1")
+ }
+ resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go
new file mode 100644
index 0000000..492d0bc
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_enable.go
@@ -0,0 +1,23 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types"
+)
+
+// PluginEnable enables a plugin
+func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error {
+ name, err := trimID("plugin", name)
+ if err != nil {
+ return err
+ }
+ query := url.Values{}
+ query.Set("timeout", strconv.Itoa(options.Timeout))
+
+ resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go
new file mode 100644
index 0000000..8f107a7
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_inspect.go
@@ -0,0 +1,32 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "io"
+
+ "github.com/docker/docker/api/types"
+)
+
+// PluginInspectWithRaw inspects an existing plugin
+func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) {
+ name, err := trimID("plugin", name)
+ if err != nil {
+ return nil, nil, err
+ }
+ resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, nil, err
+ }
+ var p types.Plugin
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&p)
+ return &p, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go
new file mode 100644
index 0000000..b04dcf9
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_install.go
@@ -0,0 +1,117 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/distribution/reference"
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/registry"
+ "github.com/docker/docker/errdefs"
+ "github.com/pkg/errors"
+)
+
+// PluginInstall installs a plugin
+func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) {
+ query := url.Values{}
+ if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil {
+ return nil, errors.Wrap(err, "invalid remote reference")
+ }
+ query.Set("remote", options.RemoteRef)
+
+ privileges, err := cli.checkPluginPermissions(ctx, query, options)
+ if err != nil {
+ return nil, err
+ }
+
+ // set the name for the plugin pull; if empty, it should default to the remote reference
+ query.Set("name", name)
+
+ resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth)
+ if err != nil {
+ return nil, err
+ }
+
+ name = resp.Header.Get("Docker-Plugin-Name")
+
+ pr, pw := io.Pipe()
+ go func() { // todo: the client should probably be designed more around the actual api
+ _, err := io.Copy(pw, resp.Body)
+ if err != nil {
+ _ = pw.CloseWithError(err)
+ return
+ }
+ defer func() {
+ if err != nil {
+ delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil)
+ ensureReaderClosed(delResp)
+ }
+ }()
+ if len(options.Args) > 0 {
+  if err = cli.PluginSet(ctx, name, options.Args); err != nil { // assign the outer err so the deferred cleanup fires
+ _ = pw.CloseWithError(err)
+ return
+ }
+ }
+
+ if options.Disabled {
+ _ = pw.Close()
+ return
+ }
+
+ err = cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) // assign the outer err so the deferred cleanup fires on failure
+ _ = pw.CloseWithError(err)
+ }()
+ return pr, nil
+}
+
+func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (*http.Response, error) {
+ return cli.get(ctx, "/plugins/privileges", query, http.Header{
+ registry.AuthHeader: {registryAuth},
+ })
+}
+
+func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (*http.Response, error) {
+ return cli.post(ctx, "/plugins/pull", query, privileges, http.Header{
+ registry.AuthHeader: {registryAuth},
+ })
+}
+
+func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) {
+ resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
+ if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
+ // todo: inspect first to check for an existing name before checking privileges
+ newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx)
+ if privilegeErr != nil {
+ ensureReaderClosed(resp)
+ return nil, privilegeErr
+ }
+ options.RegistryAuth = newAuthHeader
+ resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
+ }
+ if err != nil {
+ ensureReaderClosed(resp)
+ return nil, err
+ }
+
+ var privileges types.PluginPrivileges
+ if err := json.NewDecoder(resp.Body).Decode(&privileges); err != nil {
+ ensureReaderClosed(resp)
+ return nil, err
+ }
+ ensureReaderClosed(resp)
+
+ if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 {
+ accept, err := options.AcceptPermissionsFunc(ctx, privileges)
+ if err != nil {
+ return nil, err
+ }
+ if !accept {
+ return nil, errors.Errorf("permission denied while installing plugin %s", options.RemoteRef)
+ }
+ }
+ return privileges, nil
+}
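
A sketch of installing a plugin with a permissions callback; the plugin reference is an example, and the returned reader streams progress and should be drained (the install completes when the stream ends):

    package main

    import (
        "context"
        "fmt"
        "io"
        "os"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
    )

    func main() {
        ctx := context.Background()
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        // An empty name defaults to the remote reference.
        rc, err := cli.PluginInstall(ctx, "", types.PluginInstallOptions{
            RemoteRef: "vieux/sshfs:latest",
            AcceptPermissionsFunc: func(ctx context.Context, privileges types.PluginPrivileges) (bool, error) {
                fmt.Println("granting privileges:", privileges)
                return true, nil
            },
        })
        if err != nil {
            panic(err)
        }
        defer rc.Close()
        _, _ = io.Copy(os.Stdout, rc) // progress stream
    }
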
diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go
new file mode 100644
index 0000000..03bcf76
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_list.go
@@ -0,0 +1,33 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+)
+
+// PluginList returns the installed plugins
+func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) {
+ var plugins types.PluginsListResponse
+ query := url.Values{}
+
+ if filter.Len() > 0 {
+ //nolint:staticcheck // ignore SA1019 for old code
+ filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
+ if err != nil {
+ return plugins, err
+ }
+ query.Set("filters", filterJSON)
+ }
+ resp, err := cli.get(ctx, "/plugins", query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return plugins, err
+ }
+
+ err = json.NewDecoder(resp.Body).Decode(&plugins)
+ return plugins, err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go
new file mode 100644
index 0000000..da15e44
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_push.go
@@ -0,0 +1,24 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "github.com/docker/docker/api/types/registry"
+)
+
+// PluginPush pushes a plugin to a registry
+func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) {
+ name, err := trimID("plugin", name)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, http.Header{
+ registry.AuthHeader: {registryAuth},
+ })
+ if err != nil {
+ return nil, err
+ }
+ return resp.Body, nil
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go
new file mode 100644
index 0000000..6ee107e
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_remove.go
@@ -0,0 +1,25 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+)
+
+// PluginRemove removes a plugin
+func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error {
+ name, err := trimID("plugin", name)
+ if err != nil {
+ return err
+ }
+
+ query := url.Values{}
+ if options.Force {
+ query.Set("force", "1")
+ }
+
+ resp, err := cli.delete(ctx, "/plugins/"+name, query, nil)
+ defer ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go
new file mode 100644
index 0000000..e2a7983
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_set.go
@@ -0,0 +1,17 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+)
+
+// PluginSet modifies settings for an existing plugin
+func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error {
+ name, err := trimID("plugin", name)
+ if err != nil {
+ return err
+ }
+
+ resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil)
+ ensureReaderClosed(resp)
+ return err
+}
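
PluginSet takes "KEY=VALUE" settings strings, as with `docker plugin set`, and changing most settings requires the plugin to be disabled first. A sketch with illustrative values:

    package main

    import (
        "context"

        "github.com/docker/docker/client"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        // Plugin name and setting are placeholders.
        if err := cli.PluginSet(context.Background(), "vieux/sshfs:latest", []string{"DEBUG=1"}); err != nil {
            panic(err)
        }
    }
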
diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go
new file mode 100644
index 0000000..4abb29c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go
@@ -0,0 +1,47 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/distribution/reference"
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/registry"
+ "github.com/pkg/errors"
+)
+
+// PluginUpgrade upgrades a plugin
+func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) {
+ name, err := trimID("plugin", name)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := cli.NewVersionError(ctx, "1.26", "plugin upgrade"); err != nil {
+ return nil, err
+ }
+ query := url.Values{}
+ if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil {
+ return nil, errors.Wrap(err, "invalid remote reference")
+ }
+ query.Set("remote", options.RemoteRef)
+
+ privileges, err := cli.checkPluginPermissions(ctx, query, options)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth)
+ if err != nil {
+ return nil, err
+ }
+ return resp.Body, nil
+}
+
+func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (*http.Response, error) {
+ return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, http.Header{
+ registry.AuthHeader: {registryAuth},
+ })
+}
diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go
new file mode 100644
index 0000000..2b913aa
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/request.go
@@ -0,0 +1,325 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "reflect"
+ "strings"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/versions"
+ "github.com/docker/docker/errdefs"
+ "github.com/pkg/errors"
+)
+
+// head sends an http request to the docker API using the method HEAD.
+func (cli *Client) head(ctx context.Context, path string, query url.Values, headers http.Header) (*http.Response, error) {
+ return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers)
+}
+
+// get sends an http request to the docker API using the method GET with a specific Go context.
+func (cli *Client) get(ctx context.Context, path string, query url.Values, headers http.Header) (*http.Response, error) {
+ return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers)
+}
+
+// post sends an http request to the docker API using the method POST with a specific Go context.
+func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (*http.Response, error) {
+ body, headers, err := encodeBody(obj, headers)
+ if err != nil {
+ return nil, err
+ }
+ return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers)
+}
+
+func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (*http.Response, error) {
+ return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers)
+}
+
+func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (*http.Response, error) {
+ body, headers, err := encodeBody(obj, headers)
+ if err != nil {
+ return nil, err
+ }
+ return cli.putRaw(ctx, path, query, body, headers)
+}
+
+// putRaw sends an http request to the docker API using the method PUT.
+func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (*http.Response, error) {
+ // PUT requests are expected to always have a body (apparently)
+ // so explicitly pass an empty body to sendRequest to signal that
+ // it should set the Content-Type header if not already present.
+ if body == nil {
+ body = http.NoBody
+ }
+ return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers)
+}
+
+// delete sends an http request to the docker API using the method DELETE.
+func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers http.Header) (*http.Response, error) {
+ return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers)
+}
+
+func encodeBody(obj interface{}, headers http.Header) (io.Reader, http.Header, error) {
+ if obj == nil {
+ return nil, headers, nil
+ }
+ // encoding/json encodes a nil pointer as the JSON document `null`,
+ // irrespective of whether the type implements json.Marshaler or encoding.TextMarshaler.
+ // That is almost certainly not what the caller intended as the request body.
+ if reflect.TypeOf(obj).Kind() == reflect.Ptr && reflect.ValueOf(obj).IsNil() {
+ return nil, headers, nil
+ }
+
+ body, err := encodeData(obj)
+ if err != nil {
+ return nil, headers, err
+ }
+ if headers == nil {
+ headers = make(map[string][]string)
+ }
+ headers["Content-Type"] = []string{"application/json"}
+ return body, headers, nil
+}
+
+func (cli *Client) buildRequest(ctx context.Context, method, path string, body io.Reader, headers http.Header) (*http.Request, error) {
+ req, err := http.NewRequestWithContext(ctx, method, path, body)
+ if err != nil {
+ return nil, err
+ }
+ req = cli.addHeaders(req, headers)
+ req.URL.Scheme = cli.scheme
+ req.URL.Host = cli.addr
+
+ if cli.proto == "unix" || cli.proto == "npipe" {
+ // Override host header for non-tcp connections.
+ req.Host = DummyHost
+ }
+
+ if body != nil && req.Header.Get("Content-Type") == "" {
+ req.Header.Set("Content-Type", "text/plain")
+ }
+ return req, nil
+}
+
+func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers http.Header) (*http.Response, error) {
+ req, err := cli.buildRequest(ctx, method, cli.getAPIPath(ctx, path, query), body, headers)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := cli.doRequest(req)
+ switch {
+ case errors.Is(err, context.Canceled):
+ return nil, errdefs.Cancelled(err)
+ case errors.Is(err, context.DeadlineExceeded):
+ return nil, errdefs.Deadline(err)
+ case err == nil:
+ return resp, cli.checkResponseErr(resp)
+ default:
+ return resp, err
+ }
+}
+
+func (cli *Client) doRequest(req *http.Request) (*http.Response, error) {
+ resp, err := cli.client.Do(req)
+ if err != nil {
+ if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") {
+ return nil, errConnectionFailed{fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)}
+ }
+
+ if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") {
+ return nil, errConnectionFailed{errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings")}
+ }
+
+ // Don't decorate context sentinel errors; users may be comparing to
+ // them directly.
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, err
+ }
+
+ var uErr *url.Error
+ if errors.As(err, &uErr) {
+ var nErr *net.OpError
+ if errors.As(uErr.Err, &nErr) {
+ if os.IsPermission(nErr.Err) {
+ return nil, errConnectionFailed{errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host)}
+ }
+ }
+ }
+
+ var nErr net.Error
+ if errors.As(err, &nErr) {
+ // FIXME(thaJeztah): any net.Error should be considered a connection error (but we should include the original error)?
+ if nErr.Timeout() {
+ return nil, connectionFailed(cli.host)
+ }
+ if strings.Contains(nErr.Error(), "connection refused") || strings.Contains(nErr.Error(), "dial unix") {
+ return nil, connectionFailed(cli.host)
+ }
+ }
+
+ // Although there's not a strongly typed error for this in go-winio,
+ // lots of people are using the default configuration for the docker
+ // daemon on Windows where the daemon is listening on a named pipe
+ // `//./pipe/docker_engine`, and the client must be running elevated.
+ // Give users a clue rather than the not-overly useful message
+ // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info:
+ // open //./pipe/docker_engine: The system cannot find the file specified.`.
+ // Note we can't string compare "The system cannot find the file specified" as
+ // this is localised - for example in French the error would be
+ // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.`
+ if strings.Contains(err.Error(), `open //./pipe/docker_engine`) {
+ // Checks if client is running with elevated privileges
+ if f, elevatedErr := os.Open(`\\.\PHYSICALDRIVE0`); elevatedErr != nil {
+ err = errors.Wrap(err, "in the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect")
+ } else {
+ _ = f.Close()
+ err = errors.Wrap(err, "this error may indicate that the docker daemon is not running")
+ }
+ }
+
+ return nil, errConnectionFailed{errors.Wrap(err, "error during connect")}
+ }
+
+ return resp, nil
+}
+
+func (cli *Client) checkResponseErr(serverResp *http.Response) (retErr error) {
+ if serverResp == nil {
+ return nil
+ }
+ if serverResp.StatusCode >= 200 && serverResp.StatusCode < 400 {
+ return nil
+ }
+ defer func() {
+ retErr = errdefs.FromStatusCode(retErr, serverResp.StatusCode)
+ }()
+
+ var body []byte
+ var err error
+ var reqURL string
+ if serverResp.Request != nil {
+ reqURL = serverResp.Request.URL.String()
+ }
+ statusMsg := serverResp.Status
+ if statusMsg == "" {
+ statusMsg = http.StatusText(serverResp.StatusCode)
+ }
+ if serverResp.Body != nil {
+ bodyMax := 1 * 1024 * 1024 // 1 MiB
+ bodyR := &io.LimitedReader{
+ R: serverResp.Body,
+ N: int64(bodyMax),
+ }
+ body, err = io.ReadAll(bodyR)
+ if err != nil {
+ return err
+ }
+ if bodyR.N == 0 {
+ if reqURL != "" {
+ return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", statusMsg, bodyMax, reqURL)
+ }
+ return fmt.Errorf("request returned %s with a message (> %d bytes); check if the server supports the requested API version", statusMsg, bodyMax)
+ }
+ }
+ if len(body) == 0 {
+ if reqURL != "" {
+ return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", statusMsg, reqURL)
+ }
+ return fmt.Errorf("request returned %s; check if the server supports the requested API version", statusMsg)
+ }
+
+ var daemonErr error
+ if serverResp.Header.Get("Content-Type") == "application/json" && (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) {
+ var errorResponse types.ErrorResponse
+ if err := json.Unmarshal(body, &errorResponse); err != nil {
+ return errors.Wrap(err, "Error reading JSON")
+ }
+ if errorResponse.Message == "" {
+ // Error-message is empty, which means that we successfully parsed the
+ // JSON-response (no error produced), but it didn't contain an error
+ // message. This could either be because the response was empty, or
+ // the response was valid JSON, but not with the expected schema
+ // ([types.ErrorResponse]).
+ //
+ // We cannot use "strict" JSON handling (json.NewDecoder with DisallowUnknownFields)
+ // due to the API using an open schema (we must anticipate fields
+ // being added to [types.ErrorResponse] in the future, and not
+ // reject those responses).
+ //
+ // For these cases, we construct an error with the status-code
+ // returned, but we could consider returning (a truncated version
+ // of) the actual response as-is.
+ //
+ // TODO(thaJeztah): consider adding a log.Debug to allow clients to debug the actual response when enabling debug logging.
+ daemonErr = fmt.Errorf(`API returned a %d (%s) but provided no error-message`,
+ serverResp.StatusCode,
+ http.StatusText(serverResp.StatusCode),
+ )
+ } else {
+ daemonErr = errors.New(strings.TrimSpace(errorResponse.Message))
+ }
+ } else {
+ // Fall back to returning the response as-is for API versions < 1.24
+ // that didn't support JSON error responses, and for situations
+ // where a plain text error is returned. This branch may also catch
+ // situations where a proxy is involved, returning a HTML response.
+ daemonErr = errors.New(strings.TrimSpace(string(body)))
+ }
+ return errors.Wrap(daemonErr, "Error response from daemon")
+}
+
+func (cli *Client) addHeaders(req *http.Request, headers http.Header) *http.Request {
+ // Add CLI Config's HTTP Headers BEFORE we set the Docker headers,
+ // so that the user can't change OUR headers.
+ for k, v := range cli.customHTTPHeaders {
+ if versions.LessThan(cli.version, "1.25") && http.CanonicalHeaderKey(k) == "User-Agent" {
+ continue
+ }
+ req.Header.Set(k, v)
+ }
+
+ for k, v := range headers {
+ req.Header[http.CanonicalHeaderKey(k)] = v
+ }
+
+ if cli.userAgent != nil {
+ if *cli.userAgent == "" {
+ req.Header.Del("User-Agent")
+ } else {
+ req.Header.Set("User-Agent", *cli.userAgent)
+ }
+ }
+ return req
+}
+
+func encodeData(data interface{}) (*bytes.Buffer, error) {
+ params := bytes.NewBuffer(nil)
+ if data != nil {
+ if err := json.NewEncoder(params).Encode(data); err != nil {
+ return nil, err
+ }
+ }
+ return params, nil
+}
+
+func ensureReaderClosed(response *http.Response) {
+ if response != nil && response.Body != nil {
+ // Drain up to 512 bytes and close the body to let the Transport reuse the connection
+ // see https://github.com/google/go-github/pull/317/files#r57536827
+ //
+ // TODO(thaJeztah): see if this optimization is still needed, or already implemented in stdlib,
+ // and check if context-cancellation should handle this as well. If still needed, consider
+ // wrapping response.Body, or returning a "closer()" from [Client.sendRequest] and related
+ // methods.
+ _, _ = io.CopyN(io.Discard, response.Body, 512)
+ _ = response.Body.Close()
+ }
+}
diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go
new file mode 100644
index 0000000..bbd1191
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_create.go
@@ -0,0 +1,25 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// SecretCreate creates a new secret.
+func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) {
+ if err := cli.NewVersionError(ctx, "1.25", "secret create"); err != nil {
+ return types.SecretCreateResponse{}, err
+ }
+ resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return types.SecretCreateResponse{}, err
+ }
+
+ var response types.SecretCreateResponse
+ err = json.NewDecoder(resp.Body).Decode(&response)
+ return response, err
+}
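
Secrets require a swarm-mode manager and API 1.25 or later. A sketch with an illustrative name and payload:

    package main

    import (
        "context"
        "fmt"

        "github.com/docker/docker/api/types/swarm"
        "github.com/docker/docker/client"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        resp, err := cli.SecretCreate(context.Background(), swarm.SecretSpec{
            Annotations: swarm.Annotations{Name: "db_password"},
            Data:        []byte("s3cret"),
        })
        if err != nil {
            panic(err)
        }
        fmt.Println("created secret:", resp.ID)
    }
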
diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go
new file mode 100644
index 0000000..fdabc19
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_inspect.go
@@ -0,0 +1,37 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "io"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// SecretInspectWithRaw returns the secret information with raw data
+func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) {
+ id, err := trimID("secret", id)
+ if err != nil {
+ return swarm.Secret{}, nil, err
+ }
+ if err := cli.NewVersionError(ctx, "1.25", "secret inspect"); err != nil {
+ return swarm.Secret{}, nil, err
+ }
+ resp, err := cli.get(ctx, "/secrets/"+id, nil, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return swarm.Secret{}, nil, err
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return swarm.Secret{}, nil, err
+ }
+
+ var secret swarm.Secret
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&secret)
+
+ return secret, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go
new file mode 100644
index 0000000..e3b7dbd
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_list.go
@@ -0,0 +1,38 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// SecretList returns the list of secrets.
+func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) {
+ if err := cli.NewVersionError(ctx, "1.25", "secret list"); err != nil {
+ return nil, err
+ }
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToJSON(options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/secrets", query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ var secrets []swarm.Secret
+ err = json.NewDecoder(resp.Body).Decode(&secrets)
+ return secrets, err
+}
diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go
new file mode 100644
index 0000000..7ea2acb
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_remove.go
@@ -0,0 +1,17 @@
+package client // import "github.com/docker/docker/client"
+
+import "context"
+
+// SecretRemove removes a secret.
+func (cli *Client) SecretRemove(ctx context.Context, id string) error {
+ id, err := trimID("secret", id)
+ if err != nil {
+ return err
+ }
+ if err := cli.NewVersionError(ctx, "1.25", "secret remove"); err != nil {
+ return err
+ }
+ resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil)
+ defer ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go
new file mode 100644
index 0000000..60d21a6
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_update.go
@@ -0,0 +1,24 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// SecretUpdate attempts to update a secret.
+func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error {
+ id, err := trimID("secret", id)
+ if err != nil {
+ return err
+ }
+ if err := cli.NewVersionError(ctx, "1.25", "secret update"); err != nil {
+ return err
+ }
+ query := url.Values{}
+ query.Set("version", version.String())
+ resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go
new file mode 100644
index 0000000..54c03b1
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_create.go
@@ -0,0 +1,213 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/distribution/reference"
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/registry"
+ "github.com/docker/docker/api/types/swarm"
+ "github.com/docker/docker/api/types/versions"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// ServiceCreate creates a new service.
+func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) {
+ var response swarm.ServiceCreateResponse
+
+ // Make sure we negotiated (if the client is configured to do so),
+ // as code below contains API-version specific handling of options.
+ //
+ // Normally, version-negotiation (if enabled) would not happen until
+ // the API request is made.
+ if err := cli.checkVersion(ctx); err != nil {
+ return response, err
+ }
+
+ // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container
+ if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) {
+ service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
+ }
+
+ if err := validateServiceSpec(service); err != nil {
+ return response, err
+ }
+ if versions.LessThan(cli.version, "1.30") {
+ if err := validateAPIVersion(service, cli.version); err != nil {
+ return response, err
+ }
+ }
+
+ // ensure that the image is tagged
+ var resolveWarning string
+ switch {
+ case service.TaskTemplate.ContainerSpec != nil:
+ if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
+ service.TaskTemplate.ContainerSpec.Image = taggedImg
+ }
+ if options.QueryRegistry {
+ resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth)
+ }
+ case service.TaskTemplate.PluginSpec != nil:
+ if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
+ service.TaskTemplate.PluginSpec.Remote = taggedImg
+ }
+ if options.QueryRegistry {
+ resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth)
+ }
+ }
+
+ headers := http.Header{}
+ if versions.LessThan(cli.version, "1.30") {
+ // the custom "version" header was used by engine API before 20.10
+ // (API 1.30) to switch between client- and server-side lookup of
+ // image digests.
+ headers["version"] = []string{cli.version}
+ }
+ if options.EncodedRegistryAuth != "" {
+ headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth}
+ }
+ resp, err := cli.post(ctx, "/services/create", nil, service, headers)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.Body).Decode(&response)
+ if resolveWarning != "" {
+ response.Warnings = append(response.Warnings, resolveWarning)
+ }
+
+ return response, err
+}
+
+func resolveContainerSpecImage(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string {
+ var warning string
+ if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.ContainerSpec.Image, encodedAuth); err != nil {
+ warning = digestWarning(taskSpec.ContainerSpec.Image)
+ } else {
+ taskSpec.ContainerSpec.Image = img
+ if len(imgPlatforms) > 0 {
+ if taskSpec.Placement == nil {
+ taskSpec.Placement = &swarm.Placement{}
+ }
+ taskSpec.Placement.Platforms = imgPlatforms
+ }
+ }
+ return warning
+}
+
+func resolvePluginSpecRemote(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string {
+ var warning string
+ if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.PluginSpec.Remote, encodedAuth); err != nil {
+ warning = digestWarning(taskSpec.PluginSpec.Remote)
+ } else {
+ taskSpec.PluginSpec.Remote = img
+ if len(imgPlatforms) > 0 {
+ if taskSpec.Placement == nil {
+ taskSpec.Placement = &swarm.Placement{}
+ }
+ taskSpec.Placement.Platforms = imgPlatforms
+ }
+ }
+ return warning
+}
+
+func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) {
+ distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth)
+ var platforms []swarm.Platform
+ if err != nil {
+ return "", nil, err
+ }
+
+ imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest)
+
+ if len(distributionInspect.Platforms) > 0 {
+ platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms))
+ for _, p := range distributionInspect.Platforms {
+ // clear architecture field for arm. This is a temporary patch to address
+ // https://github.com/docker/swarmkit/issues/2294. The issue is that while
+ // image manifests report "arm" as the architecture, the node reports
+ // something like "armv7l" (includes the variant), which causes arm images
+ // to stop working with swarm mode. This patch removes the architecture
+ // constraint for arm images to ensure tasks get scheduled.
+ arch := p.Architecture
+ if strings.ToLower(arch) == "arm" {
+ arch = ""
+ }
+ platforms = append(platforms, swarm.Platform{
+ Architecture: arch,
+ OS: p.OS,
+ })
+ }
+ }
+ return imageWithDigest, platforms, err
+}
+
+// imageWithDigestString takes an image string and a digest, and updates
+// the image string if it didn't originally contain a digest. It returns
+// image unmodified in other situations.
+func imageWithDigestString(image string, dgst digest.Digest) string {
+ namedRef, err := reference.ParseNormalizedNamed(image)
+ if err == nil {
+ if _, isCanonical := namedRef.(reference.Canonical); !isCanonical {
+   // the reference does not yet contain a digest, so pin it to the resolved one
+ img, err := reference.WithDigest(namedRef, dgst)
+ if err == nil {
+ return reference.FamiliarString(img)
+ }
+ }
+ }
+ return image
+}
+
+// imageWithTagString takes an image string, and returns a tagged image
+// string, adding a 'latest' tag if one was not provided. It returns an
+// empty string if the image cannot be parsed as a named reference.
+func imageWithTagString(image string) string {
+ namedRef, err := reference.ParseNormalizedNamed(image)
+ if err == nil {
+ return reference.FamiliarString(reference.TagNameOnly(namedRef))
+ }
+ return ""
+}
+
+// digestWarning constructs a formatted warning string using the
+// image name that could not be pinned by digest. The formatting
+// is hardcoded, but could be made smarter in the future
+func digestWarning(image string) string {
+ return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
+}
+
+func validateServiceSpec(s swarm.ServiceSpec) error {
+ if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil {
+ return errors.New("must not specify both a container spec and a plugin spec in the task template")
+ }
+ if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin {
+ return errors.New("mismatched runtime with plugin spec")
+ }
+ if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) {
+ return errors.New("mismatched runtime with container spec")
+ }
+ return nil
+}
+
+func validateAPIVersion(c swarm.ServiceSpec, apiVersion string) error {
+ for _, m := range c.TaskTemplate.ContainerSpec.Mounts {
+ if m.BindOptions != nil {
+ if m.BindOptions.NonRecursive && versions.LessThan(apiVersion, "1.40") {
+ return errors.Errorf("bind-recursive=disabled requires API v1.40 or later")
+ }
+ // ReadOnlyNonRecursive can be safely ignored when API < 1.44
+ if m.BindOptions.ReadOnlyForceRecursive && versions.LessThan(apiVersion, "1.44") {
+ return errors.Errorf("bind-recursive=readonly requires API v1.44 or later")
+ }
+ }
+ }
+ return nil
+}
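
A sketch of creating a service from a minimal spec; the service name and image are illustrative:

    package main

    import (
        "context"
        "fmt"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/api/types/swarm"
        "github.com/docker/docker/client"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        spec := swarm.ServiceSpec{
            Annotations: swarm.Annotations{Name: "web"},
            TaskTemplate: swarm.TaskSpec{
                ContainerSpec: &swarm.ContainerSpec{Image: "nginx:alpine"},
            },
        }
        resp, err := cli.ServiceCreate(context.Background(), spec, types.ServiceCreateOptions{})
        if err != nil {
            panic(err)
        }
        fmt.Println("service ID:", resp.ID, "warnings:", resp.Warnings)
    }
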
diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go
new file mode 100644
index 0000000..77b4402
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_inspect.go
@@ -0,0 +1,39 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// ServiceInspectWithRaw returns the service information and the raw data.
+func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) {
+ serviceID, err := trimID("service", serviceID)
+ if err != nil {
+ return swarm.Service{}, nil, err
+ }
+
+ query := url.Values{}
+ query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults))
+ resp, err := cli.get(ctx, "/services/"+serviceID, query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return swarm.Service{}, nil, err
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return swarm.Service{}, nil, err
+ }
+
+ var response swarm.Service
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go
new file mode 100644
index 0000000..f589a84
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_list.go
@@ -0,0 +1,39 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// ServiceList returns the list of services.
+func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) {
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToJSON(options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ if options.Status {
+ query.Set("status", "true")
+ }
+
+ resp, err := cli.get(ctx, "/services", query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ var services []swarm.Service
+ err = json.NewDecoder(resp.Body).Decode(&services)
+ return services, err
+}
diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go
new file mode 100644
index 0000000..6e0cbee
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_logs.go
@@ -0,0 +1,57 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "io"
+ "net/url"
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ timetypes "github.com/docker/docker/api/types/time"
+ "github.com/pkg/errors"
+)
+
+// ServiceLogs returns the logs generated by a service in an io.ReadCloser.
+// It's up to the caller to close the stream.
+func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options container.LogsOptions) (io.ReadCloser, error) {
+ serviceID, err := trimID("service", serviceID)
+ if err != nil {
+ return nil, err
+ }
+
+ query := url.Values{}
+ if options.ShowStdout {
+ query.Set("stdout", "1")
+ }
+
+ if options.ShowStderr {
+ query.Set("stderr", "1")
+ }
+
+ if options.Since != "" {
+ ts, err := timetypes.GetTimestamp(options.Since, time.Now())
+ if err != nil {
+ return nil, errors.Wrap(err, `invalid value for "since"`)
+ }
+ query.Set("since", ts)
+ }
+
+ if options.Timestamps {
+ query.Set("timestamps", "1")
+ }
+
+ if options.Details {
+ query.Set("details", "1")
+ }
+
+ if options.Follow {
+ query.Set("follow", "1")
+ }
+ query.Set("tail", options.Tail)
+
+ resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.Body, nil
+}
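
A sketch of fetching service logs; when the service has no TTY the stream is multiplexed, so it is demultiplexed with pkg/stdcopy here (assuming that package is also available). The service name is illustrative:

    package main

    import (
        "context"
        "os"

        "github.com/docker/docker/api/types/container"
        "github.com/docker/docker/client"
        "github.com/docker/docker/pkg/stdcopy"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        rc, err := cli.ServiceLogs(context.Background(), "web", container.LogsOptions{
            ShowStdout: true,
            ShowStderr: true,
            Tail:       "100",
        })
        if err != nil {
            panic(err)
        }
        defer rc.Close()

        // Split the multiplexed stream back into stdout and stderr.
        _, _ = stdcopy.StdCopy(os.Stdout, os.Stderr, rc)
    }
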
diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go
new file mode 100644
index 0000000..93c949e
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_remove.go
@@ -0,0 +1,15 @@
+package client // import "github.com/docker/docker/client"
+
+import "context"
+
+// ServiceRemove kills and removes a service.
+func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error {
+ serviceID, err := trimID("service", serviceID)
+ if err != nil {
+ return err
+ }
+
+ resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil)
+ defer ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go
new file mode 100644
index 0000000..ecb98f4
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_update.go
@@ -0,0 +1,90 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/registry"
+ "github.com/docker/docker/api/types/swarm"
+ "github.com/docker/docker/api/types/versions"
+)
+
+// ServiceUpdate updates a Service. The version number is required to avoid conflicting writes.
+// It should be the value as set *before* the update. You can find this value in the Meta field
+// of swarm.Service, which can be found using ServiceInspectWithRaw.
+func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) {
+ serviceID, err := trimID("service", serviceID)
+ if err != nil {
+ return swarm.ServiceUpdateResponse{}, err
+ }
+
+ // Make sure we negotiated (if the client is configured to do so),
+ // as code below contains API-version specific handling of options.
+ //
+ // Normally, version-negotiation (if enabled) would not happen until
+ // the API request is made.
+ if err := cli.checkVersion(ctx); err != nil {
+ return swarm.ServiceUpdateResponse{}, err
+ }
+
+ query := url.Values{}
+ if options.RegistryAuthFrom != "" {
+ query.Set("registryAuthFrom", options.RegistryAuthFrom)
+ }
+
+ if options.Rollback != "" {
+ query.Set("rollback", options.Rollback)
+ }
+
+ query.Set("version", version.String())
+
+ if err := validateServiceSpec(service); err != nil {
+ return swarm.ServiceUpdateResponse{}, err
+ }
+
+ // ensure that the image is tagged
+ var resolveWarning string
+ switch {
+ case service.TaskTemplate.ContainerSpec != nil:
+ if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
+ service.TaskTemplate.ContainerSpec.Image = taggedImg
+ }
+ if options.QueryRegistry {
+ resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth)
+ }
+ case service.TaskTemplate.PluginSpec != nil:
+ if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
+ service.TaskTemplate.PluginSpec.Remote = taggedImg
+ }
+ if options.QueryRegistry {
+ resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth)
+ }
+ }
+
+ headers := http.Header{}
+ if versions.LessThan(cli.version, "1.30") {
+ // the custom "version" header was used by engine API before 20.10
+ // (API 1.30) to switch between client- and server-side lookup of
+ // image digests.
+ headers["version"] = []string{cli.version}
+ }
+ if options.EncodedRegistryAuth != "" {
+ headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth}
+ }
+ resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return swarm.ServiceUpdateResponse{}, err
+ }
+
+ var response swarm.ServiceUpdateResponse
+ err = json.NewDecoder(resp.Body).Decode(&response)
+ if resolveWarning != "" {
+ response.Warnings = append(response.Warnings, resolveWarning)
+ }
+
+ return response, err
+}
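
The version requirement implies a read-modify-write cycle: fetch the current spec and its version with ServiceInspectWithRaw, mutate the spec, then pass the old version to ServiceUpdate. A sketch, assuming an initialized *client.Client and a replicated (not global) service:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func scaleService(ctx context.Context, cli *client.Client, serviceID string, replicas uint64) error {
	// Meta.Version (embedded in swarm.Service) guards against conflicting writes.
	svc, _, err := cli.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
	if err != nil {
		return err
	}

	// Assumes a replicated service, so Mode.Replicated is non-nil.
	svc.Spec.Mode.Replicated.Replicas = &replicas

	resp, err := cli.ServiceUpdate(ctx, serviceID, svc.Version, svc.Spec, types.ServiceUpdateOptions{})
	if err != nil {
		return err
	}
	for _, warning := range resp.Warnings {
		fmt.Println(warning)
	}
	return nil
}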
diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go
new file mode 100644
index 0000000..271fc08
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go
@@ -0,0 +1,21 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+)
+
+// SwarmGetUnlockKey retrieves the swarm's unlock key.
+func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) {
+ resp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return types.SwarmUnlockKeyResponse{}, err
+ }
+
+ var response types.SwarmUnlockKeyResponse
+ err = json.NewDecoder(resp.Body).Decode(&response)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go
new file mode 100644
index 0000000..3dcb2a5
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_init.go
@@ -0,0 +1,21 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// SwarmInit initializes the swarm.
+func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) {
+ resp, err := cli.post(ctx, "/swarm/init", nil, req, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return "", err
+ }
+
+ var response string
+ err = json.NewDecoder(resp.Body).Decode(&response)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go
new file mode 100644
index 0000000..3d5a8a0
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_inspect.go
@@ -0,0 +1,21 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// SwarmInspect inspects the swarm.
+func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) {
+ resp, err := cli.get(ctx, "/swarm", nil, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return swarm.Swarm{}, err
+ }
+
+ var response swarm.Swarm
+ err = json.NewDecoder(resp.Body).Decode(&response)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go
new file mode 100644
index 0000000..a1cf045
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_join.go
@@ -0,0 +1,14 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// SwarmJoin joins the swarm.
+func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error {
+ resp, err := cli.post(ctx, "/swarm/join", nil, req, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go
new file mode 100644
index 0000000..90ca84b
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_leave.go
@@ -0,0 +1,17 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+)
+
+// SwarmLeave leaves the swarm.
+func (cli *Client) SwarmLeave(ctx context.Context, force bool) error {
+ query := url.Values{}
+ if force {
+ query.Set("force", "1")
+ }
+ resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go
new file mode 100644
index 0000000..745d64d
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_unlock.go
@@ -0,0 +1,14 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// SwarmUnlock unlocks a locked swarm.
+func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error {
+ resp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go
new file mode 100644
index 0000000..9fde7d7
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_update.go
@@ -0,0 +1,21 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// SwarmUpdate updates the swarm.
+func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error {
+ query := url.Values{}
+ query.Set("version", version.String())
+ query.Set("rotateWorkerToken", strconv.FormatBool(flags.RotateWorkerToken))
+ query.Set("rotateManagerToken", strconv.FormatBool(flags.RotateManagerToken))
+ query.Set("rotateManagerUnlockKey", strconv.FormatBool(flags.RotateManagerUnlockKey))
+ resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil)
+ ensureReaderClosed(resp)
+ return err
+}
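
A sketch of rotating the worker join token: the spec and version come from SwarmInspect and are passed back unchanged apart from the rotation flag (cli is assumed to be an initialized *client.Client):

package main

import (
	"context"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func rotateWorkerToken(ctx context.Context, cli *client.Client) error {
	sw, err := cli.SwarmInspect(ctx)
	if err != nil {
		return err
	}
	// Re-submit the current spec unchanged; only request a new worker join token.
	return cli.SwarmUpdate(ctx, sw.Version, sw.Spec, swarm.UpdateFlags{RotateWorkerToken: true})
}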
diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go
new file mode 100644
index 0000000..37668bd
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/task_inspect.go
@@ -0,0 +1,34 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "io"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// TaskInspectWithRaw returns the task information and its raw representation.
+func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
+ taskID, err := trimID("task", taskID)
+ if err != nil {
+ return swarm.Task{}, nil, err
+ }
+
+ resp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return swarm.Task{}, nil, err
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return swarm.Task{}, nil, err
+ }
+
+ var response swarm.Task
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go
new file mode 100644
index 0000000..aba7f61
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/task_list.go
@@ -0,0 +1,35 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// TaskList returns the list of tasks.
+func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) {
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToJSON(options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/tasks", query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ var tasks []swarm.Task
+ err = json.NewDecoder(resp.Body).Decode(&tasks)
+ return tasks, err
+}
diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go
new file mode 100644
index 0000000..9dcb977
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/task_logs.go
@@ -0,0 +1,51 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "io"
+ "net/url"
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ timetypes "github.com/docker/docker/api/types/time"
+)
+
+// TaskLogs returns the logs generated by a task in an io.ReadCloser.
+// It's up to the caller to close the stream.
+func (cli *Client) TaskLogs(ctx context.Context, taskID string, options container.LogsOptions) (io.ReadCloser, error) {
+ query := url.Values{}
+ if options.ShowStdout {
+ query.Set("stdout", "1")
+ }
+
+ if options.ShowStderr {
+ query.Set("stderr", "1")
+ }
+
+ if options.Since != "" {
+ ts, err := timetypes.GetTimestamp(options.Since, time.Now())
+ if err != nil {
+ return nil, err
+ }
+ query.Set("since", ts)
+ }
+
+ if options.Timestamps {
+ query.Set("timestamps", "1")
+ }
+
+ if options.Details {
+ query.Set("details", "1")
+ }
+
+ if options.Follow {
+ query.Set("follow", "1")
+ }
+ query.Set("tail", options.Tail)
+
+ resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.Body, nil
+}
diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go
new file mode 100644
index 0000000..925d4d8
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/utils.go
@@ -0,0 +1,96 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/errdefs"
+ "github.com/docker/docker/internal/lazyregexp"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var headerRegexp = lazyregexp.New(`\ADocker/.+\s\((.+)\)\z`)
+
+type emptyIDError string
+
+func (e emptyIDError) InvalidParameter() {}
+
+func (e emptyIDError) Error() string {
+ return "invalid " + string(e) + " name or ID: value is empty"
+}
+
+// trimID trims the given object-ID / name, returning an error if it's empty.
+func trimID(objType, id string) (string, error) {
+ id = strings.TrimSpace(id)
+ if len(id) == 0 {
+ return "", emptyIDError(objType)
+ }
+ return id, nil
+}
+
+// getDockerOS returns the operating system based on the server header from the daemon.
+func getDockerOS(serverHeader string) string {
+ var osType string
+ matches := headerRegexp.FindStringSubmatch(serverHeader)
+ if len(matches) > 0 {
+ osType = matches[1]
+ }
+ return osType
+}
+
+// getFiltersQuery returns a url query with "filters" query term, based on the
+// filters provided.
+func getFiltersQuery(f filters.Args) (url.Values, error) {
+ query := url.Values{}
+ if f.Len() > 0 {
+ filterJSON, err := filters.ToJSON(f)
+ if err != nil {
+ return query, err
+ }
+ query.Set("filters", filterJSON)
+ }
+ return query, nil
+}
+
+// encodePlatforms marshals the given platform(s) to JSON format, to
+// be used for query-parameters for filtering / selecting platforms.
+func encodePlatforms(platform ...ocispec.Platform) ([]string, error) {
+ if len(platform) == 0 {
+ return []string{}, nil
+ }
+ if len(platform) == 1 {
+ p, err := encodePlatform(&platform[0])
+ if err != nil {
+ return nil, err
+ }
+ return []string{p}, nil
+ }
+
+ seen := make(map[string]struct{}, len(platform))
+ out := make([]string, 0, len(platform))
+ for i := range platform {
+ p, err := encodePlatform(&platform[i])
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := seen[p]; !ok {
+ out = append(out, p)
+ seen[p] = struct{}{}
+ }
+ }
+ return out, nil
+}
+
+// encodePlatform marshals the given platform to JSON format, to
+// be used for query-parameters for filtering / selecting platforms. It
+// is used as a helper for encodePlatforms.
+func encodePlatform(platform *ocispec.Platform) (string, error) {
+ p, err := json.Marshal(platform)
+ if err != nil {
+ return "", errdefs.InvalidParameter(fmt.Errorf("invalid platform: %v", err))
+ }
+ return string(p), nil
+}
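
For reference, encodePlatform just JSON-encodes an OCI platform. Since these helpers are unexported, the sketch below only illustrates the resulting wire format, which follows the ocispec struct tags:

package main

import (
	"encoding/json"
	"fmt"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	p, _ := json.Marshal(ocispec.Platform{Architecture: "amd64", OS: "linux"})
	fmt.Println(string(p)) // {"architecture":"amd64","os":"linux"}
}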
diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go
new file mode 100644
index 0000000..4566fd9
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/version.go
@@ -0,0 +1,21 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+)
+
+// ServerVersion returns information about the docker client and server host.
+func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) {
+ resp, err := cli.get(ctx, "/version", nil, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return types.Version{}, err
+ }
+
+ var server types.Version
+ err = json.NewDecoder(resp.Body).Decode(&server)
+ return server, err
+}
diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go
new file mode 100644
index 0000000..bedb3ab
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/volume_create.go
@@ -0,0 +1,21 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/docker/docker/api/types/volume"
+)
+
+// VolumeCreate creates a volume in the docker host.
+func (cli *Client) VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) {
+ resp, err := cli.post(ctx, "/volumes/create", nil, options, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return volume.Volume{}, err
+ }
+
+ var vol volume.Volume
+ err = json.NewDecoder(resp.Body).Decode(&vol)
+ return vol, err
+}
diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go
new file mode 100644
index 0000000..ce32bbb
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/volume_inspect.go
@@ -0,0 +1,40 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "io"
+
+ "github.com/docker/docker/api/types/volume"
+)
+
+// VolumeInspect returns the information about a specific volume in the docker host.
+func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) {
+ vol, _, err := cli.VolumeInspectWithRaw(ctx, volumeID)
+ return vol, err
+}
+
+// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation.
+func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) {
+ volumeID, err := trimID("volume", volumeID)
+ if err != nil {
+ return volume.Volume{}, nil, err
+ }
+
+ resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return volume.Volume{}, nil, err
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return volume.Volume{}, nil, err
+ }
+
+ var vol volume.Volume
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&vol)
+ return vol, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go
new file mode 100644
index 0000000..de6ce23
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/volume_list.go
@@ -0,0 +1,33 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/volume"
+)
+
+// VolumeList returns the volumes configured in the docker host.
+func (cli *Client) VolumeList(ctx context.Context, options volume.ListOptions) (volume.ListResponse, error) {
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ //nolint:staticcheck // ignore SA1019 for old code
+ filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
+ if err != nil {
+ return volume.ListResponse{}, err
+ }
+ query.Set("filters", filterJSON)
+ }
+ resp, err := cli.get(ctx, "/volumes", query, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return volume.ListResponse{}, err
+ }
+
+ var volumes volume.ListResponse
+ err = json.NewDecoder(resp.Body).Decode(&volumes)
+ return volumes, err
+}
diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go
new file mode 100644
index 0000000..7da148f
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/volume_prune.go
@@ -0,0 +1,35 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/volume"
+)
+
+// VolumesPrune requests the daemon to delete unused volume data.
+func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (volume.PruneReport, error) {
+ if err := cli.NewVersionError(ctx, "1.25", "volume prune"); err != nil {
+ return volume.PruneReport{}, err
+ }
+
+ query, err := getFiltersQuery(pruneFilters)
+ if err != nil {
+ return volume.PruneReport{}, err
+ }
+
+ resp, err := cli.post(ctx, "/volumes/prune", query, nil, nil)
+ defer ensureReaderClosed(resp)
+ if err != nil {
+ return volume.PruneReport{}, err
+ }
+
+ var report volume.PruneReport
+ if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
+ return volume.PruneReport{}, fmt.Errorf("Error retrieving volume prune report: %v", err)
+ }
+
+ return report, nil
+}
diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go
new file mode 100644
index 0000000..eefd9ce
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/volume_remove.go
@@ -0,0 +1,34 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/docker/docker/api/types/versions"
+)
+
+// VolumeRemove removes a volume from the docker host.
+func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error {
+ volumeID, err := trimID("volume", volumeID)
+ if err != nil {
+ return err
+ }
+
+ query := url.Values{}
+ if force {
+ // Make sure we negotiated (if the client is configured to do so),
+ // as code below contains API-version specific handling of options.
+ //
+ // Normally, version-negotiation (if enabled) would not happen until
+ // the API request is made.
+ if err := cli.checkVersion(ctx); err != nil {
+ return err
+ }
+ if versions.GreaterThanOrEqualTo(cli.version, "1.25") {
+ query.Set("force", "1")
+ }
+ }
+ resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil)
+ defer ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/volume_update.go b/vendor/github.com/docker/docker/client/volume_update.go
new file mode 100644
index 0000000..c91d5e9
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/volume_update.go
@@ -0,0 +1,28 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/docker/docker/api/types/swarm"
+ "github.com/docker/docker/api/types/volume"
+)
+
+// VolumeUpdate updates a volume. This only works for Cluster Volumes, and
+// only some fields can be updated.
+func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error {
+ volumeID, err := trimID("volume", volumeID)
+ if err != nil {
+ return err
+ }
+ if err := cli.NewVersionError(ctx, "1.42", "volume update"); err != nil {
+ return err
+ }
+
+ query := url.Values{}
+ query.Set("version", version.String())
+
+ resp, err := cli.put(ctx, "/volumes/"+volumeID, query, options, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go
new file mode 100644
index 0000000..a5523c3
--- /dev/null
+++ b/vendor/github.com/docker/docker/errdefs/defs.go
@@ -0,0 +1,69 @@
+package errdefs
+
+// ErrNotFound signals that the requested object doesn't exist.
+type ErrNotFound interface {
+ NotFound()
+}
+
+// ErrInvalidParameter signals that the user input is invalid.
+type ErrInvalidParameter interface {
+ InvalidParameter()
+}
+
+// ErrConflict signals that some internal state conflicts with the requested action, so it can't be performed.
+// A change in state should be able to clear this error.
+type ErrConflict interface {
+ Conflict()
+}
+
+// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action.
+type ErrUnauthorized interface {
+ Unauthorized()
+}
+
+// ErrUnavailable signals that the requested action/subsystem is not available.
+type ErrUnavailable interface {
+ Unavailable()
+}
+
+// ErrForbidden signals that the requested action cannot be performed under any circumstances.
+// When an ErrForbidden is returned, the caller should never retry the action.
+type ErrForbidden interface {
+ Forbidden()
+}
+
+// ErrSystem signals that some internal error occurred.
+// An example of this would be a failed mount request.
+type ErrSystem interface {
+ System()
+}
+
+// ErrNotModified signals that an action can't be performed because it's already in the desired state.
+type ErrNotModified interface {
+ NotModified()
+}
+
+// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured.
+type ErrNotImplemented interface {
+ NotImplemented()
+}
+
+// ErrUnknown signals that the kind of error that occurred is not known.
+type ErrUnknown interface {
+ Unknown()
+}
+
+// ErrCancelled signals that the action was cancelled.
+type ErrCancelled interface {
+ Cancelled()
+}
+
+// ErrDeadline signals that the deadline was reached before the action completed.
+type ErrDeadline interface {
+ DeadlineExceeded()
+}
+
+// ErrDataLoss indicates that data was lost or there is data corruption.
+type ErrDataLoss interface {
+ DataLoss()
+}
diff --git a/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/docker/docker/errdefs/doc.go
new file mode 100644
index 0000000..c211f17
--- /dev/null
+++ b/vendor/github.com/docker/docker/errdefs/doc.go
@@ -0,0 +1,8 @@
+// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors.
+// Errors that cross the package boundary should implement one (and only one) of these interfaces.
+//
+// Packages should not reference these interfaces directly, only implement them.
+// To check if a particular error implements one of these interfaces, there are helper
+// functions provided (e.g. `Is<SomeError>`) which can be used rather than asserting the interfaces directly.
+// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`).
+package errdefs // import "github.com/docker/docker/errdefs"
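
A minimal sketch of the intended pattern: a package defines an error type that implements exactly one of the interfaces, and callers classify it with the Is* helpers instead of type-asserting (the error type and message here are illustrative):

package main

import (
	"fmt"

	"github.com/docker/docker/errdefs"
)

// notFoundErr implements errdefs.ErrNotFound by providing NotFound().
type notFoundErr struct{ msg string }

func (e notFoundErr) Error() string { return e.msg }
func (e notFoundErr) NotFound()     {}

func main() {
	var err error = notFoundErr{msg: "no such widget"}
	fmt.Println(errdefs.IsNotFound(err)) // true
}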
diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go
new file mode 100644
index 0000000..ab76e62
--- /dev/null
+++ b/vendor/github.com/docker/docker/errdefs/helpers.go
@@ -0,0 +1,305 @@
+package errdefs
+
+import "context"
+
+type errNotFound struct{ error }
+
+func (errNotFound) NotFound() {}
+
+func (e errNotFound) Cause() error {
+ return e.error
+}
+
+func (e errNotFound) Unwrap() error {
+ return e.error
+}
+
+// NotFound creates an [ErrNotFound] error from the given error.
+// It returns the error as-is if it is either nil (no error) or already implements
+// [ErrNotFound].
+func NotFound(err error) error {
+ if err == nil || IsNotFound(err) {
+ return err
+ }
+ return errNotFound{err}
+}
+
+type errInvalidParameter struct{ error }
+
+func (errInvalidParameter) InvalidParameter() {}
+
+func (e errInvalidParameter) Cause() error {
+ return e.error
+}
+
+func (e errInvalidParameter) Unwrap() error {
+ return e.error
+}
+
+// InvalidParameter creates an [ErrInvalidParameter] error from the given error.
+// It returns the error as-is if it is either nil (no error) or already implements
+// [ErrInvalidParameter].
+func InvalidParameter(err error) error {
+ if err == nil || IsInvalidParameter(err) {
+ return err
+ }
+ return errInvalidParameter{err}
+}
+
+type errConflict struct{ error }
+
+func (errConflict) Conflict() {}
+
+func (e errConflict) Cause() error {
+ return e.error
+}
+
+func (e errConflict) Unwrap() error {
+ return e.error
+}
+
+// Conflict creates an [ErrConflict] error from the given error.
+// It returns the error as-is if it is either nil (no error) or already implements
+// [ErrConflict].
+func Conflict(err error) error {
+ if err == nil || IsConflict(err) {
+ return err
+ }
+ return errConflict{err}
+}
+
+type errUnauthorized struct{ error }
+
+func (errUnauthorized) Unauthorized() {}
+
+func (e errUnauthorized) Cause() error {
+ return e.error
+}
+
+func (e errUnauthorized) Unwrap() error {
+ return e.error
+}
+
+// Unauthorized creates an [ErrUnauthorized] error from the given error.
+// It returns the error as-is if it is either nil (no error) or already implements
+// [ErrUnauthorized].
+func Unauthorized(err error) error {
+ if err == nil || IsUnauthorized(err) {
+ return err
+ }
+ return errUnauthorized{err}
+}
+
+type errUnavailable struct{ error }
+
+func (errUnavailable) Unavailable() {}
+
+func (e errUnavailable) Cause() error {
+ return e.error
+}
+
+func (e errUnavailable) Unwrap() error {
+ return e.error
+}
+
+// Unavailable creates an [ErrUnavailable] error from the given error.
+// It returns the error as-is if it is either nil (no error) or already implements
+// [ErrUnavailable].
+func Unavailable(err error) error {
+ if err == nil || IsUnavailable(err) {
+ return err
+ }
+ return errUnavailable{err}
+}
+
+type errForbidden struct{ error }
+
+func (errForbidden) Forbidden() {}
+
+func (e errForbidden) Cause() error {
+ return e.error
+}
+
+func (e errForbidden) Unwrap() error {
+ return e.error
+}
+
+// Forbidden creates an [ErrForbidden] error from the given error.
+// It returns the error as-is if it is either nil (no error) or already implements
+// [ErrForbidden].
+func Forbidden(err error) error {
+ if err == nil || IsForbidden(err) {
+ return err
+ }
+ return errForbidden{err}
+}
+
+type errSystem struct{ error }
+
+func (errSystem) System() {}
+
+func (e errSystem) Cause() error {
+ return e.error
+}
+
+func (e errSystem) Unwrap() error {
+ return e.error
+}
+
+// System creates an [ErrSystem] error from the given error.
+// It returns the error as-is if it is either nil (no error) or already implements
+// [ErrSystem].
+func System(err error) error {
+ if err == nil || IsSystem(err) {
+ return err
+ }
+ return errSystem{err}
+}
+
+type errNotModified struct{ error }
+
+func (errNotModified) NotModified() {}
+
+func (e errNotModified) Cause() error {
+ return e.error
+}
+
+func (e errNotModified) Unwrap() error {
+ return e.error
+}
+
+// NotModified creates an [ErrNotModified] error from the given error.
+// It returns the error as-is if it is either nil (no error) or already implements
+// [ErrNotModified].
+func NotModified(err error) error {
+ if err == nil || IsNotModified(err) {
+ return err
+ }
+ return errNotModified{err}
+}
+
+type errNotImplemented struct{ error }
+
+func (errNotImplemented) NotImplemented() {}
+
+func (e errNotImplemented) Cause() error {
+ return e.error
+}
+
+func (e errNotImplemented) Unwrap() error {
+ return e.error
+}
+
+// NotImplemented creates an [ErrNotImplemented] error from the given error.
+// It returns the error as-is if it is either nil (no error) or already implements
+// [ErrNotImplemented].
+func NotImplemented(err error) error {
+ if err == nil || IsNotImplemented(err) {
+ return err
+ }
+ return errNotImplemented{err}
+}
+
+type errUnknown struct{ error }
+
+func (errUnknown) Unknown() {}
+
+func (e errUnknown) Cause() error {
+ return e.error
+}
+
+func (e errUnknown) Unwrap() error {
+ return e.error
+}
+
+// Unknown creates an [ErrUnknown] error from the given error.
+// It returns the error as-is if it is either nil (no error) or already implements
+// [ErrUnknown].
+func Unknown(err error) error {
+ if err == nil || IsUnknown(err) {
+ return err
+ }
+ return errUnknown{err}
+}
+
+type errCancelled struct{ error }
+
+func (errCancelled) Cancelled() {}
+
+func (e errCancelled) Cause() error {
+ return e.error
+}
+
+func (e errCancelled) Unwrap() error {
+ return e.error
+}
+
+// Cancelled creates an [ErrCancelled] error from the given error.
+// It returns the error as-is if it is either nil (no error) or already implements
+// [ErrCancelled].
+func Cancelled(err error) error {
+ if err == nil || IsCancelled(err) {
+ return err
+ }
+ return errCancelled{err}
+}
+
+type errDeadline struct{ error }
+
+func (errDeadline) DeadlineExceeded() {}
+
+func (e errDeadline) Cause() error {
+ return e.error
+}
+
+func (e errDeadline) Unwrap() error {
+ return e.error
+}
+
+// Deadline creates an [ErrDeadline] error from the given error.
+// It returns the error as-is if it is either nil (no error) or already implements
+// [ErrDeadline].
+func Deadline(err error) error {
+ if err == nil || IsDeadline(err) {
+ return err
+ }
+ return errDeadline{err}
+}
+
+type errDataLoss struct{ error }
+
+func (errDataLoss) DataLoss() {}
+
+func (e errDataLoss) Cause() error {
+ return e.error
+}
+
+func (e errDataLoss) Unwrap() error {
+ return e.error
+}
+
+// DataLoss creates an [ErrDataLoss] error from the given error.
+// It returns the error as-is if it is either nil (no error) or already implements
+// [ErrDataLoss].
+func DataLoss(err error) error {
+ if err == nil || IsDataLoss(err) {
+ return err
+ }
+ return errDataLoss{err}
+}
+
+// FromContext returns the error class from the passed-in context.
+func FromContext(ctx context.Context) error {
+ e := ctx.Err()
+ if e == nil {
+ return nil
+ }
+
+ if e == context.Canceled {
+ return Cancelled(e)
+ }
+ if e == context.DeadlineExceeded {
+ return Deadline(e)
+ }
+ return Unknown(e)
+}
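
Because each wrapper embeds the original error and exposes Cause/Unwrap, classification can be layered on without losing the underlying error; a sketch:

package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/docker/docker/errdefs"
)

func main() {
	base := fmt.Errorf("open config: %w", os.ErrNotExist)
	err := errdefs.NotFound(base)

	fmt.Println(errdefs.IsNotFound(err))        // true
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true: Unwrap preserves the chain
}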
diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go
new file mode 100644
index 0000000..0a8fadd
--- /dev/null
+++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go
@@ -0,0 +1,47 @@
+package errdefs
+
+import (
+ "net/http"
+)
+
+// FromStatusCode creates an errdefs error based on the provided HTTP status code.
+func FromStatusCode(err error, statusCode int) error {
+ if err == nil {
+ return nil
+ }
+ switch statusCode {
+ case http.StatusNotFound:
+ return NotFound(err)
+ case http.StatusBadRequest:
+ return InvalidParameter(err)
+ case http.StatusConflict:
+ return Conflict(err)
+ case http.StatusUnauthorized:
+ return Unauthorized(err)
+ case http.StatusServiceUnavailable:
+ return Unavailable(err)
+ case http.StatusForbidden:
+ return Forbidden(err)
+ case http.StatusNotModified:
+ return NotModified(err)
+ case http.StatusNotImplemented:
+ return NotImplemented(err)
+ case http.StatusInternalServerError:
+ if IsCancelled(err) || IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) {
+ return err
+ }
+ return System(err)
+ default:
+ switch {
+ case statusCode >= 200 && statusCode < 400:
+ // 2xx/3xx status codes carry no error class; return the error as-is
+ return err
+ case statusCode >= 400 && statusCode < 500:
+ return InvalidParameter(err)
+ case statusCode >= 500 && statusCode < 600:
+ return System(err)
+ default:
+ return Unknown(err)
+ }
+ }
+}
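
A sketch of classifying an HTTP API error by status code (the error message is illustrative):

package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/docker/docker/errdefs"
)

func main() {
	err := errdefs.FromStatusCode(errors.New("no such container: abc123"), http.StatusNotFound)
	fmt.Println(errdefs.IsNotFound(err)) // true
}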
diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go
new file mode 100644
index 0000000..30ea7e6
--- /dev/null
+++ b/vendor/github.com/docker/docker/errdefs/is.go
@@ -0,0 +1,123 @@
+package errdefs
+
+import (
+ "context"
+ "errors"
+)
+
+type causer interface {
+ Cause() error
+}
+
+type wrapErr interface {
+ Unwrap() error
+}
+
+func getImplementer(err error) error {
+ switch e := err.(type) {
+ case
+ ErrNotFound,
+ ErrInvalidParameter,
+ ErrConflict,
+ ErrUnauthorized,
+ ErrUnavailable,
+ ErrForbidden,
+ ErrSystem,
+ ErrNotModified,
+ ErrNotImplemented,
+ ErrCancelled,
+ ErrDeadline,
+ ErrDataLoss,
+ ErrUnknown:
+ return err
+ case causer:
+ return getImplementer(e.Cause())
+ case wrapErr:
+ return getImplementer(e.Unwrap())
+ default:
+ return err
+ }
+}
+
+// IsNotFound returns if the passed in error is an [ErrNotFound].
+func IsNotFound(err error) bool {
+ _, ok := getImplementer(err).(ErrNotFound)
+ return ok
+}
+
+// IsInvalidParameter returns if the passed in error is an [ErrInvalidParameter].
+func IsInvalidParameter(err error) bool {
+ _, ok := getImplementer(err).(ErrInvalidParameter)
+ return ok
+}
+
+// IsConflict returns if the passed in error is an [ErrConflict].
+func IsConflict(err error) bool {
+ _, ok := getImplementer(err).(ErrConflict)
+ return ok
+}
+
+// IsUnauthorized returns if the passed in error is an [ErrUnauthorized].
+func IsUnauthorized(err error) bool {
+ _, ok := getImplementer(err).(ErrUnauthorized)
+ return ok
+}
+
+// IsUnavailable returns if the passed in error is an [ErrUnavailable].
+func IsUnavailable(err error) bool {
+ _, ok := getImplementer(err).(ErrUnavailable)
+ return ok
+}
+
+// IsForbidden returns if the passed in error is an [ErrForbidden].
+func IsForbidden(err error) bool {
+ _, ok := getImplementer(err).(ErrForbidden)
+ return ok
+}
+
+// IsSystem returns if the passed in error is an [ErrSystem].
+func IsSystem(err error) bool {
+ _, ok := getImplementer(err).(ErrSystem)
+ return ok
+}
+
+// IsNotModified returns if the passed in error is an [ErrNotModified].
+func IsNotModified(err error) bool {
+ _, ok := getImplementer(err).(ErrNotModified)
+ return ok
+}
+
+// IsNotImplemented returns if the passed in error is an [ErrNotImplemented].
+func IsNotImplemented(err error) bool {
+ _, ok := getImplementer(err).(ErrNotImplemented)
+ return ok
+}
+
+// IsUnknown returns if the passed in error is an [ErrUnknown].
+func IsUnknown(err error) bool {
+ _, ok := getImplementer(err).(ErrUnknown)
+ return ok
+}
+
+// IsCancelled returns if the passed in error is an [ErrCancelled].
+func IsCancelled(err error) bool {
+ _, ok := getImplementer(err).(ErrCancelled)
+ return ok
+}
+
+// IsDeadline returns if the passed in error is an [ErrDeadline].
+func IsDeadline(err error) bool {
+ _, ok := getImplementer(err).(ErrDeadline)
+ return ok
+}
+
+// IsDataLoss returns if the passed in error is an [ErrDataLoss].
+func IsDataLoss(err error) bool {
+ _, ok := getImplementer(err).(ErrDataLoss)
+ return ok
+}
+
+// IsContext returns if the passed in error is due to context cancellation or deadline exceeded.
+func IsContext(err error) bool {
+ return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded)
+}
diff --git a/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go b/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go
new file mode 100644
index 0000000..6334edb
--- /dev/null
+++ b/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go
@@ -0,0 +1,90 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code below was largely copied from golang.org/x/mod@v0.22;
+// https://github.com/golang/mod/blob/v0.22.0/internal/lazyregexp/lazyre.go
+// with some additional methods added.
+
+// Package lazyregexp is a thin wrapper over regexp, allowing the use of global
+// regexp variables without forcing them to be compiled at init.
+package lazyregexp
+
+import (
+ "os"
+ "regexp"
+ "strings"
+ "sync"
+)
+
+// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be
+// compiled the first time it is needed.
+type Regexp struct {
+ str string
+ once sync.Once
+ rx *regexp.Regexp
+}
+
+func (r *Regexp) re() *regexp.Regexp {
+ r.once.Do(r.build)
+ return r.rx
+}
+
+func (r *Regexp) build() {
+ r.rx = regexp.MustCompile(r.str)
+ r.str = ""
+}
+
+func (r *Regexp) FindSubmatch(s []byte) [][]byte {
+ return r.re().FindSubmatch(s)
+}
+
+func (r *Regexp) FindAllStringSubmatch(s string, n int) [][]string {
+ return r.re().FindAllStringSubmatch(s, n)
+}
+
+func (r *Regexp) FindStringSubmatch(s string) []string {
+ return r.re().FindStringSubmatch(s)
+}
+
+func (r *Regexp) FindStringSubmatchIndex(s string) []int {
+ return r.re().FindStringSubmatchIndex(s)
+}
+
+func (r *Regexp) ReplaceAllString(src, repl string) string {
+ return r.re().ReplaceAllString(src, repl)
+}
+
+func (r *Regexp) FindString(s string) string {
+ return r.re().FindString(s)
+}
+
+func (r *Regexp) FindAllString(s string, n int) []string {
+ return r.re().FindAllString(s, n)
+}
+
+func (r *Regexp) MatchString(s string) bool {
+ return r.re().MatchString(s)
+}
+
+func (r *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string {
+ return r.re().ReplaceAllStringFunc(src, repl)
+}
+
+func (r *Regexp) SubexpNames() []string {
+ return r.re().SubexpNames()
+}
+
+var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
+
+// New creates a new lazy regexp, delaying the compiling work until it is first
+// needed. If the code is being run as part of tests, the regexp compiling will
+// happen immediately.
+func New(str string) *Regexp {
+ lr := &Regexp{str: str}
+ if inTest {
+ // In tests, always compile the regexps early.
+ lr.re()
+ }
+ return lr
+}
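
Typical use is a package-level variable, deferring regexp compilation until first use. A sketch of the pattern; note the package is internal to this module, so the import only compiles from within it, and the enclosing package and pattern are illustrative:

package client // hypothetical in-module package

import "github.com/docker/docker/internal/lazyregexp"

// Compiled lazily on first use rather than at package init.
var semverRe = lazyregexp.New(`^v\d+\.\d+\.\d+$`)

func isSemver(s string) bool {
	return semverRe.MatchString(s)
}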
diff --git a/vendor/github.com/docker/docker/internal/multierror/multierror.go b/vendor/github.com/docker/docker/internal/multierror/multierror.go
new file mode 100644
index 0000000..cf4d6a5
--- /dev/null
+++ b/vendor/github.com/docker/docker/internal/multierror/multierror.go
@@ -0,0 +1,46 @@
+package multierror
+
+import (
+ "strings"
+)
+
+// Join is a drop-in replacement for errors.Join with better formatting.
+func Join(errs ...error) error {
+ n := 0
+ for _, err := range errs {
+ if err != nil {
+ n++
+ }
+ }
+ if n == 0 {
+ return nil
+ }
+ e := &joinError{
+ errs: make([]error, 0, n),
+ }
+ for _, err := range errs {
+ if err != nil {
+ e.errs = append(e.errs, err)
+ }
+ }
+ return e
+}
+
+type joinError struct {
+ errs []error
+}
+
+func (e *joinError) Error() string {
+ if len(e.errs) == 1 {
+ return strings.TrimSpace(e.errs[0].Error())
+ }
+ stringErrs := make([]string, 0, len(e.errs))
+ for _, subErr := range e.errs {
+ stringErrs = append(stringErrs, strings.Replace(subErr.Error(), "\n", "\n\t", -1))
+ }
+ return "* " + strings.Join(stringErrs, "\n* ")
+}
+
+func (e *joinError) Unwrap() []error {
+ return e.errs
+}
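
The difference from errors.Join is the bulleted output with tab-indented continuation lines; a sketch (again usable only in-module, since the package is internal):

package client // hypothetical in-module package

import (
	"errors"
	"fmt"

	"github.com/docker/docker/internal/multierror"
)

func demoJoin() {
	err := multierror.Join(
		errors.New("first failure"),
		errors.New("second failure\nwith detail"),
	)
	fmt.Println(err)
	// Output:
	// * first failure
	// * second failure
	// 	with detail
}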
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go
new file mode 100644
index 0000000..9bbb11c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive.go
@@ -0,0 +1,1507 @@
+// Package archive provides helper functions for dealing with archive files.
+package archive
+
+import (
+ "archive/tar"
+ "bufio"
+ "bytes"
+ "compress/bzip2"
+ "compress/gzip"
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "runtime/debug"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "time"
+
+ "github.com/containerd/log"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/klauspost/compress/zstd"
+ "github.com/moby/patternmatcher"
+ "github.com/moby/sys/sequential"
+)
+
+// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a
+// tar, but that do not have their own header entry.
+//
+// The permissions mask is stored in a constant instead of locally to ensure that magic numbers do not
+// proliferate in the codebase. The default value 0755 has been selected based on the default umask of 0022, and
+// a convention of mkdir(1) calling mkdir(2) with permissions of 0777, resulting in a final value of 0755.
+//
+// This value is currently implementation-defined, and not captured in any cross-runtime specification. Thus, it is
+// subject to change in Moby at any time -- image authors who require consistent or known directory permissions
+// should explicitly control them by ensuring that header entries exist for any applicable path.
+const ImpliedDirectoryMode = 0o755
+
+type (
+ // Compression indicates whether, and how, an archive is compressed.
+ Compression int
+ // WhiteoutFormat is the format of whiteouts unpacked
+ WhiteoutFormat int
+
+ // TarOptions wraps the tar options.
+ TarOptions struct {
+ IncludeFiles []string
+ ExcludePatterns []string
+ Compression Compression
+ NoLchown bool
+ IDMap idtools.IdentityMapping
+ ChownOpts *idtools.Identity
+ IncludeSourceDir bool
+ // WhiteoutFormat is the expected on disk format for whiteout files.
+ // This format will be converted to the standard format on pack
+ // and from the standard format on unpack.
+ WhiteoutFormat WhiteoutFormat
+ // When unpacking, specifies whether overwriting a directory with a
+ // non-directory is allowed and vice versa.
+ NoOverwriteDirNonDir bool
+ // For each include when creating an archive, the included name will be
+ // replaced with the matching name from this map.
+ RebaseNames map[string]string
+ InUserNS bool
+ // Allow unpacking to succeed in spite of failures to set extended
+ // attributes on the unpacked files due to the destination filesystem
+ // not supporting them or a lack of permissions. Extended attributes
+ // were probably in the archive for a reason, so set this option at
+ // your own peril.
+ BestEffortXattrs bool
+ }
+)
+
+// Archiver allows the reuse of most utility functions of
+// this package with a pluggable Untar function. Also, to facilitate the passing of specific id
+// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
+type Archiver struct {
+ Untar func(io.Reader, string, *TarOptions) error
+ IDMapping idtools.IdentityMapping
+}
+
+// NewDefaultArchiver returns a new Archiver without any IdentityMapping
+func NewDefaultArchiver() *Archiver {
+ return &Archiver{Untar: Untar}
+}
+
+// breakoutError is used to differentiate errors related to breaking out.
+// When testing archive breakout in the unit tests, this error is expected
+// in order for the test to pass.
+type breakoutError error
+
+const (
+ Uncompressed Compression = 0 // Uncompressed represents the uncompressed.
+ Bzip2 Compression = 1 // Bzip2 is bzip2 compression algorithm.
+ Gzip Compression = 2 // Gzip is gzip compression algorithm.
+ Xz Compression = 3 // Xz is xz compression algorithm.
+ Zstd Compression = 4 // Zstd is zstd compression algorithm.
+)
+
+const (
+ AUFSWhiteoutFormat WhiteoutFormat = 0 // AUFSWhiteoutFormat is the default format for whiteouts
+ OverlayWhiteoutFormat WhiteoutFormat = 1 // OverlayWhiteoutFormat formats whiteout according to the overlay standard.
+)
+
+// IsArchivePath checks if the (possibly compressed) file at the given path
+// starts with a tar file header.
+func IsArchivePath(path string) bool {
+ file, err := os.Open(path)
+ if err != nil {
+ return false
+ }
+ defer file.Close()
+ rdr, err := DecompressStream(file)
+ if err != nil {
+ return false
+ }
+ defer rdr.Close()
+ r := tar.NewReader(rdr)
+ _, err = r.Next()
+ return err == nil
+}
+
+const (
+ zstdMagicSkippableStart = 0x184D2A50
+ zstdMagicSkippableMask = 0xFFFFFFF0
+)
+
+var (
+ bzip2Magic = []byte{0x42, 0x5A, 0x68}
+ gzipMagic = []byte{0x1F, 0x8B, 0x08}
+ xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}
+ zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
+)
+
+type matcher = func([]byte) bool
+
+func magicNumberMatcher(m []byte) matcher {
+ return func(source []byte) bool {
+ return bytes.HasPrefix(source, m)
+ }
+}
+
+// zstdMatcher detects zstd compression algorithm.
+// Zstandard compressed data is made of one or more frames.
+// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames.
+// See https://datatracker.ietf.org/doc/html/rfc8878#section-3 for more details.
+func zstdMatcher() matcher {
+ return func(source []byte) bool {
+ if bytes.HasPrefix(source, zstdMagic) {
+ // Zstandard frame
+ return true
+ }
+ // skippable frame
+ if len(source) < 8 {
+ return false
+ }
+ // magic number from 0x184D2A50 to 0x184D2A5F.
+ if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart {
+ return true
+ }
+ return false
+ }
+}
+
+// DetectCompression detects the compression algorithm of the source.
+func DetectCompression(source []byte) Compression {
+ compressionMap := map[Compression]matcher{
+ Bzip2: magicNumberMatcher(bzip2Magic),
+ Gzip: magicNumberMatcher(gzipMagic),
+ Xz: magicNumberMatcher(xzMagic),
+ Zstd: zstdMatcher(),
+ }
+ for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} {
+ fn := compressionMap[compression]
+ if fn(source) {
+ return compression
+ }
+ }
+ return Uncompressed
+}
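
A sketch of sniffing a stream's compression from its leading bytes; the sample bytes below are a gzip magic header plus one padding byte:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	header := []byte{0x1F, 0x8B, 0x08, 0x00}
	if archive.DetectCompression(header) == archive.Gzip {
		fmt.Println("gzip stream")
	}
}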
+
+func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) {
+ args := []string{"xz", "-d", "-c", "-q"}
+
+ return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive)
+}
+
+func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
+ if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" {
+ noPigz, err := strconv.ParseBool(noPigzEnv)
+ if err != nil {
+ log.G(ctx).WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var")
+ }
+ if noPigz {
+ log.G(ctx).Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv)
+ return gzip.NewReader(buf)
+ }
+ }
+
+ unpigzPath, err := exec.LookPath("unpigz")
+ if err != nil {
+ log.G(ctx).Debugf("unpigz binary not found, falling back to go gzip library")
+ return gzip.NewReader(buf)
+ }
+
+ log.G(ctx).Debugf("Using %s to decompress", unpigzPath)
+
+ return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
+}
+
+type readCloserWrapper struct {
+ io.Reader
+ closer func() error
+ closed atomic.Bool
+}
+
+func (r *readCloserWrapper) Close() error {
+ if !r.closed.CompareAndSwap(false, true) {
+ log.G(context.TODO()).Error("subsequent attempt to close readCloserWrapper")
+ if log.GetLevel() >= log.DebugLevel {
+ log.G(context.TODO()).Errorf("stack trace: %s", string(debug.Stack()))
+ }
+
+ return nil
+ }
+ if r.closer != nil {
+ return r.closer()
+ }
+ return nil
+}
+
+var bufioReader32KPool = &sync.Pool{
+ New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) },
+}
+
+type bufferedReader struct {
+ buf *bufio.Reader
+}
+
+func newBufferedReader(r io.Reader) *bufferedReader {
+ buf := bufioReader32KPool.Get().(*bufio.Reader)
+ buf.Reset(r)
+ return &bufferedReader{buf}
+}
+
+func (r *bufferedReader) Read(p []byte) (int, error) {
+ if r.buf == nil {
+ return 0, io.EOF
+ }
+ n, err := r.buf.Read(p)
+ if err == io.EOF {
+ r.buf.Reset(nil)
+ bufioReader32KPool.Put(r.buf)
+ r.buf = nil
+ }
+ return n, err
+}
+
+func (r *bufferedReader) Peek(n int) ([]byte, error) {
+ if r.buf == nil {
+ return nil, io.EOF
+ }
+ return r.buf.Peek(n)
+}
+
+// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+ buf := newBufferedReader(archive)
+ bs, err := buf.Peek(10)
+ if err != nil && err != io.EOF {
+ // Note: we'll ignore any io.EOF error because there are some odd
+ // cases where the layer.tar file will be empty (zero bytes) and
+ // that results in an io.EOF from the Peek() call. So, in those
+ // cases we'll just treat it as a non-compressed stream and
+ // that means just create an empty layer.
+ // See Issue 18170
+ return nil, err
+ }
+
+ compression := DetectCompression(bs)
+ switch compression {
+ case Uncompressed:
+ return &readCloserWrapper{
+ Reader: buf,
+ }, nil
+ case Gzip:
+ ctx, cancel := context.WithCancel(context.Background())
+
+ gzReader, err := gzDecompress(ctx, buf)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+ return &readCloserWrapper{
+ Reader: gzReader,
+ closer: func() error {
+ cancel()
+ return gzReader.Close()
+ },
+ }, nil
+ case Bzip2:
+ bz2Reader := bzip2.NewReader(buf)
+ return &readCloserWrapper{
+ Reader: bz2Reader,
+ }, nil
+ case Xz:
+ ctx, cancel := context.WithCancel(context.Background())
+
+ xzReader, err := xzDecompress(ctx, buf)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+
+ return &readCloserWrapper{
+ Reader: xzReader,
+ closer: func() error {
+ cancel()
+ return xzReader.Close()
+ },
+ }, nil
+ case Zstd:
+ zstdReader, err := zstd.NewReader(buf)
+ if err != nil {
+ return nil, err
+ }
+ return &readCloserWrapper{
+ Reader: zstdReader,
+ closer: func() error {
+ zstdReader.Close()
+ return nil
+ },
+ }, nil
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
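
DecompressStream composes naturally with archive/tar for reading a possibly-compressed layer; a sketch, with a hypothetical file path:

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	f, err := os.Open("layer.tar.gz") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	rdr, err := archive.DecompressStream(f) // compression is auto-detected
	if err != nil {
		panic(err)
	}
	defer rdr.Close()

	tr := tar.NewReader(rdr)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(hdr.Name)
	}
}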
+
+type nopWriteCloser struct {
+ io.Writer
+}
+
+func (nopWriteCloser) Close() error { return nil }
+
+// CompressStream returns a WriteCloser that compresses data written to it into dest using the specified compression algorithm.
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
+ switch compression {
+ case Uncompressed:
+ return nopWriteCloser{dest}, nil
+ case Gzip:
+ return gzip.NewWriter(dest), nil
+ case Bzip2, Xz:
+ // archive/bzip2 does not support writing, and there is no xz support at all
+ // However, this is not a problem as docker currently only generates gzipped tars.
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to
+// modify the contents or header of an entry in the archive. If the file already
+// exists in the archive the TarModifierFunc will be called with the Header and
+// a reader which will return the files content. If the file does not exist both
+// header and content will be nil.
+type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)
+
+// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the
+// tar stream are modified if they match any of the keys in mods.
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser {
+ pipeReader, pipeWriter := io.Pipe()
+
+ go func() {
+ tarReader := tar.NewReader(inputTarStream)
+ tarWriter := tar.NewWriter(pipeWriter)
+ defer inputTarStream.Close()
+ defer tarWriter.Close()
+
+ modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error {
+ header, data, err := modifier(name, original, tarReader)
+ switch {
+ case err != nil:
+ return err
+ case header == nil:
+ return nil
+ }
+
+ if header.Name == "" {
+ header.Name = name
+ }
+ header.Size = int64(len(data))
+ if err := tarWriter.WriteHeader(header); err != nil {
+ return err
+ }
+ if len(data) != 0 {
+ if _, err := tarWriter.Write(data); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ var err error
+ var originalHeader *tar.Header
+ for {
+ originalHeader, err = tarReader.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+
+ modifier, ok := mods[originalHeader.Name]
+ if !ok {
+ // No modifiers for this file, copy the header and data
+ if err := tarWriter.WriteHeader(originalHeader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ if err := copyWithBuffer(tarWriter, tarReader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ continue
+ }
+ delete(mods, originalHeader.Name)
+
+ if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ }
+
+ // Apply the modifiers that haven't matched any files in the archive
+ for name, modifier := range mods {
+ if err := modify(name, nil, modifier, nil); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ }
+
+ pipeWriter.Close()
+ }()
+ return pipeReader
+}
+
+// Extension returns the extension of a file that uses the specified compression algorithm.
+func (compression *Compression) Extension() string {
+ switch *compression {
+ case Uncompressed:
+ return "tar"
+ case Bzip2:
+ return "tar.bz2"
+ case Gzip:
+ return "tar.gz"
+ case Xz:
+ return "tar.xz"
+ case Zstd:
+ return "tar.zst"
+ }
+ return ""
+}
+
+// assert that we implement [tar.FileInfoNames].
+//
+// TODO(thaJeztah): disabled to allow compiling on < go1.23. un-comment once we drop support for older versions of go.
+// var _ tar.FileInfoNames = (*nosysFileInfo)(nil)
+
+// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to
+// prevent tar.FileInfoHeader from introspecting it and potentially calling into
+// glibc.
+//
+// It implements [tar.FileInfoNames] to further prevent [tar.FileInfoHeader]
+// from performing any lookups on go1.23 and up. see https://go.dev/issue/50102
+type nosysFileInfo struct {
+ os.FileInfo
+}
+
+// Uname stubs out looking up username. It implements [tar.FileInfoNames]
+// to prevent [tar.FileInfoHeader] from loading libraries to perform
+// username lookups.
+func (fi nosysFileInfo) Uname() (string, error) {
+ return "", nil
+}
+
+// Gname stubs out looking up group-name. It implements [tar.FileInfoNames]
+// to prevent [tar.FileInfoHeader] from loading libraries to perform
+// group-name lookups.
+func (fi nosysFileInfo) Gname() (string, error) {
+ return "", nil
+}
+
+func (fi nosysFileInfo) Sys() interface{} {
+ // A Sys value of type *tar.Header is safe as it is system-independent.
+ // The tar.FileInfoHeader function copies the fields into the returned
+ // header without performing any OS lookups.
+ if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok {
+ return sys
+ }
+ return nil
+}
+
+// sysStat, if non-nil, populates hdr from system-dependent fields of fi.
+var sysStat func(fi os.FileInfo, hdr *tar.Header) error
+
+// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi.
+//
+// Compared to the archive/tar.FileInfoHeader function, this function is safe to
+// call from a chrooted process as it does not populate fields which would
+// require operating system lookups. It behaves identically to
+// tar.FileInfoHeader when fi is a FileInfo value returned from
+// tar.Header.FileInfo().
+//
+// When fi is a FileInfo for a native file, such as returned from os.Stat() and
+// os.Lstat(), the returned Header value differs from one returned from
+// tar.FileInfoHeader in the following ways. The Uname and Gname fields are not
+// set as OS lookups would be required to populate them. The AccessTime and
+// ChangeTime fields are not currently set (not yet implemented) although that
+// is subject to change. Callers which require the AccessTime or ChangeTime
+// fields to be zeroed should explicitly zero them out in the returned Header
+// value to avoid any compatibility issues in the future.
+func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) {
+ hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link)
+ if err != nil {
+ return nil, err
+ }
+ if sysStat != nil {
+ return hdr, sysStat(fi, hdr)
+ }
+ return hdr, nil
+}
+
+// FileInfoHeader creates a populated Header from fi.
+//
+// Compared to the archive/tar package, this function fills in less information
+// but is safe to call from a chrooted process. The AccessTime and ChangeTime
+// fields are not set in the returned header, ModTime is truncated to one-second
+// precision, and the Uname and Gname fields are only set when fi is a FileInfo
+// value returned from tar.Header.FileInfo().
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
+ hdr, err := FileInfoHeaderNoLookups(fi, link)
+ if err != nil {
+ return nil, err
+ }
+ hdr.Format = tar.FormatPAX
+ hdr.ModTime = hdr.ModTime.Truncate(time.Second)
+ hdr.AccessTime = time.Time{}
+ hdr.ChangeTime = time.Time{}
+ hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+ hdr.Name = canonicalTarName(name, fi.IsDir())
+ return hdr, nil
+}
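+
+// A minimal usage sketch for FileInfoHeader; "somepath" and the empty
+// link target are illustrative only:
+//
+//	fi, err := os.Lstat("somepath")
+//	if err != nil { ... }
+//	hdr, err := FileInfoHeader("somepath", fi, "")
+//	// hdr uses the PAX format, second-precision ModTime, and zeroed
+//	// AccessTime/ChangeTime.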
+
+const paxSchilyXattr = "SCHILY.xattr."
+
+// ReadSecurityXattrToTarHeader reads the security.capability xattr from the
+// filesystem into a tar header.
+func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
+ const (
+ // Values based on linux/include/uapi/linux/capability.h
+ xattrCapsSz2 = 20
+ versionOffset = 3
+ vfsCapRevision2 = 2
+ vfsCapRevision3 = 3
+ )
+ capability, _ := lgetxattr(path, "security.capability")
+ if capability != nil {
+ if capability[versionOffset] == vfsCapRevision3 {
+ // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no
+ // sense outside the user namespace the archive is built in.
+ capability[versionOffset] = vfsCapRevision2
+ capability = capability[:xattrCapsSz2]
+ }
+ if hdr.PAXRecords == nil {
+ hdr.PAXRecords = make(map[string]string)
+ }
+ hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability)
+ }
+ return nil
+}
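+
+// For example, a file carrying a security.capability xattr ends up with a
+// PAX record keyed "SCHILY.xattr.security.capability" in its header, which
+// createTarFile later restores via lsetxattr on extraction.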
+
+type tarWhiteoutConverter interface {
+ ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
+ ConvertRead(*tar.Header, string) (bool, error)
+}
+
+type tarAppender struct {
+ TarWriter *tar.Writer
+
+ // for hardlink mapping
+ SeenFiles map[uint64]string
+ IdentityMapping idtools.IdentityMapping
+ ChownOpts *idtools.Identity
+
+	// For packing and unpacking whiteout files in a
+	// non-standard format. The whiteout files defined
+	// by the AUFS standard are used as the tar whiteout
+	// standard.
+ WhiteoutConverter tarWhiteoutConverter
+}
+
+func newTarAppender(idMapping idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender {
+ return &tarAppender{
+ SeenFiles: make(map[uint64]string),
+ TarWriter: tar.NewWriter(writer),
+ IdentityMapping: idMapping,
+ ChownOpts: chownOpts,
+ }
+}
+
+// canonicalTarName returns a consistent POSIX-style path for the file or
+// directory to be archived, regardless of the platform the archive is
+// created on.
+func canonicalTarName(name string, isDir bool) string {
+ name = filepath.ToSlash(name)
+
+ // suffix with '/' for directories
+ if isDir && !strings.HasSuffix(name, "/") {
+ name += "/"
+ }
+ return name
+}
+
+// addTarFile adds to the tar archive a file from `path` as `name`
+func (ta *tarAppender) addTarFile(path, name string) error {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return err
+ }
+
+ var link string
+ if fi.Mode()&os.ModeSymlink != 0 {
+ var err error
+ link, err = os.Readlink(path)
+ if err != nil {
+ return err
+ }
+ }
+
+ hdr, err := FileInfoHeader(name, fi, link)
+ if err != nil {
+ return err
+ }
+ if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
+ return err
+ }
+
+ // if it's not a directory and has more than 1 link,
+ // it's hard linked, so set the type flag accordingly
+ if !fi.IsDir() && hasHardlinks(fi) {
+ inode, err := getInodeFromStat(fi.Sys())
+ if err != nil {
+ return err
+ }
+		// a hard link should have a name that it links to,
+		// and that linked name should appear first in the tar archive
+ if oldpath, ok := ta.SeenFiles[inode]; ok {
+ hdr.Typeflag = tar.TypeLink
+ hdr.Linkname = oldpath
+			hdr.Size = 0 // This must be here for the writer math to add up!
+ } else {
+ ta.SeenFiles[inode] = name
+ }
+ }
+
+	// check whether the file is an overlayfs whiteout;
+	// if so, skip re-mapping container ID mappings.
+ isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0
+
+ // handle re-mapping container ID mappings back to host ID mappings before
+ // writing tar headers/files. We skip whiteout files because they were written
+ // by the kernel and already have proper ownership relative to the host
+ if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() {
+ fileIDPair, err := getFileUIDGID(fi.Sys())
+ if err != nil {
+ return err
+ }
+ hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair)
+ if err != nil {
+ return err
+ }
+ }
+
+ // explicitly override with ChownOpts
+ if ta.ChownOpts != nil {
+ hdr.Uid = ta.ChownOpts.UID
+ hdr.Gid = ta.ChownOpts.GID
+ }
+
+ if ta.WhiteoutConverter != nil {
+ wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
+ if err != nil {
+ return err
+ }
+
+		// If a whiteout header was returned, write the original hdr
+		// first, then replace hdr with wo to be written after it:
+		// whiteouts must always follow the entry they white out. Note
+		// that ConvertWrite may also have rewritten the original hdr
+		// itself into a whiteout entry.
+ if wo != nil {
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ return fmt.Errorf("tar: cannot use whiteout for non-empty file")
+ }
+ hdr = wo
+ }
+ }
+
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ // We use sequential file access to avoid depleting the standby list on
+ // Windows. On Linux, this equates to a regular os.Open.
+ file, err := sequential.Open(path)
+ if err != nil {
+ return err
+ }
+
+ err = copyWithBuffer(ta.TarWriter, file)
+ file.Close()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, opts *TarOptions) error {
+ var (
+ Lchown = true
+ inUserns, bestEffortXattrs bool
+ chownOpts *idtools.Identity
+ )
+
+ // TODO(thaJeztah): make opts a required argument.
+ if opts != nil {
+ Lchown = !opts.NoLchown
+ inUserns = opts.InUserNS // TODO(thaJeztah): consider deprecating opts.InUserNS and detect locally.
+ chownOpts = opts.ChownOpts
+ bestEffortXattrs = opts.BestEffortXattrs
+ }
+
+	// hdr.Mode is in linux format, which we can use for syscalls,
+ // but for os.Foo() calls we need the mode converted to os.FileMode,
+ // so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+ hdrInfo := hdr.FileInfo()
+
+ switch hdr.Typeflag {
+ case tar.TypeDir:
+ // Create directory unless it exists as a directory already.
+ // In that case we just want to merge the two
+ if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+ if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+
+ case tar.TypeReg:
+ // Source is regular file. We use sequential file access to avoid depleting
+ // the standby list on Windows. On Linux, this equates to a regular os.OpenFile.
+ file, err := sequential.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+ if err != nil {
+ return err
+ }
+ if err := copyWithBuffer(file, reader); err != nil {
+ _ = file.Close()
+ return err
+ }
+ _ = file.Close()
+
+ case tar.TypeBlock, tar.TypeChar:
+ if inUserns { // cannot create devices in a userns
+ log.G(context.TODO()).WithFields(log.Fields{"path": path, "type": hdr.Typeflag}).Debug("skipping device nodes in a userns")
+ return nil
+ }
+		// Handle this in an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeFifo:
+		// Handle this in an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ if inUserns && errors.Is(err, syscall.EPERM) {
+				// In most cases, a fifo cannot be created when running in a user namespace
+ log.G(context.TODO()).WithFields(log.Fields{"error": err, "path": path, "type": hdr.Typeflag}).Debug("creating fifo node in a userns")
+ return nil
+ }
+ return err
+ }
+
+ case tar.TypeLink:
+ // #nosec G305 -- The target path is checked for path traversal.
+ targetPath := filepath.Join(extractDir, hdr.Linkname)
+ // check for hardlink breakout
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+ }
+ if err := os.Link(targetPath, path); err != nil {
+ return err
+ }
+
+ case tar.TypeSymlink:
+ // path -> hdr.Linkname = targetPath
+ // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
+ targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // #nosec G305 -- The target path is checked for path traversal.
+
+ // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
+ // that symlink would first have to be created, which would be caught earlier, at this very check:
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
+ }
+ if err := os.Symlink(hdr.Linkname, path); err != nil {
+ return err
+ }
+
+ case tar.TypeXGlobalHeader:
+ log.G(context.TODO()).Debug("PAX Global Extended Headers found and ignored")
+ return nil
+
+ default:
+ return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
+ }
+
+ // Lchown is not supported on Windows.
+ if Lchown && runtime.GOOS != "windows" {
+ if chownOpts == nil {
+ chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}
+ }
+ if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
+ var msg string
+ if inUserns && errors.Is(err, syscall.EINVAL) {
+ msg = " (try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)"
+ }
+ return fmt.Errorf("failed to Lchown %q for UID %d, GID %d%s: %w", path, hdr.Uid, hdr.Gid, msg, err)
+ }
+ }
+
+ var xattrErrs []string
+ for key, value := range hdr.PAXRecords {
+ xattr, ok := strings.CutPrefix(key, paxSchilyXattr)
+ if !ok {
+ continue
+ }
+ if err := lsetxattr(path, xattr, []byte(value), 0); err != nil {
+			if (bestEffortXattrs && errors.Is(err, syscall.ENOTSUP)) || errors.Is(err, syscall.EPERM) {
+ // EPERM occurs if modifying xattrs is not allowed. This can
+ // happen when running in userns with restrictions (ChromeOS).
+ xattrErrs = append(xattrErrs, err.Error())
+ continue
+ }
+ return err
+ }
+ }
+
+ if len(xattrErrs) > 0 {
+ log.G(context.TODO()).WithFields(log.Fields{
+ "errors": xattrErrs,
+		}).Warn("ignored xattrs in archive: not supported or not permitted by the underlying filesystem")
+ }
+
+ // There is no LChmod, so ignore mode for symlink. Also, this
+ // must happen after chown, as that can modify the file mode
+ if err := handleLChmod(hdr, path, hdrInfo); err != nil {
+ return err
+ }
+
+ aTime := boundTime(latestTime(hdr.AccessTime, hdr.ModTime))
+ mTime := boundTime(hdr.ModTime)
+
+	// chtimes doesn't currently support a NOFOLLOW flag
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := chtimes(path, aTime, mTime); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := chtimes(path, aTime, mTime); err != nil {
+ return err
+ }
+ } else {
+ if err := lchtimes(path, aTime, mTime); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Tar creates an archive from the directory at `path`, and returns it as a
+// stream of bytes.
+func Tar(path string, compression Compression) (io.ReadCloser, error) {
+ return TarWithOptions(path, &TarOptions{Compression: compression})
+}
+
+// TarWithOptions creates an archive from the directory at `srcPath`, only including files whose relative
+// paths are included in `options.IncludeFiles` (if non-nil) or not excluded by `options.ExcludePatterns`.
+func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
+ tb, err := NewTarballer(srcPath, options)
+ if err != nil {
+ return nil, err
+ }
+ go tb.Do()
+ return tb.Reader(), nil
+}
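+
+// A minimal usage sketch for TarWithOptions; the paths and exclude pattern
+// are illustrative only:
+//
+//	rc, err := TarWithOptions("/some/dir", &TarOptions{
+//		Compression:     Gzip,
+//		ExcludePatterns: []string{"*.tmp"},
+//	})
+//	if err != nil { ... }
+//	defer rc.Close()
+//	_, err = io.Copy(dst, rc) // dst is any io.Writer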
+
+// Tarballer is a lower-level interface to TarWithOptions which gives the caller
+// control over which goroutine the archiving operation executes on.
+type Tarballer struct {
+ srcPath string
+ options *TarOptions
+ pm *patternmatcher.PatternMatcher
+ pipeReader *io.PipeReader
+ pipeWriter *io.PipeWriter
+ compressWriter io.WriteCloser
+ whiteoutConverter tarWhiteoutConverter
+}
+
+// NewTarballer constructs a new tarballer. The arguments are the same as for
+// TarWithOptions.
+func NewTarballer(srcPath string, options *TarOptions) (*Tarballer, error) {
+ pm, err := patternmatcher.New(options.ExcludePatterns)
+ if err != nil {
+ return nil, err
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+
+ compressWriter, err := CompressStream(pipeWriter, options.Compression)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Tarballer{
+ // Fix the source path to work with long path names. This is a no-op
+ // on platforms other than Windows.
+ srcPath: addLongPathPrefix(srcPath),
+ options: options,
+ pm: pm,
+ pipeReader: pipeReader,
+ pipeWriter: pipeWriter,
+ compressWriter: compressWriter,
+ whiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat),
+ }, nil
+}
+
+// Reader returns the reader for the created archive.
+func (t *Tarballer) Reader() io.ReadCloser {
+ return t.pipeReader
+}
+
+// Do performs the archiving operation in the background. The resulting archive
+// can be read from t.Reader(). Do should only be called once on each Tarballer
+// instance.
+func (t *Tarballer) Do() {
+ ta := newTarAppender(
+ t.options.IDMap,
+ t.compressWriter,
+ t.options.ChownOpts,
+ )
+ ta.WhiteoutConverter = t.whiteoutConverter
+
+ defer func() {
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ log.G(context.TODO()).Errorf("Can't close tar writer: %s", err)
+ }
+ if err := t.compressWriter.Close(); err != nil {
+ log.G(context.TODO()).Errorf("Can't close compress writer: %s", err)
+ }
+ if err := t.pipeWriter.Close(); err != nil {
+ log.G(context.TODO()).Errorf("Can't close pipe writer: %s", err)
+ }
+ }()
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+
+ stat, err := os.Lstat(t.srcPath)
+ if err != nil {
+ return
+ }
+
+ if !stat.IsDir() {
+ // We can't later join a non-dir with any includes because the
+ // 'walk' will error if "file/." is stat-ed and "file" is not a
+ // directory. So, we must split the source path and use the
+ // basename as the include.
+ if len(t.options.IncludeFiles) > 0 {
+ log.G(context.TODO()).Warn("Tar: Can't archive a file with includes")
+ }
+
+ dir, base := SplitPathDirEntry(t.srcPath)
+ t.srcPath = dir
+ t.options.IncludeFiles = []string{base}
+ }
+
+ if len(t.options.IncludeFiles) == 0 {
+ t.options.IncludeFiles = []string{"."}
+ }
+
+ seen := make(map[string]bool)
+
+ for _, include := range t.options.IncludeFiles {
+ rebaseName := t.options.RebaseNames[include]
+
+ var (
+ parentMatchInfo []patternmatcher.MatchInfo
+ parentDirs []string
+ )
+
+ walkRoot := getWalkRoot(t.srcPath, include)
+ filepath.WalkDir(walkRoot, func(filePath string, f os.DirEntry, err error) error {
+ if err != nil {
+			log.G(context.TODO()).Errorf("Tar: Can't stat file %s to tar: %s", filePath, err)
+ return nil
+ }
+
+ relFilePath, err := filepath.Rel(t.srcPath, filePath)
+ if err != nil || (!t.options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
+ // Error getting relative path OR we are looking
+ // at the source directory path. Skip in both situations.
+ return nil
+ }
+
+ if t.options.IncludeSourceDir && include == "." && relFilePath != "." {
+ relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
+ }
+
+ skip := false
+
+ // If "include" is an exact match for the current file
+ // then even if there's an "excludePatterns" pattern that
+ // matches it, don't skip it. IOW, assume an explicit 'include'
+ // is asking for that file no matter what - which is true
+ // for some files, like .dockerignore and Dockerfile (sometimes)
+ if include != relFilePath {
+ for len(parentDirs) != 0 {
+ lastParentDir := parentDirs[len(parentDirs)-1]
+ if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) {
+ break
+ }
+ parentDirs = parentDirs[:len(parentDirs)-1]
+ parentMatchInfo = parentMatchInfo[:len(parentMatchInfo)-1]
+ }
+
+ var matchInfo patternmatcher.MatchInfo
+ if len(parentMatchInfo) != 0 {
+ skip, matchInfo, err = t.pm.MatchesUsingParentResults(relFilePath, parentMatchInfo[len(parentMatchInfo)-1])
+ } else {
+ skip, matchInfo, err = t.pm.MatchesUsingParentResults(relFilePath, patternmatcher.MatchInfo{})
+ }
+ if err != nil {
+ log.G(context.TODO()).Errorf("Error matching %s: %v", relFilePath, err)
+ return err
+ }
+
+ if f.IsDir() {
+ parentDirs = append(parentDirs, relFilePath)
+ parentMatchInfo = append(parentMatchInfo, matchInfo)
+ }
+ }
+
+ if skip {
+ // If we want to skip this file and its a directory
+ // then we should first check to see if there's an
+ // excludes pattern (e.g. !dir/file) that starts with this
+ // dir. If so then we can't skip this dir.
+
+ // Its not a dir then so we can just return/skip.
+ if !f.IsDir() {
+ return nil
+ }
+
+ // No exceptions (!...) in patterns so just skip dir
+ if !t.pm.Exclusions() {
+ return filepath.SkipDir
+ }
+
+ dirSlash := relFilePath + string(filepath.Separator)
+
+ for _, pat := range t.pm.Patterns() {
+ if !pat.Exclusion() {
+ continue
+ }
+ if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
+ // found a match - so can't skip this dir
+ return nil
+ }
+ }
+
+ // No matching exclusion dir so just skip dir
+ return filepath.SkipDir
+ }
+
+ if seen[relFilePath] {
+ return nil
+ }
+ seen[relFilePath] = true
+
+ // Rename the base resource.
+ if rebaseName != "" {
+ var replacement string
+ if rebaseName != string(filepath.Separator) {
+ // Special case the root directory to replace with an
+ // empty string instead so that we don't end up with
+ // double slashes in the paths.
+ replacement = rebaseName
+ }
+
+ relFilePath = strings.Replace(relFilePath, include, replacement, 1)
+ }
+
+ if err := ta.addTarFile(filePath, relFilePath); err != nil {
+ log.G(context.TODO()).Errorf("Can't add file %s to tar: %s", filePath, err)
+ // if pipe is broken, stop writing tar stream to it
+ if err == io.ErrClosedPipe {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+}
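+
+// A sketch of the extra control Tarballer gives over TarWithOptions: the
+// caller chooses the goroutine that runs Do. The path is illustrative only.
+//
+//	tb, err := NewTarballer("/some/dir", &TarOptions{Compression: Uncompressed})
+//	if err != nil { ... }
+//	go tb.Do() // must run concurrently with reads from tb.Reader()
+//	_, err = io.Copy(dst, tb.Reader())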
+
+// Unpack unpacks the decompressedArchive to dest with options.
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+ tr := tar.NewReader(decompressedArchive)
+
+ var dirs []*tar.Header
+ whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
+
+ // Iterate through the files in the archive.
+loop:
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ // ignore XGlobalHeader early to avoid creating parent directories for them
+ if hdr.Typeflag == tar.TypeXGlobalHeader {
+ log.G(context.TODO()).Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name)
+ continue
+ }
+
+ // Normalize name, for safety and for a simple is-root check
+ // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
+ // This keeps "..\" as-is, but normalizes "\..\" to "\".
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ for _, exclude := range options.ExcludePatterns {
+ if strings.HasPrefix(hdr.Name, exclude) {
+ continue loop
+ }
+ }
+
+ // Ensure that the parent directory exists.
+ err = createImpliedDirectories(dest, hdr, options)
+ if err != nil {
+ return err
+ }
+
+ // #nosec G305 -- The joined path is checked for path traversal.
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return err
+ }
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+
+		// If path exists we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing directory with a non-directory from the archive.
+ return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
+ }
+
+ if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing non-directory with a directory from the archive.
+ return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
+ }
+
+ if fi.IsDir() && hdr.Name == "." {
+ continue
+ }
+
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return err
+ }
+ }
+ }
+
+ if err := remapIDs(options.IDMap, hdr); err != nil {
+ return err
+ }
+
+ if whiteoutConverter != nil {
+ writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
+ if err != nil {
+ return err
+ }
+ if !writeFile {
+ continue
+ }
+ }
+
+ if err := createTarFile(path, dest, hdr, tr, options); err != nil {
+ return err
+ }
+
+ // Directory mtimes must be handled at the end to avoid further
+ // file creation in them to modify the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ }
+
+ for _, hdr := range dirs {
+ // #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice.
+ path := filepath.Join(dest, hdr.Name)
+
+ if err := chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// createImpliedDirectories will create all parent directories of the current path with default permissions, if they do
+// not already exist. This is possible as the tar format supports 'implicit' directories, where their existence is
+// defined by the paths of files in the tar, but there are no header entries for the directories themselves, and thus
+// we must both create them and choose metadata like permissions.
+//
+// The caller should have performed filepath.Clean(hdr.Name), so hdr.Name will now be in the filepath format for the OS
+// on which the daemon is running. This precondition is required because this function assumes an OS-specific
+// separator when checking that a path is not the root.
+func createImpliedDirectories(dest string, hdr *tar.Header, options *TarOptions) error {
+ // Not the root directory, ensure that the parent directory exists
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+			// RootPair() is confined inside this branch as most cases will not require a call, so we can spend some
+ // unneeded function calls in the uncommon case to encapsulate logic -- implied directories are a niche
+ // usage that reduces the portability of an image.
+ rootIDs := options.IDMap.RootPair()
+
+ err = idtools.MkdirAllAndChownNew(parentPath, ImpliedDirectoryMode, rootIDs)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
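+
+// For example, an archive may contain an entry "a/b/file" without header
+// entries for "a/" or "a/b/"; both directories are then created here with
+// ImpliedDirectoryMode before "a/b/file" is extracted.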
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz, zstd.
+//
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, true)
+}
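+
+// A minimal usage sketch for Untar; paths are illustrative only:
+//
+//	f, err := os.Open("/tmp/layer.tar.gz")
+//	if err != nil { ... }
+//	defer f.Close()
+//	err = Untar(f, "/some/dest", nil) // compression is detected automatically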
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, false)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
+ if tarArchive == nil {
+ return fmt.Errorf("Empty archive")
+ }
+ dest = filepath.Clean(dest)
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+
+ r := tarArchive
+ if decompress {
+ decompressedArchive, err := DecompressStream(tarArchive)
+ if err != nil {
+ return err
+ }
+ defer decompressedArchive.Close()
+ r = decompressedArchive
+ }
+
+ return Unpack(r, dest, options)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func (archiver *Archiver) TarUntar(src, dst string) error {
+ archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ options := &TarOptions{
+ IDMap: archiver.IDMapping,
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// UntarPath untars the tar archive at src into the directory at dst.
+func (archiver *Archiver) UntarPath(src, dst string) error {
+ archive, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ options := &TarOptions{
+ IDMap: archiver.IDMapping,
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+ if !srcSt.IsDir() {
+ return archiver.CopyFileWithTar(src, dst)
+ }
+
+ // if this Archiver is set up with ID mapping we need to create
+ // the new destination directory with the remapped root UID/GID pair
+ // as owner
+ rootIDs := archiver.IDMapping.RootPair()
+ // Create dst, copy src's content into it
+ if err := idtools.MkdirAllAndChownNew(dst, 0o755, rootIDs); err != nil {
+ return err
+ }
+ return archiver.TarUntar(src, dst)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line tool
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+
+ if srcSt.IsDir() {
+ return fmt.Errorf("Can't copy a directory")
+ }
+
+ // Clean up the trailing slash. This must be done in an operating
+ // system specific manner.
+ if dst[len(dst)-1] == os.PathSeparator {
+ dst = filepath.Join(dst, filepath.Base(src))
+ }
+ // Create the holding directory if necessary
+ if err := os.MkdirAll(filepath.Dir(dst), 0o700); err != nil {
+ return err
+ }
+
+ r, w := io.Pipe()
+ errC := make(chan error, 1)
+
+ go func() {
+ defer close(errC)
+
+ errC <- func() error {
+ defer w.Close()
+
+ srcF, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ hdr, err := FileInfoHeaderNoLookups(srcSt, "")
+ if err != nil {
+ return err
+ }
+ hdr.Format = tar.FormatPAX
+ hdr.ModTime = hdr.ModTime.Truncate(time.Second)
+ hdr.AccessTime = time.Time{}
+ hdr.ChangeTime = time.Time{}
+ hdr.Name = filepath.Base(dst)
+ hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+ if err := remapIDs(archiver.IDMapping, hdr); err != nil {
+ return err
+ }
+
+ tw := tar.NewWriter(w)
+ defer tw.Close()
+ if err := tw.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if err := copyWithBuffer(tw, srcF); err != nil {
+ return err
+ }
+ return nil
+ }()
+ }()
+ defer func() {
+ if er := <-errC; err == nil && er != nil {
+ err = er
+ }
+ }()
+
+ err = archiver.Untar(r, filepath.Dir(dst), nil)
+ if err != nil {
+ r.CloseWithError(err)
+ }
+ return err
+}
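+
+// A minimal usage sketch for the copy helpers, assuming an Archiver value a
+// constructed elsewhere (e.g. with this package's Untar and an identity
+// mapping); the paths are illustrative only:
+//
+//	err := a.CopyWithTar("/src/dir", "/dst/dir")      // directory tree
+//	err = a.CopyFileWithTar("/src/file", "/dst/file") // single file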
+
+// IdentityMapping returns the IdentityMapping of the archiver.
+func (archiver *Archiver) IdentityMapping() idtools.IdentityMapping {
+ return archiver.IDMapping
+}
+
+func remapIDs(idMapping idtools.IdentityMapping, hdr *tar.Header) error {
+ ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid})
+ hdr.Uid, hdr.Gid = ids.UID, ids.GID
+ return err
+}
+
+// cmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
+ cmd.Stdin = input
+ pipeR, pipeW := io.Pipe()
+ cmd.Stdout = pipeW
+ var errBuf bytes.Buffer
+ cmd.Stderr = &errBuf
+
+ // Run the command and return the pipe
+ if err := cmd.Start(); err != nil {
+ return nil, err
+ }
+
+ // Ensure the command has exited before we clean anything up
+ done := make(chan struct{})
+
+ // Copy stdout to the returned pipe
+ go func() {
+ if err := cmd.Wait(); err != nil {
+ pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
+ } else {
+ pipeW.Close()
+ }
+ close(done)
+ }()
+
+ return &readCloserWrapper{
+ Reader: pipeR,
+ closer: func() error {
+ // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as
+ // cmd.Wait waits for any non-file stdout/stderr/stdin to close.
+ err := pipeR.Close()
+ <-done
+ return err
+ },
+ }, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
new file mode 100644
index 0000000..7b6c3e0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
@@ -0,0 +1,107 @@
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/moby/sys/userns"
+ "golang.org/x/sys/unix"
+)
+
+func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
+ if format == OverlayWhiteoutFormat {
+ return overlayWhiteoutConverter{}
+ }
+ return nil
+}
+
+type overlayWhiteoutConverter struct{}
+
+func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, _ error) {
+ // convert whiteouts to AUFS format
+ if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
+ // we just rename the file and make it normal
+ dir, filename := filepath.Split(hdr.Name)
+ hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
+ hdr.Mode = 0o600
+ hdr.Typeflag = tar.TypeReg
+ hdr.Size = 0
+ }
+
+ if fi.Mode()&os.ModeDir == 0 {
+ // FIXME(thaJeztah): return a sentinel error instead of nil, nil
+ return nil, nil
+ }
+
+ opaqueXattrName := "trusted.overlay.opaque"
+ if userns.RunningInUserNS() {
+ opaqueXattrName = "user.overlay.opaque"
+ }
+
+ // convert opaque dirs to AUFS format by writing an empty file with the prefix
+ opaque, err := lgetxattr(path, opaqueXattrName)
+ if err != nil {
+ return nil, err
+ }
+ if len(opaque) != 1 || opaque[0] != 'y' {
+ // FIXME(thaJeztah): return a sentinel error instead of nil, nil
+ return nil, nil
+ }
+ delete(hdr.PAXRecords, paxSchilyXattr+opaqueXattrName)
+
+ // create a header for the whiteout file
+ // it should inherit some properties from the parent, but be a regular file
+ return &tar.Header{
+ Typeflag: tar.TypeReg,
+ Mode: hdr.Mode & int64(os.ModePerm),
+ Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), // #nosec G305 -- An archive is being created, not extracted.
+ Size: 0,
+ Uid: hdr.Uid,
+ Uname: hdr.Uname,
+ Gid: hdr.Gid,
+ Gname: hdr.Gname,
+ AccessTime: hdr.AccessTime,
+ ChangeTime: hdr.ChangeTime,
+ }, nil
+}
+
+func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
+ base := filepath.Base(path)
+ dir := filepath.Dir(path)
+
+ // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
+ if base == WhiteoutOpaqueDir {
+ opaqueXattrName := "trusted.overlay.opaque"
+ if userns.RunningInUserNS() {
+ opaqueXattrName = "user.overlay.opaque"
+ }
+
+ err := unix.Setxattr(dir, opaqueXattrName, []byte{'y'}, 0)
+ if err != nil {
+ return false, fmt.Errorf("setxattr('%s', %s=y): %w", dir, opaqueXattrName, err)
+ }
+ // don't write the file itself
+ return false, err
+ }
+
+ // if a file was deleted and we are using overlay, we need to create a character device
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+
+ if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
+ return false, fmt.Errorf("failed to mknod('%s', S_IFCHR, 0): %w", originalPath, err)
+ }
+ if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
+ return false, err
+ }
+
+ // don't write the file itself
+ return false, nil
+ }
+
+ return true, nil
+}
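+
+// An illustration of the round trip, assuming the usual whiteout constants
+// (WhiteoutPrefix ".wh.", WhiteoutOpaqueDir ".wh..wh..opq"):
+//
+//	ConvertWrite: overlay char device "dir/file" -> tar entry "dir/.wh.file"
+//	              opaque xattr on "dir"          -> tar entry "dir/.wh..wh..opq"
+//	ConvertRead:  tar entry "dir/.wh.file"       -> mknod char device "dir/file"
+//	              tar entry "dir/.wh..wh..opq"   -> opaque xattr on "dir"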
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
new file mode 100644
index 0000000..6495549
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
@@ -0,0 +1,7 @@
+//go:build !linux
+
+package archive
+
+func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
new file mode 100644
index 0000000..bc6b25a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
@@ -0,0 +1,126 @@
+//go:build !windows
+
+package archive
+
+import (
+ "archive/tar"
+ "errors"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "syscall"
+
+ "github.com/docker/docker/pkg/idtools"
+ "golang.org/x/sys/unix"
+)
+
+func init() {
+ sysStat = statUnix
+}
+
+// addLongPathPrefix adds the Windows long path prefix to the path provided if
+// it does not already have it. It is a no-op on platforms other than Windows.
+func addLongPathPrefix(srcPath string) string {
+ return srcPath
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific. On Linux, we
+// can't use filepath.Join(srcPath,include) because this will clean away
+// a trailing "." or "/" which may be important.
+func getWalkRoot(srcPath string, include string) string {
+ return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include
+}
+
+// chmodTarEntry is used to adjust the file permissions used in tar header based
+// on the platform the archival is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ return perm // noop for unix as golang APIs provide perm bits correctly
+}
+
+// statUnix populates hdr from system-dependent fields of fi without performing
+// any OS lookups.
+func statUnix(fi os.FileInfo, hdr *tar.Header) error {
+ // Devmajor and Devminor are only needed for special devices.
+
+ // In FreeBSD, RDev for regular files is -1 (unless overridden by FS):
+ // https://cgit.freebsd.org/src/tree/sys/kern/vfs_default.c?h=stable/13#n1531
+ // (NODEV is -1: https://cgit.freebsd.org/src/tree/sys/sys/param.h?h=stable/13#n241).
+
+ // ZFS in particular does not override the default:
+ // https://cgit.freebsd.org/src/tree/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c?h=stable/13#n2027
+
+ // Since `Stat_t.Rdev` is uint64, the cast turns -1 into (2^64 - 1).
+ // Such large values cannot be encoded in a tar header.
+ if runtime.GOOS == "freebsd" && hdr.Typeflag != tar.TypeBlock && hdr.Typeflag != tar.TypeChar {
+ return nil
+ }
+ s, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return nil
+ }
+
+ hdr.Uid = int(s.Uid)
+ hdr.Gid = int(s.Gid)
+
+ if s.Mode&unix.S_IFBLK != 0 ||
+ s.Mode&unix.S_IFCHR != 0 {
+ hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert
+ hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert
+ }
+
+ return nil
+}
+
+func getInodeFromStat(stat interface{}) (uint64, error) {
+ s, ok := stat.(*syscall.Stat_t)
+ if !ok {
+ // FIXME(thaJeztah): this should likely return an error; see https://github.com/moby/moby/pull/49493#discussion_r1979152897
+ return 0, nil
+ }
+ return s.Ino, nil
+}
+
+func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if !ok {
+ return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t")
+ }
+ return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo.
+//
+// Creating device nodes is not supported when running in a user namespace,
+// and produces a [syscall.EPERM] in most cases.
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ mode := uint32(hdr.Mode & 0o7777)
+ switch hdr.Typeflag {
+ case tar.TypeBlock:
+ mode |= unix.S_IFBLK
+ case tar.TypeChar:
+ mode |= unix.S_IFCHR
+ case tar.TypeFifo:
+ mode |= unix.S_IFIFO
+ }
+
+ return mknod(path, mode, unix.Mkdev(uint32(hdr.Devmajor), uint32(hdr.Devminor)))
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
new file mode 100644
index 0000000..fd2546e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
@@ -0,0 +1,69 @@
+package archive
+
+import (
+ "archive/tar"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/idtools"
+)
+
+// longPathPrefix is the longpath prefix for Windows file paths.
+const longPathPrefix = `\\?\`
+
+// addLongPathPrefix adds the Windows long path prefix to the path provided if
+// it does not already have it. It is a no-op on platforms other than Windows.
+//
+// addLongPathPrefix is a copy of [github.com/docker/docker/pkg/longpath.AddPrefix].
+func addLongPathPrefix(srcPath string) string {
+ if strings.HasPrefix(srcPath, longPathPrefix) {
+ return srcPath
+ }
+ if strings.HasPrefix(srcPath, `\\`) {
+ // This is a UNC path, so we need to add 'UNC' to the path as well.
+ return longPathPrefix + `UNC` + srcPath[1:]
+ }
+ return longPathPrefix + srcPath
+}
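+
+// For example (paths illustrative only):
+//
+//	C:\foo\bar   -> \\?\C:\foo\bar
+//	\\host\share -> \\?\UNC\host\share
+//	\\?\C:\foo   -> \\?\C:\foo (already prefixed, unchanged)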
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific.
+func getWalkRoot(srcPath string, include string) string {
+ return filepath.Join(srcPath, include)
+}
+
+// chmodTarEntry is used to adjust the file permissions used in tar header based
+// on the platform the archival is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ // Remove group- and world-writable bits.
+ perm &= 0o755
+
+ // Add the x bit: make everything +x on Windows
+ return perm | 0o111
+}
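+
+// For example, 0o644 and 0o666 both become 0o755: group- and world-writable
+// bits are dropped, and execute bits are added for all.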
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+ // do nothing. no notion of Rdev, Nlink in stat on Windows
+ return
+}
+
+func getInodeFromStat(stat interface{}) (uint64, error) {
+ // do nothing. no notion of Inode in stat on Windows
+ return 0, nil
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ return nil
+}
+
+func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
+ // no notion of file ownership mapping yet on Windows
+ return idtools.Identity{UID: 0, GID: 0}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go
new file mode 100644
index 0000000..1c0509d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes.go
@@ -0,0 +1,430 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/containerd/log"
+ "github.com/docker/docker/pkg/idtools"
+)
+
+// ChangeType represents the change type.
+type ChangeType int
+
+const (
+ ChangeModify = 0 // ChangeModify represents the modify operation.
+ ChangeAdd = 1 // ChangeAdd represents the add operation.
+ ChangeDelete = 2 // ChangeDelete represents the delete operation.
+)
+
+func (c ChangeType) String() string {
+ switch c {
+ case ChangeModify:
+ return "C"
+ case ChangeAdd:
+ return "A"
+ case ChangeDelete:
+ return "D"
+ }
+ return ""
+}
+
+// Change represents a change; it wraps the change type and path.
+// It describes changes of the files in the path with respect to the
+// parent layers. The change can be a modify, add, or delete.
+// This is used for layer diff.
+type Change struct {
+ Path string
+ Kind ChangeType
+}
+
+func (change *Change) String() string {
+ return fmt.Sprintf("%s %s", change.Kind, change.Path)
+}
+
+// for sort.Sort
+type changesByPath []Change
+
+func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
+func (c changesByPath) Len() int { return len(c) }
+func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
+
+// GNU tar doesn't have sub-second mtime precision. The Go tar
+// writer (Go 1.10+) does when using PAX format, but we round times to seconds
+// to ensure archives have the same hashes for backwards compatibility.
+// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4.
+//
+// The lack of sub-second precision is problematic when we apply changes via
+// tar files. We handle this by comparing for exact times, *or* the same
+// second count with either a or b having exactly 0 nanoseconds
+func sameFsTime(a, b time.Time) bool {
+ return a.Equal(b) ||
+ (a.Unix() == b.Unix() &&
+ (a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
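+
+// For example, 12:00:01.000000000 and 12:00:01.123456789 compare equal here
+// (same second, one side has zero nanoseconds), while 12:00:01.5 and
+// 12:00:01.6 do not.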
+
+// Changes walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func Changes(layers []string, rw string) ([]Change, error) {
+ return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
+}
+
+func aufsMetadataSkip(path string) (skip bool, err error) {
+ skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
+ if err != nil {
+ skip = true
+ }
+ return skip, err
+}
+
+func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+ f := filepath.Base(path)
+
+ // If there is a whiteout, then the file was removed
+ if strings.HasPrefix(f, WhiteoutPrefix) {
+ originalFile := f[len(WhiteoutPrefix):]
+ return filepath.Join(filepath.Dir(path), originalFile), nil
+ }
+
+ return "", nil
+}
+
+type (
+ skipChange func(string) (bool, error)
+ deleteChange func(string, string, os.FileInfo) (string, error)
+)
+
+func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
+ var (
+ changes []Change
+ changedDirs = make(map[string]struct{})
+ )
+
+ err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ path, err = filepath.Rel(rw, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ path = filepath.Join(string(os.PathSeparator), path)
+
+ // Skip root
+ if path == string(os.PathSeparator) {
+ return nil
+ }
+
+ if sc != nil {
+ if skip, err := sc(path); skip {
+ return err
+ }
+ }
+
+ change := Change{
+ Path: path,
+ }
+
+ deletedFile, err := dc(rw, path, f)
+ if err != nil {
+ return err
+ }
+
+ // Find out what kind of modification happened
+ if deletedFile != "" {
+ change.Path = deletedFile
+ change.Kind = ChangeDelete
+ } else {
+ // Otherwise, the file was added
+ change.Kind = ChangeAdd
+
+ // ...Unless it already existed in a top layer, in which case, it's a modification
+ for _, layer := range layers {
+ stat, err := os.Stat(filepath.Join(layer, path))
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err == nil {
+ // The file existed in the top layer, so that's a modification
+
+ // However, if it's a directory, maybe it wasn't actually modified.
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
+ if stat.IsDir() && f.IsDir() {
+ if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
+ // Both directories are the same, don't record the change
+ return nil
+ }
+ }
+ change.Kind = ChangeModify
+ break
+ }
+ }
+ }
+
+ // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
+ // This block is here to ensure the change is recorded even if the
+ // modify time, mode and size of the parent directory in the rw and ro layers are all equal.
+ // Check https://github.com/docker/docker/pull/13590 for details.
+ if f.IsDir() {
+ changedDirs[path] = struct{}{}
+ }
+ if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
+ parent := filepath.Dir(path)
+ if _, ok := changedDirs[parent]; !ok && parent != "/" {
+ changes = append(changes, Change{Path: parent, Kind: ChangeModify})
+ changedDirs[parent] = struct{}{}
+ }
+ }
+
+ // Record change
+ changes = append(changes, change)
+ return nil
+ })
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ return changes, nil
+}
+
+// FileInfo describes the information of a file.
+type FileInfo struct {
+ parent *FileInfo
+ name string
+ stat fs.FileInfo
+ children map[string]*FileInfo
+ capability []byte
+ added bool
+}
+
+// LookUp looks up the file information of a file.
+func (info *FileInfo) LookUp(path string) *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ parent := info
+ if path == string(os.PathSeparator) {
+ return info
+ }
+
+ pathElements := strings.Split(path, string(os.PathSeparator))
+ for _, elem := range pathElements {
+ if elem != "" {
+ child := parent.children[elem]
+ if child == nil {
+ return nil
+ }
+ parent = child
+ }
+ }
+ return parent
+}
+
+func (info *FileInfo) path() string {
+ if info.parent == nil {
+ // As this runs on the daemon side, file paths are OS specific.
+ return string(os.PathSeparator)
+ }
+ return filepath.Join(info.parent.path(), info.name)
+}
+
+func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
+ sizeAtEntry := len(*changes)
+
+ if oldInfo == nil {
+ // add
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeAdd,
+ }
+ *changes = append(*changes, change)
+ info.added = true
+ }
+
+	// We make a copy so we can modify it to detect additions.
+	// Also, we only recurse on the old dir if the new info is a directory;
+	// otherwise any previous delete/change is considered recursive.
+ oldChildren := make(map[string]*FileInfo)
+ if oldInfo != nil && info.isDir() {
+ for k, v := range oldInfo.children {
+ oldChildren[k] = v
+ }
+ }
+
+ for name, newChild := range info.children {
+ oldChild := oldChildren[name]
+ if oldChild != nil {
+ // change?
+ oldStat := oldChild.stat
+ newStat := newChild.stat
+ // Note: We can't compare inode or ctime or blocksize here, because these change
+ // when copying a file into a container. However, that is not generally a problem
+ // because any content change will change mtime, and any status change should
+ // be visible when actually comparing the stat fields. The only time this
+ // breaks down is if some code intentionally hides a change by setting
+ // back mtime
+ if statDifferent(oldStat, newStat) ||
+ !bytes.Equal(oldChild.capability, newChild.capability) {
+ change := Change{
+ Path: newChild.path(),
+ Kind: ChangeModify,
+ }
+ *changes = append(*changes, change)
+ newChild.added = true
+ }
+
+ // Remove from copy so we can detect deletions
+ delete(oldChildren, name)
+ }
+
+ newChild.addChanges(oldChild, changes)
+ }
+ for _, oldChild := range oldChildren {
+ // delete
+ change := Change{
+ Path: oldChild.path(),
+ Kind: ChangeDelete,
+ }
+ *changes = append(*changes, change)
+ }
+
+ // If there were changes inside this directory, we need to add it, even if the directory
+ // itself wasn't changed. This is needed to properly save and restore filesystem permissions.
+ // As this runs on the daemon side, file paths are OS specific.
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeModify,
+ }
+ // Let's insert the directory entry before the recently added entries located inside this dir
+ *changes = append(*changes, change) // just to resize the slice, will be overwritten
+ copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
+ (*changes)[sizeAtEntry] = change
+ }
+}
+
+// Changes computes the changes between info and oldInfo and returns them as a list.
+func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
+ var changes []Change
+
+ info.addChanges(oldInfo, &changes)
+
+ return changes
+}
+
+func newRootFileInfo() *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ root := &FileInfo{
+ name: string(os.PathSeparator),
+ children: make(map[string]*FileInfo),
+ }
+ return root
+}
+
+// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
+// If oldDir is "", then all files in newDir will be Add-Changes.
+func ChangesDirs(newDir, oldDir string) ([]Change, error) {
+ var oldRoot, newRoot *FileInfo
+ if oldDir == "" {
+ emptyDir, err := os.MkdirTemp("", "empty")
+ if err != nil {
+ return nil, err
+ }
+ defer os.Remove(emptyDir)
+ oldDir = emptyDir
+ }
+ oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
+ if err != nil {
+ return nil, err
+ }
+
+ return newRoot.Changes(oldRoot), nil
+}
+
+// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
+func ChangesSize(newDir string, changes []Change) int64 {
+ var (
+ size int64
+ sf = make(map[uint64]struct{})
+ )
+ for _, change := range changes {
+ if change.Kind == ChangeModify || change.Kind == ChangeAdd {
+ file := filepath.Join(newDir, change.Path)
+ fileInfo, err := os.Lstat(file)
+ if err != nil {
+ log.G(context.TODO()).Errorf("Can not stat %q: %s", file, err)
+ continue
+ }
+
+ if fileInfo != nil && !fileInfo.IsDir() {
+ if hasHardlinks(fileInfo) {
+ inode := getIno(fileInfo)
+ if _, ok := sf[inode]; !ok {
+ size += fileInfo.Size()
+ sf[inode] = struct{}{}
+ }
+ } else {
+ size += fileInfo.Size()
+ }
+ }
+ }
+ }
+ return size
+}
+
+// ExportChanges produces an Archive from the provided changes, relative to dir.
+func ExportChanges(dir string, changes []Change, idMap idtools.IdentityMapping) (io.ReadCloser, error) {
+ reader, writer := io.Pipe()
+ go func() {
+ ta := newTarAppender(idMap, writer, nil)
+
+ sort.Sort(changesByPath(changes))
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+ for _, change := range changes {
+ if change.Kind == ChangeDelete {
+ whiteOutDir := filepath.Dir(change.Path)
+ whiteOutBase := filepath.Base(change.Path)
+ whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
+ timestamp := time.Now()
+ hdr := &tar.Header{
+ Name: whiteOut[1:],
+ Size: 0,
+ ModTime: timestamp,
+ AccessTime: timestamp,
+ ChangeTime: timestamp,
+ }
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ log.G(context.TODO()).Debugf("Can't write whiteout header: %s", err)
+ }
+ } else {
+ path := filepath.Join(dir, change.Path)
+ if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+ log.G(context.TODO()).Debugf("Can't add file %s to tar: %s", path, err)
+ }
+ }
+ }
+
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ log.G(context.TODO()).Debugf("Can't close layer: %s", err)
+ }
+ if err := writer.Close(); err != nil {
+ log.G(context.TODO()).Debugf("failed close Changes writer: %s", err)
+ }
+ }()
+ return reader, nil
+}
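+
+// A minimal sketch of the diff pipeline these helpers form; the paths and
+// the empty identity mapping are illustrative only:
+//
+//	changes, err := ChangesDirs("/layers/new", "/layers/old")
+//	if err != nil { ... }
+//	rc, err := ExportChanges("/layers/new", changes, idtools.IdentityMapping{})
+//	if err != nil { ... }
+//	defer rc.Close()
+//	err = Untar(rc, "/some/rootfs", nil) // re-apply the diff elsewhere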
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
new file mode 100644
index 0000000..9a041b0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
@@ -0,0 +1,281 @@
+package archive
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// walker is used to implement collectFileInfoForChanges on Linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on Linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save up to seconds on large
+// images.
+type walker struct {
+ dir1 string
+ dir2 string
+ root1 *FileInfo
+ root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// to generate a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+ w := &walker{
+ dir1: dir1,
+ dir2: dir2,
+ root1: newRootFileInfo(),
+ root2: newRootFileInfo(),
+ }
+
+ i1, err := os.Lstat(w.dir1)
+ if err != nil {
+ return nil, nil, err
+ }
+ i2, err := os.Lstat(w.dir2)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if err := w.walk("/", i1, i2); err != nil {
+ return nil, nil, err
+ }
+
+ return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
+ if fi == nil {
+ return nil
+ }
+ parent := root.LookUp(filepath.Dir(path))
+ if parent == nil {
+ return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
+ }
+ info := &FileInfo{
+ name: filepath.Base(path),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+ cpath := filepath.Join(dir, path)
+ info.stat = fi
+ info.capability, _ = lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
+ parent.children[info.name] = info
+ return nil
+}
+
+// Walk a subtree rooted at the same path in both trees being iterated. For
+// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
+func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
+ // Register these nodes with the return trees, unless we're still at the
+ // (already-created) roots:
+ if path != "/" {
+ if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
+ return err
+ }
+ if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
+ return err
+ }
+ }
+
+ is1Dir := i1 != nil && i1.IsDir()
+ is2Dir := i2 != nil && i2.IsDir()
+
+ sameDevice := false
+ if i1 != nil && i2 != nil {
+ si1 := i1.Sys().(*syscall.Stat_t)
+ si2 := i2.Sys().(*syscall.Stat_t)
+ if si1.Dev == si2.Dev {
+ sameDevice = true
+ }
+ }
+
+ // If these files are both non-existent, or leaves (non-dirs), we are done.
+ if !is1Dir && !is2Dir {
+ return nil
+ }
+
+ // Fetch the names of all the files contained in both directories being walked:
+ var names1, names2 []nameIno
+ if is1Dir {
+ names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+ if is2Dir {
+ names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+
+ // We have lists of the files contained in both parallel directories, sorted
+ // in the same order. Walk them in parallel, generating a unique merged list
+ // of all items present in either or both directories.
+ var names []string
+ ix1 := 0
+ ix2 := 0
+
+ for {
+ if ix1 >= len(names1) {
+ break
+ }
+ if ix2 >= len(names2) {
+ break
+ }
+
+ ni1 := names1[ix1]
+ ni2 := names2[ix2]
+
+ switch strings.Compare(ni1.name, ni2.name) {
+ case -1: // ni1 < ni2 -- advance ni1
+ // we will not encounter ni1 in names2
+ names = append(names, ni1.name)
+ ix1++
+ case 0: // ni1 == ni2
+ if ni1.ino != ni2.ino || !sameDevice {
+ names = append(names, ni1.name)
+ }
+ ix1++
+ ix2++
+ case 1: // ni1 > ni2 -- advance ni2
+ // we will not encounter ni2 in names1
+ names = append(names, ni2.name)
+ ix2++
+ }
+ }
+ for ix1 < len(names1) {
+ names = append(names, names1[ix1].name)
+ ix1++
+ }
+ for ix2 < len(names2) {
+ names = append(names, names2[ix2].name)
+ ix2++
+ }
+
+ // For each of the names present in either or both of the directories being
+ // iterated, stat the name under each root, and recurse the pair of them:
+ for _, name := range names {
+ fname := filepath.Join(path, name)
+ var cInfo1, cInfo2 os.FileInfo
+ if is1Dir {
+ cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if is2Dir {
+ cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if err = w.walk(fname, cInfo1, cInfo2); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// {name,inode} pairs used to support the early-pruning logic of the walker type
+type nameIno struct {
+ name string
+ ino uint64
+}
+
+type nameInoSlice []nameIno
+
+func (s nameInoSlice) Len() int { return len(s) }
+func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
+
+// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
+// numbers further up the stack when reading directory contents. Unlike
+// os.Readdirnames, which returns a list of filenames, this function returns a
+// list of {filename,inode} pairs.
+func readdirnames(dirname string) (names []nameIno, err error) {
+ var (
+ size = 100
+ buf = make([]byte, 4096)
+ nbuf int
+ bufp int
+ nb int
+ )
+
+ f, err := os.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ names = make([]nameIno, 0, size) // Empty with room to grow.
+ for {
+ // Refill the buffer if necessary
+ if bufp >= nbuf {
+ bufp = 0
+ nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
+ if nbuf < 0 {
+ nbuf = 0
+ }
+ if err != nil {
+ return nil, os.NewSyscallError("readdirent", err)
+ }
+ if nbuf <= 0 {
+ break // EOF
+ }
+ }
+
+ // Drain the buffer
+ nb, names = parseDirent(buf[bufp:nbuf], names)
+ bufp += nb
+ }
+
+ sl := nameInoSlice(names)
+ sort.Sort(sl)
+ return sl, nil
+}
+
+// parseDirent is a minor modification of unix.ParseDirent (linux version)
+// which returns {name,inode} pairs instead of just names.
+func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
+ origlen := len(buf)
+ for len(buf) > 0 {
+ dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) // #nosec G103 -- Ignore "G103: Use of unsafe calls should be audited"
+ buf = buf[dirent.Reclen:]
+ if dirent.Ino == 0 { // File absent in directory.
+ continue
+ }
+ b := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) // #nosec G103 -- Ignore "G103: Use of unsafe calls should be audited"
+ name := string(b[0:clen(b[:])])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ names = append(names, nameIno{name, dirent.Ino})
+ }
+ return origlen - len(buf), names
+}
+
+func clen(n []byte) int {
+ for i := 0; i < len(n); i++ {
+ if n[i] == 0 {
+ return i
+ }
+ }
+ return len(n)
+}
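The heart of the Linux walker above is the two-pointer merge over the two sorted {name,inode} lists. A minimal standalone sketch of that merge (with hypothetical names and inode numbers) shows how entries with identical inodes on the same device are pruned:

package main

import (
	"fmt"
	"strings"
)

type nameIno struct {
	name string
	ino  uint64
}

// mergeChanged merges two sorted name/inode lists and keeps only the names
// that differ between them, mirroring the pruning loop in walk().
func mergeChanged(a, b []nameIno, sameDevice bool) []string {
	var names []string
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch strings.Compare(a[i].name, b[j].name) {
		case -1: // only in a
			names = append(names, a[i].name)
			i++
		case 0: // in both: keep only if the inode changed
			if a[i].ino != b[j].ino || !sameDevice {
				names = append(names, a[i].name)
			}
			i++
			j++
		case 1: // only in b
			names = append(names, b[j].name)
			j++
		}
	}
	for ; i < len(a); i++ { // drain leftovers from a
		names = append(names, a[i].name)
	}
	for ; j < len(b); j++ { // drain leftovers from b
		names = append(names, b[j].name)
	}
	return names
}

func main() {
	old := []nameIno{{"a", 1}, {"b", 2}, {"d", 4}}
	cur := []nameIno{{"a", 1}, {"b", 9}, {"c", 3}}
	fmt.Println(mergeChanged(old, cur, true)) // [b c d]
}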
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
new file mode 100644
index 0000000..a8a3a5a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
@@ -0,0 +1,95 @@
+//go:build !linux
+
+package archive
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ err1, err2 error
+ errs = make(chan error, 2)
+ )
+ go func() {
+ oldRoot, err1 = collectFileInfo(oldDir)
+ errs <- err1
+ }()
+ go func() {
+ newRoot, err2 = collectFileInfo(newDir)
+ errs <- err2
+ }()
+
+ // block until both routines have returned
+ for i := 0; i < 2; i++ {
+ if err := <-errs; err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return oldRoot, newRoot, nil
+}
+
+func collectFileInfo(sourceDir string) (*FileInfo, error) {
+ root := newRootFileInfo()
+
+ err := filepath.WalkDir(sourceDir, func(path string, _ os.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ relPath, err := filepath.Rel(sourceDir, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ relPath = filepath.Join(string(os.PathSeparator), relPath)
+
+ // See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
+ // Temporary workaround. If the returned path starts with two backslashes,
+ // trim it down to a single backslash. Only relevant on Windows.
+ if runtime.GOOS == "windows" {
+ if strings.HasPrefix(relPath, `\\`) {
+ relPath = relPath[1:]
+ }
+ }
+
+ if relPath == string(os.PathSeparator) {
+ return nil
+ }
+
+ parent := root.LookUp(filepath.Dir(relPath))
+ if parent == nil {
+ return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+ }
+
+ s, err := os.Lstat(path)
+ if err != nil {
+ return err
+ }
+
+ info := &FileInfo{
+ name: filepath.Base(relPath),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ stat: s,
+ }
+
+ info.capability, _ = lgetxattr(path, "security.capability")
+
+ parent.children[info.name] = info
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return root, nil
+}
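The fallback collector above fans out the two tree walks with a buffered error channel. A small sketch of the same pattern, generalized to any two independent tasks:

package main

import (
	"errors"
	"fmt"
)

// runBoth runs two independent tasks concurrently and returns the first
// error seen, mirroring the errs-channel shape of collectFileInfoForChanges.
func runBoth(f, g func() error) error {
	errs := make(chan error, 2) // buffered, so neither goroutine blocks on send
	go func() { errs <- f() }()
	go func() { errs <- g() }()
	for i := 0; i < 2; i++ {
		if err := <-errs; err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := runBoth(
		func() error { return nil },
		func() error { return errors.New("boom") },
	)
	fmt.Println(err) // boom
}

Because the channel is buffered to the number of goroutines, returning early on the first error cannot strand a sender.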
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
new file mode 100644
index 0000000..4dd98bd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
@@ -0,0 +1,43 @@
+//go:build !windows
+
+package archive
+
+import (
+ "io/fs"
+ "os"
+ "syscall"
+)
+
+func statDifferent(oldStat fs.FileInfo, newStat fs.FileInfo) bool {
+ oldSys := oldStat.Sys().(*syscall.Stat_t)
+ newSys := newStat.Sys().(*syscall.Stat_t)
+ // Don't look at size for dirs, it's not a good measure of change
+ if oldStat.Mode() != newStat.Mode() ||
+ oldSys.Uid != newSys.Uid ||
+ oldSys.Gid != newSys.Gid ||
+ oldSys.Rdev != newSys.Rdev ||
+ // Don't look at size or modification time for dirs, it's not a good
+ // measure of change. See https://github.com/moby/moby/issues/9874
+ // for a description of the issue with modification time, and
+ // https://github.com/moby/moby/pull/11422 for the change.
+ // (Note that in the Windows implementation of this function,
+ // modification time IS taken as a change). See
+ // https://github.com/moby/moby/pull/37982 for more information.
+ (!oldStat.Mode().IsDir() &&
+ (!sameFsTime(oldStat.ModTime(), newStat.ModTime()) || (oldStat.Size() != newStat.Size()))) {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode().IsDir()
+}
+
+func getIno(fi os.FileInfo) uint64 {
+ return fi.Sys().(*syscall.Stat_t).Ino
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
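statDifferent, getIno, and hasHardlinks above all rely on the raw syscall.Stat_t behind an os.FileInfo. A hedged sketch of pulling those fields out on a Unix system (the path is hypothetical):

//go:build !windows

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	fi, err := os.Lstat("/etc/hostname") // hypothetical path
	if err != nil {
		fmt.Println(err)
		return
	}
	// The type assertion is safe on Unix: Sys() is always a *syscall.Stat_t.
	st := fi.Sys().(*syscall.Stat_t)
	fmt.Printf("ino=%d uid=%d gid=%d nlink=%d\n", st.Ino, st.Uid, st.Gid, st.Nlink)
}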
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
new file mode 100644
index 0000000..c89605c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
@@ -0,0 +1,33 @@
+package archive
+
+import (
+ "io/fs"
+ "os"
+)
+
+func statDifferent(oldStat fs.FileInfo, newStat fs.FileInfo) bool {
+ // Note there is slight difference between the Linux and Windows
+ // implementations here. Due to https://github.com/moby/moby/issues/9874,
+ // and the fix at https://github.com/moby/moby/pull/11422, Linux does not
+ // consider a change to the directory time as a change. Windows on NTFS
+ // does. See https://github.com/moby/moby/pull/37982 for more information.
+
+ if !sameFsTime(oldStat.ModTime(), newStat.ModTime()) ||
+ oldStat.Mode() != newStat.Mode() ||
+ oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode().IsDir()
+}
+
+// getIno is a no-op on Windows; inode numbers are not used there.
+func getIno(fi os.FileInfo) uint64 {
+ return 0
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go
new file mode 100644
index 0000000..cae0173
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy.go
@@ -0,0 +1,497 @@
+package archive
+
+import (
+ "archive/tar"
+ "context"
+ "errors"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/containerd/log"
+)
+
+// Errors used or returned by this file.
+var (
+ ErrNotDirectory = errors.New("not a directory")
+ ErrDirNotExists = errors.New("no such directory")
+ ErrCannotCopyDir = errors.New("cannot copy directory")
+ ErrInvalidCopySource = errors.New("invalid copy source content")
+)
+
+var copyPool = sync.Pool{
+ New: func() interface{} { s := make([]byte, 32*1024); return &s },
+}
+
+func copyWithBuffer(dst io.Writer, src io.Reader) error {
+ buf := copyPool.Get().(*[]byte)
+ _, err := io.CopyBuffer(dst, src, *buf)
+ copyPool.Put(buf)
+ return err
+}
+
+// PreserveTrailingDotOrSeparator returns the given cleaned path (after
+// processing using any utility functions from the path or filepath stdlib
+// packages) and appends a trailing `/.` or `/` if its corresponding original
+// path (from before being processed by utility functions from the path or
+// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
+// path already ends in a `.` path segment, then another is not added. If the
+// cleaned path already ends in a path separator, then another is not added.
+func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string) string {
+ // Ensure paths are in platform semantics
+ cleanedPath = normalizePath(cleanedPath)
+ originalPath = normalizePath(originalPath)
+
+ if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
+ if !hasTrailingPathSeparator(cleanedPath) {
+ // Add a separator if it doesn't already end with one (a cleaned
+ // path would only end in a separator if it is the root).
+ cleanedPath += string(filepath.Separator)
+ }
+ cleanedPath += "."
+ }
+
+ if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
+ cleanedPath += string(filepath.Separator)
+ }
+
+ return cleanedPath
+}
+
+// assertsDirectory returns whether the given path is
+// asserted to be a directory, i.e., the path ends with
+// a trailing `/` or `/.`, assuming a path separator of `/`.
+func assertsDirectory(path string) bool {
+ return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
+}
+
+// hasTrailingPathSeparator returns whether the given
+// path ends with the system's path separator character.
+func hasTrailingPathSeparator(path string) bool {
+ return len(path) > 0 && path[len(path)-1] == filepath.Separator
+}
+
+// specifiesCurrentDir returns whether the given path specifies
+// a "current directory", i.e., the last path segment is `.`.
+func specifiesCurrentDir(path string) bool {
+ return filepath.Base(path) == "."
+}
+
+// SplitPathDirEntry splits the given path between its directory name and its
+// basename by first cleaning the path but preserves a trailing "." if the
+// original path specified the current directory.
+func SplitPathDirEntry(path string) (dir, base string) {
+ cleanedPath := filepath.Clean(filepath.FromSlash(path))
+
+ if specifiesCurrentDir(path) {
+ cleanedPath += string(os.PathSeparator) + "."
+ }
+
+ return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
+}
+
+// TarResource archives the resource described by the given CopyInfo to a Tar
+// archive. A non-nil error is returned if sourcePath does not exist or is
+// asserted to be a directory but exists as another type of file.
+//
+// This function acts as a convenient wrapper around TarWithOptions, which
+// requires a directory as the source path. TarResource accepts either a
+// directory or a file path and correctly sets the Tar options.
+func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
+ return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
+}
+
+// TarResourceRebase is like TarResource but renames the first path element of
+// items in the resulting tar archive to match the given rebaseName if not "".
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, _ error) {
+ sourcePath = normalizePath(sourcePath)
+ if _, err := os.Lstat(sourcePath); err != nil {
+ // Catches the case where the source does not exist or is not a
+ // directory if asserted to be a directory, as this also causes an
+ // error.
+ return nil, err
+ }
+
+ // Separate the source path between its directory and
+ // the entry in that directory which we are archiving.
+ sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+ opts := TarResourceRebaseOpts(sourceBase, rebaseName)
+
+ log.G(context.TODO()).Debugf("copying %q from %q", sourceBase, sourceDir)
+ return TarWithOptions(sourceDir, opts)
+}
+
+// TarResourceRebaseOpts does not perform the Tar, but instead just creates the
+// rebase parameters to be sent to TarWithOptions (the TarOptions struct).
+func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
+ filter := []string{sourceBase}
+ return &TarOptions{
+ Compression: Uncompressed,
+ IncludeFiles: filter,
+ IncludeSourceDir: true,
+ RebaseNames: map[string]string{
+ sourceBase: rebaseName,
+ },
+ }
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+ Path string
+ Exists bool
+ IsDir bool
+ RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+ // Normalize the file path and then evaluate the symlink; we will use
+ // the link target instead of the symlink itself if followLink is set.
+ path = normalizePath(path)
+
+ resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ stat, err := os.Lstat(resolvedPath)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ return CopyInfo{
+ Path: resolvedPath,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ RebaseName: rebaseName,
+ }, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+ maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+ path = normalizePath(path)
+ originalPath := path
+
+ stat, err := os.Lstat(path)
+
+ if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+ // The path exists and is not a symlink.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+ }
+
+ // While the path is a symlink.
+ for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+ if n > maxSymlinkIter {
+ // Don't follow symlinks more than this arbitrary number of times.
+ return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+ }
+
+ // The path is a symbolic link. We need to evaluate it so that the
+ // destination of the copy operation is the link target and not the
+ // link itself. This is notably different than CopyInfoSourcePath which
+ // only evaluates symlinks before the last appearing path separator.
+ // Also note that it is okay if the last path element is a broken
+ // symlink as the copy operation should create the target.
+ var linkTarget string
+
+ linkTarget, err = os.Readlink(path)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ if !filepath.IsAbs(linkTarget) {
+ // Join with the parent directory.
+ dstParent, _ := SplitPathDirEntry(path)
+ linkTarget = filepath.Join(dstParent, linkTarget)
+ }
+
+ path = linkTarget
+ stat, err = os.Lstat(path)
+ }
+
+ if err != nil {
+ // It's okay if the destination path doesn't exist. We can still
+ // continue the copy operation if the parent directory exists.
+ if !os.IsNotExist(err) {
+ return CopyInfo{}, err
+ }
+
+ // Ensure destination parent dir exists.
+ dstParent, _ := SplitPathDirEntry(path)
+
+ parentDirStat, err := os.Stat(dstParent)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+ if !parentDirStat.IsDir() {
+ return CopyInfo{}, ErrNotDirectory
+ }
+
+ return CopyInfo{Path: path}, nil
+ }
+
+ // The path exists after resolving symlinks.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+}
+
+// PrepareArchiveCopy prepares the given srcContent archive, which should
+// contain the archived resource described by srcInfo, to the destination
+// described by dstInfo. Returns the possibly modified content archive along
+// with the path to the destination directory which it should be extracted to.
+func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
+ // Ensure in platform semantics
+ srcInfo.Path = normalizePath(srcInfo.Path)
+ dstInfo.Path = normalizePath(dstInfo.Path)
+
+ // Separate the destination path between its directory and base
+ // components in case the source archive contents need to be rebased.
+ dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
+ _, srcBase := SplitPathDirEntry(srcInfo.Path)
+
+ switch {
+ case dstInfo.Exists && dstInfo.IsDir:
+ // The destination exists as a directory. No alteration
+ // to srcContent is needed as its contents can be
+ // simply extracted to the destination directory.
+ return dstInfo.Path, io.NopCloser(srcContent), nil
+ case dstInfo.Exists && srcInfo.IsDir:
+ // The destination exists as some type of file and the source
+ // content is a directory. This is an error condition since
+ // you cannot copy a directory to an existing file location.
+ return "", nil, ErrCannotCopyDir
+ case dstInfo.Exists:
+ // The destination exists as some type of file and the source content
+ // is also a file. The source content entry will have to be renamed to
+ // have a basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case srcInfo.IsDir:
+ // The destination does not exist and the source content is an archive
+ // of a directory. The archive should be extracted to the parent of
+ // the destination path instead, and when it is, the directory that is
+ // created as a result should take the name of the destination path.
+ // The source content entries will have to be renamed to have a
+ // basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case assertsDirectory(dstInfo.Path):
+ // The destination does not exist and is asserted to be created as a
+ // directory, but the source content is not a directory. This is an
+ // error condition since you cannot create a directory from a file
+ // source.
+ return "", nil, ErrDirNotExists
+ default:
+ // The last remaining case is when the destination does not exist, is
+ // not asserted to be a directory, and the source content is not an
+ // archive of a directory. In this case, the destination file will need
+ // to be created when the archive is extracted and the source content
+ // entry will have to be renamed to have a basename which matches the
+ // destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ }
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
+ if oldBase == string(os.PathSeparator) {
+ // If oldBase specifies the root directory, use an empty string as
+ // oldBase instead so that newBase doesn't replace the path separator
+ // that all paths will start with.
+ oldBase = ""
+ }
+
+ rebased, w := io.Pipe()
+
+ go func() {
+ srcTar := tar.NewReader(srcContent)
+ rebasedTar := tar.NewWriter(w)
+
+ for {
+ hdr, err := srcTar.Next()
+ if err == io.EOF {
+ // Signals end of archive.
+ rebasedTar.Close()
+ w.Close()
+ return
+ }
+ if err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ // srcContent tar stream, as served by TarWithOptions(), is
+ // definitely in PAX format, but tar.Next() mistakenly guesses it
+ // as USTAR, which creates a problem: if the newBase is >100
+ // characters long, WriteHeader() returns an error like
+ // "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...".
+ //
+ // To fix, set the format to PAX here. See docker/for-linux issue #484.
+ hdr.Format = tar.FormatPAX
+ hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+ if hdr.Typeflag == tar.TypeLink {
+ hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
+ }
+
+ if err = rebasedTar.WriteHeader(hdr); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ // Ignoring GoSec G110. See https://github.com/securego/gosec/pull/433
+ // and https://cure53.de/pentest-report_opa.pdf, which recommends to
+ // replace io.Copy with io.CopyN. The latter allows specifying the
+ // maximum number of bytes that should be read. By properly defining
+ // the limit, it can be assured that a GZip compression bomb cannot
+ // easily cause a Denial-of-Service.
+ // After reviewing with @tonistiigi and @cpuguy83, this should not
+ // affect us, because here we do not read into memory, hence should
+ // not be vulnerable to this code consuming memory.
+ //nolint:gosec // G110: Potential DoS vulnerability via decompression bomb (gosec)
+ if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+ }
+ }()
+
+ return rebased
+}
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+ var (
+ srcInfo CopyInfo
+ err error
+ )
+
+ // Ensure in platform semantics
+ srcPath = normalizePath(srcPath)
+ dstPath = normalizePath(dstPath)
+
+ // Clean the source and destination paths.
+ srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
+ dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
+
+ if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+ return err
+ }
+
+ content, err := TarResource(srcInfo)
+ if err != nil {
+ return err
+ }
+ defer content.Close()
+
+ return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
+ // The destination path need not exist, but CopyInfoDestinationPath will
+ // ensure that at least the parent directory exists.
+ dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+ if err != nil {
+ return err
+ }
+
+ dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+ if err != nil {
+ return err
+ }
+ defer copyArchive.Close()
+
+ options := &TarOptions{
+ NoLchown: true,
+ NoOverwriteDirNonDir: true,
+ }
+
+ return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides the real path to be copied, based on whether
+// symlinks should be followed. If followLink is true, resolvedPath is the
+// link target of any symlink file; otherwise only symlinks in the parent
+// directory are resolved, and a symlink file itself is returned unresolved.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, _ error) {
+ if followLink {
+ var err error
+ resolvedPath, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ return "", "", err
+ }
+
+ resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+ } else {
+ dirPath, basePath := filepath.Split(path)
+
+ // If not following symlinks, resolve only the symlinks in the parent dir.
+ resolvedDirPath, err := filepath.EvalSymlinks(dirPath)
+ if err != nil {
+ return "", "", err
+ }
+ // resolvedDirPath will have been cleaned (no trailing path separators) so
+ // we can manually join it with the base path element.
+ resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+ if hasTrailingPathSeparator(path) &&
+ filepath.Base(path) != filepath.Base(resolvedPath) {
+ rebaseName = filepath.Base(path)
+ }
+ }
+ return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath, returning the
+// completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+ // resolvedPath will have been cleaned (no trailing path separator or
+ // "." segment), so we can manually re-append them as needed.
+ var rebaseName string
+ if specifiesCurrentDir(path) &&
+ !specifiesCurrentDir(resolvedPath) {
+ resolvedPath += string(filepath.Separator) + "."
+ }
+
+ if hasTrailingPathSeparator(path) &&
+ !hasTrailingPathSeparator(resolvedPath) {
+ resolvedPath += string(filepath.Separator)
+ }
+
+ if filepath.Base(path) != filepath.Base(resolvedPath) {
+ // In the case where the path had a trailing separator and a symlink
+ // evaluation has changed the last path component, we will need to
+ // rebase the name in the archive that is being copied to match the
+ // originally requested name.
+ rebaseName = filepath.Base(path)
+ }
+ return resolvedPath, rebaseName
+}
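To make the trailing-`/.` semantics that copy.go preserves concrete, here is a small usage sketch against the exported helpers, assuming the vendored import path and Unix path separators:

package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// filepath.Clean drops the trailing "/." that tells the copy logic
	// "copy the directory contents, not the directory itself";
	// PreserveTrailingDotOrSeparator restores it.
	orig := "/var/log/."
	cleaned := filepath.Clean(orig) // "/var/log"
	fmt.Println(archive.PreserveTrailingDotOrSeparator(cleaned, orig))
	// On Unix this prints: /var/log/.

	// SplitPathDirEntry keeps the "." entry intact as the base component.
	dir, base := archive.SplitPathDirEntry("/var/log/.")
	fmt.Println(dir, base) // /var/log .
}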
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
new file mode 100644
index 0000000..f579282
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
@@ -0,0 +1,11 @@
+//go:build !windows
+
+package archive
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.ToSlash(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
new file mode 100644
index 0000000..2b775b4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
@@ -0,0 +1,9 @@
+package archive
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.FromSlash(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/dev_freebsd.go b/vendor/github.com/docker/docker/pkg/archive/dev_freebsd.go
new file mode 100644
index 0000000..aa8e291
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/dev_freebsd.go
@@ -0,0 +1,7 @@
+//go:build freebsd
+
+package archive
+
+import "golang.org/x/sys/unix"
+
+var mknod = unix.Mknod
diff --git a/vendor/github.com/docker/docker/pkg/archive/dev_unix.go b/vendor/github.com/docker/docker/pkg/archive/dev_unix.go
new file mode 100644
index 0000000..dffc596
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/dev_unix.go
@@ -0,0 +1,9 @@
+//go:build !windows && !freebsd
+
+package archive
+
+import "golang.org/x/sys/unix"
+
+func mknod(path string, mode uint32, dev uint64) error {
+ return unix.Mknod(path, mode, int(dev))
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go
new file mode 100644
index 0000000..d5a394c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/diff.go
@@ -0,0 +1,258 @@
+package archive
+
+import (
+ "archive/tar"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/containerd/log"
+)
+
+// UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
+ tr := tar.NewReader(layer)
+
+ var dirs []*tar.Header
+ unpackedPaths := make(map[string]struct{})
+
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+
+ aufsTempdir := ""
+ aufsHardlinks := make(map[string]*tar.Header)
+
+ // Iterate through the files in the archive.
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return 0, err
+ }
+
+ size += hdr.Size
+
+ // Normalize name, for safety and for a simple is-root check
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ // Windows does not support filenames with colons in them. Ignore
+ // these files. This is not a problem though (although it might
+ // appear that it is). Let's suppose a client is running docker pull.
+ // The daemon it points to is Windows. Would it make sense for the
+ // client to be doing a docker pull Ubuntu for example (which has files
+ // with colons in the name under /usr/share/man/man3)? No, absolutely
+ // not as it would really only make sense that they were pulling a
+ // Windows image. However, for development, it is necessary to be able
+ // to pull Linux images which are in the repository.
+ //
+ // TODO Windows. Once the registry is aware of what images are Windows-
+ // specific or Linux-specific, this warning should be changed to an error
+ // to cater for the situation where someone does manage to upload a Linux
+ // image but have it tagged as Windows inadvertently.
+ if runtime.GOOS == "windows" {
+ if strings.Contains(hdr.Name, ":") {
+ log.G(context.TODO()).Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+ continue
+ }
+ }
+
+ // Ensure that the parent directory exists.
+ err = createImpliedDirectories(dest, hdr, options)
+ if err != nil {
+ return 0, err
+ }
+
+ // Skip AUFS metadata dirs
+ if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+ // Regular files inside /.wh..wh.plnk can be used as hardlink targets
+ // We don't want this directory, but we need the files in them so that
+ // such hardlinks can be resolved.
+ if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+ basename := filepath.Base(hdr.Name)
+ aufsHardlinks[basename] = hdr
+ if aufsTempdir == "" {
+ if aufsTempdir, err = os.MkdirTemp(dest, "dockerplnk"); err != nil {
+ return 0, err
+ }
+ defer os.RemoveAll(aufsTempdir)
+ }
+ if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, options); err != nil {
+ return 0, err
+ }
+ }
+
+ if hdr.Name != WhiteoutOpaqueDir {
+ continue
+ }
+ }
+ // #nosec G305 -- The joined path is guarded against path traversal.
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return 0, err
+ }
+
+ // Note: as these operations are platform-specific, so must the slash be.
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+ base := filepath.Base(path)
+
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ dir := filepath.Dir(path)
+ if base == WhiteoutOpaqueDir {
+ _, err := os.Lstat(dir)
+ if err != nil {
+ return 0, err
+ }
+ err = filepath.WalkDir(dir, func(path string, info os.DirEntry, err error) error {
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = nil // parent was deleted
+ }
+ return err
+ }
+ if path == dir {
+ return nil
+ }
+ if _, exists := unpackedPaths[path]; !exists {
+ return os.RemoveAll(path)
+ }
+ return nil
+ })
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+ if err := os.RemoveAll(originalPath); err != nil {
+ return 0, err
+ }
+ }
+ } else {
+ // If the path exists, we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ srcData := io.Reader(tr)
+ srcHdr := hdr
+
+ // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+ // we manually retarget these into the temporary files we extracted them into
+ if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
+ linkBasename := filepath.Base(hdr.Linkname)
+ srcHdr = aufsHardlinks[linkBasename]
+ if srcHdr == nil {
+ return 0, fmt.Errorf("Invalid aufs hardlink")
+ }
+ tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+ if err != nil {
+ return 0, err
+ }
+ defer tmpFile.Close()
+ srcData = tmpFile
+ }
+
+ if err := remapIDs(options.IDMap, srcHdr); err != nil {
+ return 0, err
+ }
+
+ if err := createTarFile(path, dest, srcHdr, srcData, options); err != nil {
+ return 0, err
+ }
+
+ // Directory mtimes must be handled at the end to avoid further
+ // file creation in them to modify the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ unpackedPaths[path] = struct{}{}
+ }
+ }
+
+ for _, hdr := range dirs {
+ // #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice.
+ path := filepath.Join(dest, hdr.Name)
+ if err := chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return 0, err
+ }
+ }
+
+ return size, nil
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer io.Reader) (int64, error) {
+ return applyLayerHandler(dest, layer, &TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
+ return applyLayerHandler(dest, layer, options, false)
+}
+
+// IsEmpty checks if the tar archive is empty (doesn't contain any entries).
+func IsEmpty(rd io.Reader) (bool, error) {
+ decompRd, err := DecompressStream(rd)
+ if err != nil {
+ return true, fmt.Errorf("failed to decompress archive: %v", err)
+ }
+ defer decompRd.Close()
+
+ tarReader := tar.NewReader(decompRd)
+ if _, err := tarReader.Next(); err != nil {
+ if err == io.EOF {
+ return true, nil
+ }
+ return false, fmt.Errorf("failed to read next archive header: %v", err)
+ }
+
+ return false, nil
+}
+
+// applyLayerHandler does the bulk of the work for ApplyLayer, but allows for
+// not calling DecompressStream.
+func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
+ dest = filepath.Clean(dest)
+
+ // We need to be able to set any perms
+ restore := overrideUmask(0)
+ defer restore()
+
+ if decompress {
+ decompLayer, err := DecompressStream(layer)
+ if err != nil {
+ return 0, err
+ }
+ defer decompLayer.Close()
+ layer = decompLayer
+ }
+ return UnpackLayer(dest, layer, options)
+}
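A minimal usage sketch of ApplyLayer as defined above, assuming hypothetical paths for an unpacked rootfs and a layer tarball:

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Apply a (possibly compressed) layer diff on top of an unpacked
	// root filesystem; whiteout entries delete paths as described above.
	layer, err := os.Open("/tmp/layer.tar.gz") // hypothetical path
	if err != nil {
		fmt.Println(err)
		return
	}
	defer layer.Close()

	size, err := archive.ApplyLayer("/tmp/rootfs", layer) // hypothetical dest
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("applied %d bytes of layer content\n", size)
}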
diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go
new file mode 100644
index 0000000..7216f2f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go
@@ -0,0 +1,21 @@
+//go:build !windows
+
+package archive
+
+import "golang.org/x/sys/unix"
+
+// overrideUmask sets current process's file mode creation mask to newmask
+// and returns a function to restore it.
+//
+// WARNING for readers stumbling upon this code. Changing umask in a multi-
+// threaded environment isn't safe. Don't use this without understanding the
+// risks, and don't export this function for others to use (we shouldn't even
+// be using this ourself).
+//
+// FIXME(thaJeztah): we should get rid of these hacks if possible.
+func overrideUmask(newMask int) func() {
+ oldMask := unix.Umask(newMask)
+ return func() {
+ unix.Umask(oldMask)
+ }
+}
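overrideUmask returns a restore closure rather than restoring itself. The same save/restore shape, sketched with an ordinary variable instead of the process umask:

package main

import "fmt"

// override sets a value and returns a closure that restores the old one,
// the same shape as the vendored overrideUmask.
func override(target *int, newVal int) func() {
	old := *target
	*target = newVal
	return func() { *target = old }
}

func main() {
	mask := 0o022
	restore := override(&mask, 0)
	fmt.Printf("%04o\n", mask) // 0000
	restore()
	fmt.Printf("%04o\n", mask) // 0022
}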
diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_windows.go b/vendor/github.com/docker/docker/pkg/archive/diff_windows.go
new file mode 100644
index 0000000..d28f5b2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/diff_windows.go
@@ -0,0 +1,6 @@
+package archive
+
+// overrideUmask is a no-op on windows.
+func overrideUmask(newmask int) func() {
+ return func() {}
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/path.go b/vendor/github.com/docker/docker/pkg/archive/path.go
new file mode 100644
index 0000000..888a697
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/path.go
@@ -0,0 +1,20 @@
+package archive
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a
+// drive letter, is the system drive.
+// On Linux: this is a no-op.
+// On Windows: this verifies and manipulates the path. It is used, for example,
+// when validating a user-provided path in docker cp. If a drive letter is
+// supplied, it must be the system drive, and the drive letter is always
+// removed. The path is also translated to OS semantics (IOW / to \), as it
+// ultimately needs to be concatenated with a Windows long-path, which doesn't
+// support drive letters. Examples:
+// C: --> Fail
+// C:\ --> \
+// a --> a
+// /a --> \a
+// d:\ --> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+ return checkSystemDriveAndRemoveDriveLetter(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/path_unix.go b/vendor/github.com/docker/docker/pkg/archive/path_unix.go
new file mode 100644
index 0000000..390264b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/path_unix.go
@@ -0,0 +1,9 @@
+//go:build !windows
+
+package archive
+
+// checkSystemDriveAndRemoveDriveLetter is the non-Windows implementation
+// of CheckSystemDriveAndRemoveDriveLetter
+func checkSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+ return path, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/path_windows.go b/vendor/github.com/docker/docker/pkg/archive/path_windows.go
new file mode 100644
index 0000000..7e18c8e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/path_windows.go
@@ -0,0 +1,22 @@
+package archive
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+)
+
+// checkSystemDriveAndRemoveDriveLetter is the Windows implementation
+// of CheckSystemDriveAndRemoveDriveLetter
+func checkSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+ if len(path) == 2 && string(path[1]) == ":" {
+ return "", fmt.Errorf("no relative path specified in %q", path)
+ }
+ if !filepath.IsAbs(path) || len(path) < 2 {
+ return filepath.FromSlash(path), nil
+ }
+ if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+ return "", fmt.Errorf("the specified path is not on the system drive (C:)")
+ }
+ return filepath.FromSlash(path[2:]), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time.go b/vendor/github.com/docker/docker/pkg/archive/time.go
new file mode 100644
index 0000000..4e9ae95
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time.go
@@ -0,0 +1,38 @@
+package archive
+
+import (
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+var (
+ minTime = time.Unix(0, 0)
+ maxTime time.Time
+)
+
+func init() {
+ if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
+ // This is a 64 bit timespec
+ // os.Chtimes limits time to the following
+ maxTime = time.Unix(0, 1<<63-1)
+ } else {
+ // This is a 32 bit timespec
+ maxTime = time.Unix(1<<31-1, 0)
+ }
+}
+
+func boundTime(t time.Time) time.Time {
+ if t.Before(minTime) || t.After(maxTime) {
+ return minTime
+ }
+
+ return t
+}
+
+func latestTime(t1, t2 time.Time) time.Time {
+ if t1.Before(t2) {
+ return t2
+ }
+ return t1
+}
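A standalone sketch of the clamping boundTime performs, assuming the 64-bit timespec limits computed in the init function above:

package main

import (
	"fmt"
	"time"
)

func main() {
	minTime := time.Unix(0, 0)
	maxTime := time.Unix(0, 1<<63-1) // 64-bit timespec limit

	// Timestamps outside what os.Chtimes can represent fall back to the epoch.
	bound := func(t time.Time) time.Time {
		if t.Before(minTime) || t.After(maxTime) {
			return minTime
		}
		return t
	}

	fmt.Println(bound(time.Unix(-1, 0)).Unix())         // 0 (clamped)
	fmt.Println(bound(time.Unix(1700000000, 0)).Unix()) // 1700000000
}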
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_nonwindows.go b/vendor/github.com/docker/docker/pkg/archive/time_nonwindows.go
new file mode 100644
index 0000000..5bfdfa2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time_nonwindows.go
@@ -0,0 +1,41 @@
+//go:build !windows
+
+package archive
+
+import (
+ "os"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// chtimes changes the access time and modified time of a file at the given
+// path. Note that os.Chtimes has undefined behavior for times before the Unix
+// epoch or beyond what the platform can represent; boundTime can be used to
+// clamp times into the supported range.
+func chtimes(name string, atime time.Time, mtime time.Time) error {
+ return os.Chtimes(name, atime, mtime)
+}
+
+func timeToTimespec(time time.Time) unix.Timespec {
+ if time.IsZero() {
+ // Return UTIME_OMIT special value
+ return unix.Timespec{
+ Sec: 0,
+ Nsec: (1 << 30) - 2,
+ }
+ }
+ return unix.NsecToTimespec(time.UnixNano())
+}
+
+func lchtimes(name string, atime time.Time, mtime time.Time) error {
+ utimes := [2]unix.Timespec{
+ timeToTimespec(atime),
+ timeToTimespec(mtime),
+ }
+ err := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes[0:], unix.AT_SYMLINK_NOFOLLOW)
+ if err != nil && err != unix.ENOSYS {
+ return err
+ }
+ // ENOSYS (no UtimesNanoAt with AT_SYMLINK_NOFOLLOW) is deliberately ignored.
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_windows.go b/vendor/github.com/docker/docker/pkg/archive/time_windows.go
new file mode 100644
index 0000000..af1f7c8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time_windows.go
@@ -0,0 +1,32 @@
+package archive
+
+import (
+ "os"
+ "time"
+
+ "golang.org/x/sys/windows"
+)
+
+func chtimes(name string, atime time.Time, mtime time.Time) error {
+ if err := os.Chtimes(name, atime, mtime); err != nil {
+ return err
+ }
+
+ pathp, err := windows.UTF16PtrFromString(name)
+ if err != nil {
+ return err
+ }
+ h, err := windows.CreateFile(pathp,
+ windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil,
+ windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
+ if err != nil {
+ return err
+ }
+ defer windows.Close(h)
+ c := windows.NsecToFiletime(mtime.UnixNano())
+ return windows.SetFileTime(h, &c, nil, nil)
+}
+
+func lchtimes(name string, atime time.Time, mtime time.Time) error {
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
new file mode 100644
index 0000000..d20478a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
@@ -0,0 +1,23 @@
+package archive
+
+// Whiteouts are files with a special meaning for the layered filesystem.
+// Docker uses AUFS whiteout files inside exported archives. In other
+// filesystems these files are generated/handled on tar creation/extraction.
+
+// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
+// filename this means that file has been removed from the base layer.
+const WhiteoutPrefix = ".wh."
+
+// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
+// for removing an actual file. Normally these files are excluded from exported
+// archives.
+const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
+
+// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
+// layers. Normally these should not go into exported archives and all changed
+// hardlinks should be copied to the top layer.
+const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
+
+// WhiteoutOpaqueDir file means directory has been made opaque - meaning
+// readdir calls to this directory do not follow to lower layers.
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
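To make the whiteout naming concrete, here is a small sketch (mirroring, not reusing, the checks in UnpackLayer) that maps a tar entry name to the path it deletes:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

const (
	whiteoutPrefix     = ".wh."
	whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix
)

// whiteoutTarget reports whether a tar entry name is an AUFS whiteout and,
// if so, which path it deletes. Meta entries (".wh..wh.*") are excluded.
func whiteoutTarget(name string) (target string, ok bool) {
	base := filepath.Base(name)
	if !strings.HasPrefix(base, whiteoutPrefix) || strings.HasPrefix(base, whiteoutMetaPrefix) {
		return "", false
	}
	return filepath.Join(filepath.Dir(name), base[len(whiteoutPrefix):]), true
}

func main() {
	fmt.Println(whiteoutTarget("usr/share/.wh.doc")) // usr/share/doc true
	fmt.Println(whiteoutTarget(".wh..wh..opq"))      // "" false (meta entry)
	fmt.Println(whiteoutTarget("etc/passwd"))        // "" false
}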
diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go
new file mode 100644
index 0000000..f8a9725
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "io"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with
+// empty content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// - ./foo.txt with content "hello world"
+// - ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (io.Reader, error) {
+ files := parseStringPairs(input...)
+ buf := new(bytes.Buffer)
+ tw := tar.NewWriter(buf)
+ for _, file := range files {
+ name, content := file[0], file[1]
+ hdr := &tar.Header{
+ Name: name,
+ Size: int64(len(content)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return nil, err
+ }
+ if _, err := tw.Write([]byte(content)); err != nil {
+ return nil, err
+ }
+ }
+ if err := tw.Close(); err != nil {
+ return nil, err
+ }
+ return buf, nil
+}
+
+func parseStringPairs(input ...string) [][2]string {
+ output := make([][2]string, 0, len(input)/2+1)
+ for i := 0; i < len(input); i += 2 {
+ var pair [2]string
+ pair[0] = input[i]
+ if i+1 < len(input) {
+ pair[1] = input[i+1]
+ }
+ output = append(output, pair)
+ }
+ return output
+}
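A usage sketch for Generate, reading back the archive it produces (assuming the vendored import path):

package main

import (
	"archive/tar"
	"fmt"
	"io"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Build a tiny in-memory tar: one file with content, one empty file.
	rd, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		panic(err)
	}
	tr := tar.NewReader(rd)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break // end of archive
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
	}
	// Output:
	// foo.txt (11 bytes)
	// emptyfile (0 bytes)
}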
diff --git a/vendor/github.com/docker/docker/pkg/archive/xattr_supported.go b/vendor/github.com/docker/docker/pkg/archive/xattr_supported.go
new file mode 100644
index 0000000..652a1f0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/xattr_supported.go
@@ -0,0 +1,52 @@
+//go:build linux || darwin || freebsd || netbsd
+
+package archive
+
+import (
+ "errors"
+ "fmt"
+ "io/fs"
+
+ "golang.org/x/sys/unix"
+)
+
+// lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It returns a nil slice and nil error if the xattr is not set.
+func lgetxattr(path string, attr string) ([]byte, error) {
+ // Start with a 128 length byte array
+ dest := make([]byte, 128)
+ sz, err := unix.Lgetxattr(path, attr, dest)
+
+ for errors.Is(err, unix.ERANGE) {
+ // Buffer too small, use zero-sized buffer to get the actual size
+ sz, err = unix.Lgetxattr(path, attr, []byte{})
+ if err != nil {
+ return nil, wrapPathError("lgetxattr", path, attr, err)
+ }
+ dest = make([]byte, sz)
+ sz, err = unix.Lgetxattr(path, attr, dest)
+ }
+
+ if err != nil {
+ if errors.Is(err, noattr) {
+ return nil, nil
+ }
+ return nil, wrapPathError("lgetxattr", path, attr, err)
+ }
+
+ return dest[:sz], nil
+}
+
+// lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+func lsetxattr(path string, attr string, data []byte, flags int) error {
+ return wrapPathError("lsetxattr", path, attr, unix.Lsetxattr(path, attr, data, flags))
+}
+
+func wrapPathError(op, path, attr string, err error) error {
+ if err == nil {
+ return nil
+ }
+ return &fs.PathError{Op: op, Path: path, Err: fmt.Errorf("xattr %q: %w", attr, err)}
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/xattr_supported_linux.go b/vendor/github.com/docker/docker/pkg/archive/xattr_supported_linux.go
new file mode 100644
index 0000000..f2e7646
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/xattr_supported_linux.go
@@ -0,0 +1,5 @@
+package archive
+
+import "golang.org/x/sys/unix"
+
+var noattr = unix.ENODATA
diff --git a/vendor/github.com/docker/docker/pkg/archive/xattr_supported_unix.go b/vendor/github.com/docker/docker/pkg/archive/xattr_supported_unix.go
new file mode 100644
index 0000000..4d88241
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/xattr_supported_unix.go
@@ -0,0 +1,7 @@
+//go:build !linux && !windows
+
+package archive
+
+import "golang.org/x/sys/unix"
+
+var noattr = unix.ENOATTR
diff --git a/vendor/github.com/docker/docker/pkg/archive/xattr_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/xattr_unsupported.go
new file mode 100644
index 0000000..b0d9165
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/xattr_unsupported.go
@@ -0,0 +1,11 @@
+//go:build !linux && !darwin && !freebsd && !netbsd
+
+package archive
+
+func lgetxattr(path string, attr string) ([]byte, error) {
+ return nil, nil
+}
+
+func lsetxattr(path string, attr string, data []byte, flags int) error {
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
new file mode 100644
index 0000000..d2fbd94
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
@@ -0,0 +1,149 @@
+package idtools
+
+import (
+ "fmt"
+ "os"
+)
+
+// IDMap contains a single entry for user namespace range remapping. An array
+// of IDMap entries represents the structure that will be provided to the Linux
+// kernel for creating a user namespace.
+type IDMap struct {
+ ContainerID int `json:"container_id"`
+ HostID int `json:"host_id"`
+ Size int `json:"size"`
+}
+
+// MkdirAllAndChown creates a directory (including any along the path) and then modifies
+// ownership to the requested uid/gid. If the directory already exists, this
+// function will still change ownership and permissions.
+func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error {
+ return mkdirAs(path, mode, owner, true, true)
+}
+
+// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
+// If the directory already exists, this function still changes ownership and permissions.
+// Note that unlike os.Mkdir(), this function does not return IsExist error
+// in case path already exists.
+func MkdirAndChown(path string, mode os.FileMode, owner Identity) error {
+ return mkdirAs(path, mode, owner, false, true)
+}
+
+// MkdirAllAndChownNew creates a directory (including any along the path) and then modifies
+// ownership ONLY of newly created directories to the requested uid/gid. If the
+// directories along the path exist, no change of ownership or permissions will be performed
+func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error {
+ return mkdirAs(path, mode, owner, true, false)
+}
+
+// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
+// If the maps are empty, then the root uid/gid will default to "real" 0/0
+func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
+ uid, err := toHost(0, uidMap)
+ if err != nil {
+ return -1, -1, err
+ }
+ gid, err := toHost(0, gidMap)
+ if err != nil {
+ return -1, -1, err
+ }
+ return uid, gid, nil
+}
+
+// toContainer takes an id mapping, and uses it to translate a
+// host ID to the remapped ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id
+func toContainer(hostID int, idMap []IDMap) (int, error) {
+ if idMap == nil {
+ return hostID, nil
+ }
+ for _, m := range idMap {
+ if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
+ contID := m.ContainerID + (hostID - m.HostID)
+ return contID, nil
+ }
+ }
+ return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
+}
+
+// toHost takes an id mapping and a remapped ID, and translates the
+// ID to the mapped host ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id
+func toHost(contID int, idMap []IDMap) (int, error) {
+ if idMap == nil {
+ return contID, nil
+ }
+ for _, m := range idMap {
+ if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
+ hostID := m.HostID + (contID - m.ContainerID)
+ return hostID, nil
+ }
+ }
+ return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
+}
+
+// Identity is either a UID and GID pair or a SID (but not both)
+type Identity struct {
+ UID int
+ GID int
+ SID string
+}
+
+// Chown changes the numeric uid and gid of the named file to id.UID and id.GID.
+func (id Identity) Chown(name string) error {
+ return os.Chown(name, id.UID, id.GID)
+}
+
+// IdentityMapping contains the mappings of UIDs and GIDs.
+// The zero value represents an empty mapping.
+type IdentityMapping struct {
+ UIDMaps []IDMap `json:"UIDMaps"`
+ GIDMaps []IDMap `json:"GIDMaps"`
+}
+
+// RootPair returns a uid and gid pair for the root user. The error is ignored
+// because a root user always exists, and the defaults are correct when the uid
+// and gid maps are empty.
+func (i IdentityMapping) RootPair() Identity {
+ uid, gid, _ := GetRootUIDGID(i.UIDMaps, i.GIDMaps)
+ return Identity{UID: uid, GID: gid}
+}
+
+// ToHost returns the host UID and GID for the container uid, gid.
+// Remapping is only performed if the ids aren't already the remapped root ids
+func (i IdentityMapping) ToHost(pair Identity) (Identity, error) {
+ var err error
+ target := i.RootPair()
+
+ if pair.UID != target.UID {
+ target.UID, err = toHost(pair.UID, i.UIDMaps)
+ if err != nil {
+ return target, err
+ }
+ }
+
+ if pair.GID != target.GID {
+ target.GID, err = toHost(pair.GID, i.GIDMaps)
+ }
+ return target, err
+}
+
+// ToContainer returns the container UID and GID for the host uid and gid
+func (i IdentityMapping) ToContainer(pair Identity) (int, int, error) {
+ uid, err := toContainer(pair.UID, i.UIDMaps)
+ if err != nil {
+ return -1, -1, err
+ }
+ gid, err := toContainer(pair.GID, i.GIDMaps)
+ return uid, gid, err
+}
+
+// Empty returns true if there are no id mappings
+func (i IdentityMapping) Empty() bool {
+ return len(i.UIDMaps) == 0 && len(i.GIDMaps) == 0
+}
+
+// CurrentIdentity returns the identity of the current process
+func CurrentIdentity() Identity {
+ return Identity{UID: os.Getuid(), GID: os.Getegid()}
+}
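The mapping arithmetic in toHost/toContainer is simple interval translation. A self-contained sketch of toHost with a typical 65536-wide range (types simplified):

package main

import "fmt"

type IDMap struct {
	ContainerID, HostID, Size int
}

// toHost translates a container ID to a host ID through a mapping,
// mirroring the vendored idtools helper.
func toHost(contID int, idMap []IDMap) (int, error) {
	if idMap == nil {
		return contID, nil // no map: identity translation
	}
	for _, m := range idMap {
		if contID >= m.ContainerID && contID <= m.ContainerID+m.Size-1 {
			return m.HostID + (contID - m.ContainerID), nil
		}
	}
	return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID)
}

func main() {
	// Container IDs 0..65535 map onto host IDs 100000..165535.
	maps := []IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	fmt.Println(toHost(0, maps))    // 100000 <nil>
	fmt.Println(toHost(1000, maps)) // 101000 <nil>
}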
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
new file mode 100644
index 0000000..1f11fe4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
@@ -0,0 +1,166 @@
+//go:build !windows
+
+package idtools
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "syscall"
+
+ "github.com/moby/sys/user"
+)
+
+func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
+ path, err := filepath.Abs(path)
+ if err != nil {
+ return err
+ }
+
+ stat, err := os.Stat(path)
+ if err == nil {
+ if !stat.IsDir() {
+ return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
+ }
+ if !chownExisting {
+ return nil
+ }
+
+ // short-circuit -- we were called with an existing directory and chown was requested
+ return setPermissions(path, mode, owner, stat)
+ }
+
+ // make an array containing the original path asked for, plus (for mkAll == true)
+ // all path components leading up to the complete path that don't exist before we MkdirAll
+ // so that we can chown all of them properly at the end. If chownExisting is false, we won't
+ // chown the full directory path if it exists
+ var paths []string
+ if os.IsNotExist(err) {
+ paths = []string{path}
+ }
+
+ if mkAll {
+ // walk back to "/" looking for directories which do not exist
+ // and add them to the paths array for chown after creation
+ dirPath := path
+ for {
+ dirPath = filepath.Dir(dirPath)
+ if dirPath == "/" {
+ break
+ }
+ if _, err = os.Stat(dirPath); err != nil && os.IsNotExist(err) {
+ paths = append(paths, dirPath)
+ }
+ }
+ if err = os.MkdirAll(path, mode); err != nil {
+ return err
+ }
+ } else if err = os.Mkdir(path, mode); err != nil {
+ return err
+ }
+ // even if it existed, we will chown the requested path + any subpaths that
+ // didn't exist when we called MkdirAll
+ for _, pathComponent := range paths {
+ if err = setPermissions(pathComponent, mode, owner, nil); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username
+//
+// Deprecated: use [user.LookupUser] instead
+func LookupUser(name string) (user.User, error) {
+ return user.LookupUser(name)
+}
+
+// LookupUID looks up a uid using traditional local system files (from libcontainer/user)
+//
+// Deprecated: use [user.LookupUid] instead
+func LookupUID(uid int) (user.User, error) {
+ return user.LookupUid(uid)
+}
+
+// LookupGroup looks up a group name using traditional local system files (from libcontainer/user)
+//
+// Deprecated: use [user.LookupGroup] instead
+func LookupGroup(name string) (user.Group, error) {
+ return user.LookupGroup(name)
+}
+
+// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested
+// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the
+// dir is on an NFS share, so don't call chown unless we absolutely must.
+// Likewise for setting permissions.
+func setPermissions(p string, mode os.FileMode, owner Identity, stat os.FileInfo) error {
+ if stat == nil {
+ var err error
+ stat, err = os.Stat(p)
+ if err != nil {
+ return err
+ }
+ }
+ if stat.Mode().Perm() != mode.Perm() {
+ if err := os.Chmod(p, mode.Perm()); err != nil {
+ return err
+ }
+ }
+ ssi := stat.Sys().(*syscall.Stat_t)
+ if ssi.Uid == uint32(owner.UID) && ssi.Gid == uint32(owner.GID) {
+ return nil
+ }
+ return os.Chown(p, owner.UID, owner.GID)
+}
+
+// LoadIdentityMapping takes a requested username and, using the data from
+// /etc/sub{uid,gid} ranges, creates the proper uid and gid remapping ranges
+// for that user/group pair
+func LoadIdentityMapping(name string) (IdentityMapping, error) {
+ // TODO: Consider adding support for calling out to "getent"
+ usr, err := user.LookupUser(name)
+ if err != nil {
+ return IdentityMapping{}, fmt.Errorf("could not get user for username %s: %v", name, err)
+ }
+
+ subuidRanges, err := lookupSubRangesFile("/etc/subuid", usr)
+ if err != nil {
+ return IdentityMapping{}, err
+ }
+ subgidRanges, err := lookupSubRangesFile("/etc/subgid", usr)
+ if err != nil {
+ return IdentityMapping{}, err
+ }
+
+ return IdentityMapping{
+ UIDMaps: subuidRanges,
+ GIDMaps: subgidRanges,
+ }, nil
+}
+
+func lookupSubRangesFile(path string, usr user.User) ([]IDMap, error) {
+ uidstr := strconv.Itoa(usr.Uid)
+ rangeList, err := user.ParseSubIDFileFilter(path, func(sid user.SubID) bool {
+ return sid.Name == usr.Name || sid.Name == uidstr
+ })
+ if err != nil {
+ return nil, err
+ }
+ if len(rangeList) == 0 {
+ return nil, fmt.Errorf("no subuid ranges found for user %q", usr.Name)
+ }
+
+ idMap := []IDMap{}
+
+ containerID := 0
+ for _, idrange := range rangeList {
+ idMap = append(idMap, IDMap{
+ ContainerID: containerID,
+ HostID: int(idrange.SubID),
+ Size: int(idrange.Count),
+ })
+ containerID = containerID + int(idrange.Count)
+ }
+ return idMap, nil
+}
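+
+// For illustration (hypothetical /etc/subuid contents, not part of this file),
+// a line such as
+//
+//	someuser:100000:65536
+//
+// produces a single IDMap{ContainerID: 0, HostID: 100000, Size: 65536}, i.e.
+// container UID 0 maps to host UID 100000, and so on for the next 65536 IDs.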
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
new file mode 100644
index 0000000..a12b140
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
@@ -0,0 +1,24 @@
+package idtools
+
+import (
+ "os"
+)
+
+const (
+ SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege"
+)
+
+// TODO(thaJeztah): these magic consts need a source of reference, and should be defined in a canonical location
+const (
+ ContainerAdministratorSidString = "S-1-5-93-2-1"
+
+ ContainerUserSidString = "S-1-5-93-2-2"
+)
+
+// mkdirAs is currently a wrapper around [os.MkdirAll]: permissions aren't set
+// through this path, so the identity isn't used. Ownership is handled
+// elsewhere, but could be supported here too in the future.
+func mkdirAs(path string, _ os.FileMode, _ Identity, _, _ bool) error {
+ return os.MkdirAll(path, 0)
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
new file mode 100644
index 0000000..1a05de4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
@@ -0,0 +1,314 @@
+package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage"
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/docker/go-units"
+ "github.com/moby/term"
+ "github.com/morikuni/aec"
+)
+
+// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
+// ensure the formatted time is always the same number of characters.
+const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
+// JSONError wraps a concrete Code and Message. Code is
+// an integer error code; Message is the error message.
+type JSONError struct {
+ Code int `json:"code,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+func (e *JSONError) Error() string {
+ return e.Message
+}
+
+// JSONProgress describes a progress message in a JSON stream.
+type JSONProgress struct {
+ // Current is the current status and value of the progress made towards Total.
+ Current int64 `json:"current,omitempty"`
+ // Total is the end value describing when we made 100% progress for an operation.
+ Total int64 `json:"total,omitempty"`
+ // Start is the initial value for the operation.
+ Start int64 `json:"start,omitempty"`
+// HideCounts, if true, hides the progress count indicator (xB/yB).
+ HideCounts bool `json:"hidecounts,omitempty"`
+ // Units is the unit to print for progress. It defaults to "bytes" if empty.
+ Units string `json:"units,omitempty"`
+
+ // terminalFd is the fd of the current terminal, if any. It is used
+ // to get the terminal width.
+ terminalFd uintptr
+
+ // nowFunc is used to override the current time in tests.
+ nowFunc func() time.Time
+
+ // winSize is used to override the terminal width in tests.
+ winSize int
+}
+
+func (p *JSONProgress) String() string {
+ var (
+ width = p.width()
+ pbBox string
+ numbersBox string
+ )
+ if p.Current <= 0 && p.Total <= 0 {
+ return ""
+ }
+ if p.Total <= 0 {
+ switch p.Units {
+ case "":
+ return fmt.Sprintf("%8v", units.HumanSize(float64(p.Current)))
+ default:
+ return fmt.Sprintf("%d %s", p.Current, p.Units)
+ }
+ }
+
+ percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
+ if percentage > 50 {
+ percentage = 50
+ }
+ if width > 110 {
+ // this number can't be negative (gh#7136)
+ numSpaces := 0
+ if 50-percentage > 0 {
+ numSpaces = 50 - percentage
+ }
+ pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
+ }
+
+ switch {
+ case p.HideCounts:
+ case p.Units == "": // no units, use bytes
+ current := units.HumanSize(float64(p.Current))
+ total := units.HumanSize(float64(p.Total))
+
+ numbersBox = fmt.Sprintf("%8v/%v", current, total)
+
+ if p.Current > p.Total {
+ // remove total display if the reported current is wonky.
+ numbersBox = fmt.Sprintf("%8v", current)
+ }
+ default:
+ numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units)
+
+ if p.Current > p.Total {
+ // remove total display if the reported current is wonky.
+ numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units)
+ }
+ }
+
+ // Show approximation of remaining time if there's enough width.
+ var timeLeftBox string
+ if width > 50 {
+ if p.Current > 0 && p.Start > 0 && percentage < 50 {
+ fromStart := p.now().Sub(time.Unix(p.Start, 0))
+ perEntry := fromStart / time.Duration(p.Current)
+ left := time.Duration(p.Total-p.Current) * perEntry
+ timeLeftBox = " " + left.Round(time.Second).String()
+ }
+ }
+ return pbBox + numbersBox + timeLeftBox
+}
+
+// now returns the current time in UTC, but can be overridden in tests
+// by setting JSONProgress.nowFunc to a custom function.
+func (p *JSONProgress) now() time.Time {
+ if p.nowFunc != nil {
+ return p.nowFunc()
+ }
+ return time.Now().UTC()
+}
+
+// width returns the current terminal's width, but can be overridden
+// in tests by setting JSONProgress.winSize to a non-zero value.
+func (p *JSONProgress) width() int {
+ if p.winSize != 0 {
+ return p.winSize
+ }
+ ws, err := term.GetWinsize(p.terminalFd)
+ if err == nil {
+ return int(ws.Width)
+ }
+ return 200
+}
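+
+// A hypothetical sketch (not part of the vendored file): on a wide terminal a
+// half-complete 2048-byte operation renders a bar plus byte counts, roughly
+// "[=========================>                         ]  1.024kB/2.048kB":
+//
+//	p := JSONProgress{Current: 1024, Total: 2048, winSize: 120} // winSize is only settable within this package
+//	fmt.Println(p.String())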
+
+// JSONMessage defines a message struct. It describes
+// the created time, where it's from, the status, and the ID of the
+// message. It's used for docker events.
+type JSONMessage struct {
+ Stream string `json:"stream,omitempty"`
+ Status string `json:"status,omitempty"`
+ Progress *JSONProgress `json:"progressDetail,omitempty"`
+
+ // ProgressMessage is a pre-formatted presentation of [Progress].
+ //
+ // Deprecated: this field is deprecated since docker v0.7.1 / API v1.8. Use the information in [Progress] instead. This field will be omitted in a future release.
+ ProgressMessage string `json:"progress,omitempty"`
+ ID string `json:"id,omitempty"`
+ From string `json:"from,omitempty"`
+ Time int64 `json:"time,omitempty"`
+ TimeNano int64 `json:"timeNano,omitempty"`
+ Error *JSONError `json:"errorDetail,omitempty"`
+
+ // ErrorMessage contains errors encountered during the operation.
+ //
+ // Deprecated: this field is deprecated since docker v0.6.0 / API v1.4. Use [Error.Message] instead. This field will be omitted in a future release.
+ ErrorMessage string `json:"error,omitempty"` // deprecated
+ // Aux contains out-of-band data, such as digests for push signing and image id after building.
+ Aux *json.RawMessage `json:"aux,omitempty"`
+}
+
+func clearLine(out io.Writer) {
+ eraseMode := aec.EraseModes.All
+ cl := aec.EraseLine(eraseMode)
+ fmt.Fprint(out, cl)
+}
+
+func cursorUp(out io.Writer, l uint) {
+ fmt.Fprint(out, aec.Up(l))
+}
+
+func cursorDown(out io.Writer, l uint) {
+ fmt.Fprint(out, aec.Down(l))
+}
+
+// Display prints the JSONMessage to out. If isTerminal is true, it erases
+// the entire current line when displaying the progressbar. It returns an
+// error if the [JSONMessage.Error] field is non-nil.
+func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
+ if jm.Error != nil {
+ return jm.Error
+ }
+ var endl string
+ if isTerminal && jm.Stream == "" && jm.Progress != nil {
+ clearLine(out)
+ endl = "\r"
+ fmt.Fprint(out, endl)
+ } else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal
+ return nil
+ }
+ if jm.TimeNano != 0 {
+ fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed))
+ } else if jm.Time != 0 {
+ fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed))
+ }
+ if jm.ID != "" {
+ fmt.Fprintf(out, "%s: ", jm.ID)
+ }
+ if jm.From != "" {
+ fmt.Fprintf(out, "(from %s) ", jm.From)
+ }
+ if jm.Progress != nil && isTerminal {
+ fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
+ } else if jm.ProgressMessage != "" { // deprecated
+ fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
+ } else if jm.Stream != "" {
+ fmt.Fprintf(out, "%s%s", jm.Stream, endl)
+ } else {
+ fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
+ }
+ return nil
+}
+
+// DisplayJSONMessagesStream reads a JSON message stream from in, and writes
+// each [JSONMessage] to out. It returns an error if an invalid JSONMessage
+// is received, or if a JSONMessage contains a non-zero [JSONMessage.Error].
+//
+// Presentation of the JSONMessage depends on whether a terminal is attached,
+// and on the terminal width. Progress bars ([JSONProgress]) are suppressed
+// on narrower terminals (< 110 characters).
+//
+// - isTerminal describes if out is a terminal, in which case it prints
+// a newline ("\n") at the end of each line and moves the cursor while
+// displaying.
+// - terminalFd is the fd of the current terminal (if any), and used
+// to get the terminal width.
+// - auxCallback allows handling the [JSONMessage.Aux] field. It is
+// called if a JSONMessage contains an Aux field, in which case
+// DisplayJSONMessagesStream does not present the JSONMessage.
+func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error {
+ var (
+ dec = json.NewDecoder(in)
+ ids = make(map[string]uint)
+ )
+
+ for {
+ var diff uint
+ var jm JSONMessage
+ if err := dec.Decode(&jm); err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+ if jm.Aux != nil {
+ if auxCallback != nil {
+ auxCallback(jm)
+ }
+ continue
+ }
+
+ if jm.Progress != nil {
+ jm.Progress.terminalFd = terminalFd
+ }
+ if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
+ line, ok := ids[jm.ID]
+ if !ok {
+ // NOTE: This approach of using len(ids) to
+ // figure out the number of lines of history
+ // only works as long as we clear the history
+ // when we output something that's not
+ // accounted for in the map, such as a line
+ // with no ID.
+ line = uint(len(ids))
+ ids[jm.ID] = line
+ if isTerminal {
+ fmt.Fprintf(out, "\n")
+ }
+ }
+ diff = uint(len(ids)) - line
+ if isTerminal {
+ cursorUp(out, diff)
+ }
+ } else {
+ // When outputting something that isn't progress
+ // output, clear the history of previous lines. We
+ // don't want progress entries from some previous
+ // operation to be updated (for example, pull -a
+ // with multiple tags).
+ ids = make(map[string]uint)
+ }
+ err := jm.Display(out, isTerminal)
+ if jm.ID != "" && isTerminal {
+ cursorDown(out, diff)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
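+
+// A hypothetical sketch (not part of the vendored file): render a small stream
+// to stdout with no terminal attached:
+//
+//	in := strings.NewReader(`{"status":"Pulling"}{"status":"Done"}`)
+//	err := DisplayJSONMessagesStream(in, os.Stdout, 0, false, nil)
+//	// prints "Pulling" and "Done", each on its own line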
+
+// Stream is an io.Writer for output with utilities to get the output's file
+// descriptor and to detect whether it's a terminal.
+//
+// It is a subset of the streams.Out type in
+// https://pkg.go.dev/github.com/docker/cli@v20.10.17+incompatible/cli/streams#Out
+type Stream interface {
+ io.Writer
+ FD() uintptr
+ IsTerminal() bool
+}
+
+// DisplayJSONMessagesToStream prints json messages to the output Stream. It is
+// used by the Docker CLI to print JSONMessage streams.
+func DisplayJSONMessagesToStream(in io.Reader, stream Stream, auxCallback func(JSONMessage)) error {
+ return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback)
+}
diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
new file mode 100644
index 0000000..854e4c3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
@@ -0,0 +1,190 @@
+package stdcopy // import "github.com/docker/docker/pkg/stdcopy"
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+)
+
+// StdType is the type of standard stream
+// a writer can multiplex to.
+type StdType byte
+
+const (
+ // Stdin represents standard input stream type.
+ Stdin StdType = iota
+ // Stdout represents standard output stream type.
+ Stdout
+ // Stderr represents standard error stream type.
+ Stderr
+ // Systemerr represents errors originating from the system that make it
+ // into the multiplexed stream.
+ Systemerr
+
+ stdWriterPrefixLen = 8
+ stdWriterFdIndex = 0
+ stdWriterSizeIndex = 4
+
+ startingBufLen = 32*1024 + stdWriterPrefixLen + 1
+)
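+
+// Each multiplexed frame is therefore an 8-byte header followed by the
+// payload: byte 0 carries the stream type (Stdin/Stdout/Stderr/Systemerr),
+// bytes 1-3 are unused padding, and bytes 4-7 carry the payload length as a
+// big-endian uint32.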
+
+var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }}
+
+// stdWriter is a wrapper of io.Writer with extra customized info.
+type stdWriter struct {
+ io.Writer
+ prefix byte
+}
+
+// Write sends the buffer to the underlying writer.
+// It inserts the prefix header before the buffer,
+// so stdcopy.StdCopy knows which stream to demultiplex the output to.
+// This makes stdWriter implement io.Writer.
+func (w *stdWriter) Write(p []byte) (int, error) {
+ if w == nil || w.Writer == nil {
+ return 0, errors.New("writer not instantiated")
+ }
+ if p == nil {
+ return 0, nil
+ }
+
+ header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix}
+ binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p)))
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Write(header[:])
+ buf.Write(p)
+
+ n, err := w.Writer.Write(buf.Bytes())
+ n -= stdWriterPrefixLen
+ if n < 0 {
+ n = 0
+ }
+
+ buf.Reset()
+ bufPool.Put(buf)
+ return n, err
+}
+
+// NewStdWriter instantiates a new Writer.
+// Everything written to it will be encapsulated using a custom format,
+// and written to the underlying `w` stream.
+// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
+// `t` indicates the id of the stream to encapsulate.
+// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
+func NewStdWriter(w io.Writer, t StdType) io.Writer {
+ return &stdWriter{
+ Writer: w,
+ prefix: byte(t),
+ }
+}
+
+// StdCopy is a modified version of io.Copy.
+//
+// StdCopy will demultiplex `src`, assuming that it contains two streams,
+// previously multiplexed together using a StdWriter instance.
+// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
+//
+// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
+// In other words: if `err` is non-nil, it indicates a real underlying error.
+//
+// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
+func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
+ var (
+ buf = make([]byte, startingBufLen)
+ bufLen = len(buf)
+ nr, nw int
+ er, ew error
+ out io.Writer
+ frameSize int
+ )
+
+ for {
+ // Make sure we have at least a full header
+ for nr < stdWriterPrefixLen {
+ var nr2 int
+ nr2, er = src.Read(buf[nr:])
+ nr += nr2
+ if er == io.EOF {
+ if nr < stdWriterPrefixLen {
+ return written, nil
+ }
+ break
+ }
+ if er != nil {
+ return 0, er
+ }
+ }
+
+ stream := StdType(buf[stdWriterFdIndex])
+ // Check the first byte to know where to write
+ switch stream {
+ case Stdin:
+ fallthrough
+ case Stdout:
+ // Write on stdout
+ out = dstout
+ case Stderr:
+ // Write on stderr
+ out = dsterr
+ case Systemerr:
+ // If we're on Systemerr, we won't write anywhere.
+ // NB: if this code changes later, make sure you don't try to write
+ // to outstream if Systemerr is the stream
+ out = nil
+ default:
+ return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex])
+ }
+
+ // Retrieve the size of the frame
+ frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
+
+ // Check if the buffer is big enough to read the frame.
+ // Extend it if necessary.
+ if frameSize+stdWriterPrefixLen > bufLen {
+ buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
+ bufLen = len(buf)
+ }
+
+ // While the amount of bytes read is less than the size of the frame + header, we keep reading
+ for nr < frameSize+stdWriterPrefixLen {
+ var nr2 int
+ nr2, er = src.Read(buf[nr:])
+ nr += nr2
+ if er == io.EOF {
+ if nr < frameSize+stdWriterPrefixLen {
+ return written, nil
+ }
+ break
+ }
+ if er != nil {
+ return 0, er
+ }
+ }
+
+ // we might have an error from the source mixed up in our multiplexed
+ // stream. if we do, return it.
+ if stream == Systemerr {
+ return written, fmt.Errorf("error from daemon in stream: %s", string(buf[stdWriterPrefixLen:frameSize+stdWriterPrefixLen]))
+ }
+
+ // Write the retrieved frame (without header)
+ nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
+ if ew != nil {
+ return 0, ew
+ }
+
+ // If the frame has not been fully written: error
+ if nw != frameSize {
+ return 0, io.ErrShortWrite
+ }
+ written += int64(nw)
+
+ // Move the rest of the buffer to the beginning
+ copy(buf, buf[frameSize+stdWriterPrefixLen:])
+ // Move the index
+ nr -= frameSize + stdWriterPrefixLen
+ }
+}
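+
+// A hypothetical round-trip sketch (not part of the vendored file): mux two
+// streams into one buffer with NewStdWriter, then split them with StdCopy:
+//
+//	var muxed bytes.Buffer
+//	NewStdWriter(&muxed, Stdout).Write([]byte("out"))
+//	NewStdWriter(&muxed, Stderr).Write([]byte("err"))
+//	var stdout, stderr bytes.Buffer
+//	_, err := StdCopy(&stdout, &stderr, &muxed)
+//	// stdout.String() == "out", stderr.String() == "err"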
diff --git a/vendor/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE
new file mode 100644
index 0000000..b55b37b
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go
new file mode 100644
index 0000000..4049d78
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/nat/nat.go
@@ -0,0 +1,240 @@
+// Package nat is a convenience package for manipulation of strings describing network ports.
+package nat
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+)
+
+// PortBinding represents a binding between a Host IP address and a Host Port
+type PortBinding struct {
+ // HostIP is the host IP Address
+ HostIP string `json:"HostIp"`
+ // HostPort is the host port number
+ HostPort string
+}
+
+// PortMap is a collection of PortBinding indexed by Port
+type PortMap map[Port][]PortBinding
+
+// PortSet is a collection of structs indexed by Port
+type PortSet map[Port]struct{}
+
+// Port is a string containing port number and protocol in the format "80/tcp"
+type Port string
+
+// NewPort creates a new instance of a Port given a protocol and port number or port range
+func NewPort(proto, port string) (Port, error) {
+ // Check for parsing issues on "port" now so we can avoid having
+ // to check it later on.
+
+ portStartInt, portEndInt, err := ParsePortRangeToInt(port)
+ if err != nil {
+ return "", err
+ }
+
+ if portStartInt == portEndInt {
+ return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil
+ }
+ return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil
+}
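+
+// For example (hypothetical values, not part of this file):
+//
+//	p, _ := NewPort("tcp", "80")        // Port("80/tcp")
+//	r, _ := NewPort("udp", "8000-9000") // Port("8000-9000/udp")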
+
+// ParsePort parses the port number string and returns an int
+func ParsePort(rawPort string) (int, error) {
+ if len(rawPort) == 0 {
+ return 0, nil
+ }
+ port, err := strconv.ParseUint(rawPort, 10, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int(port), nil
+}
+
+// ParsePortRangeToInt parses the port range string and returns start/end ints
+func ParsePortRangeToInt(rawPort string) (int, int, error) {
+ if len(rawPort) == 0 {
+ return 0, 0, nil
+ }
+ start, end, err := ParsePortRange(rawPort)
+ if err != nil {
+ return 0, 0, err
+ }
+ return int(start), int(end), nil
+}
+
+// Proto returns the protocol of a Port
+func (p Port) Proto() string {
+ proto, _ := SplitProtoPort(string(p))
+ return proto
+}
+
+// Port returns the port number of a Port
+func (p Port) Port() string {
+ _, port := SplitProtoPort(string(p))
+ return port
+}
+
+// Int returns the port number of a Port as an int
+func (p Port) Int() int {
+ portStr := p.Port()
+ // We don't need to check for an error because we're going to
+ // assume that any error would have been found, and reported, in NewPort()
+ port, _ := ParsePort(portStr)
+ return port
+}
+
+// Range returns the start/end port numbers of a Port range as ints
+func (p Port) Range() (int, int, error) {
+ return ParsePortRangeToInt(p.Port())
+}
+
+// SplitProtoPort splits a port in the format of proto/port
+func SplitProtoPort(rawPort string) (string, string) {
+ parts := strings.Split(rawPort, "/")
+ l := len(parts)
+ if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 {
+ return "", ""
+ }
+ if l == 1 {
+ return "tcp", rawPort
+ }
+ if len(parts[1]) == 0 {
+ return "tcp", parts[0]
+ }
+ return parts[1], parts[0]
+}
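+
+// For example (hypothetical values, not part of this file):
+//
+//	SplitProtoPort("80/tcp") // "tcp", "80"
+//	SplitProtoPort("80")     // "tcp", "80" (proto defaults to tcp)
+//	SplitProtoPort("80/")    // "tcp", "80"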
+
+func validateProto(proto string) bool {
+ for _, availableProto := range []string{"tcp", "udp", "sctp"} {
+ if availableProto == proto {
+ return true
+ }
+ }
+ return false
+}
+
+// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses
+// them into the internal types
+func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) {
+ var (
+ exposedPorts = make(map[Port]struct{}, len(ports))
+ bindings = make(map[Port][]PortBinding)
+ )
+ for _, rawPort := range ports {
+ portMappings, err := ParsePortSpec(rawPort)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ for _, portMapping := range portMappings {
+ port := portMapping.Port
+ if _, exists := exposedPorts[port]; !exists {
+ exposedPorts[port] = struct{}{}
+ }
+ bslice, exists := bindings[port]
+ if !exists {
+ bslice = []PortBinding{}
+ }
+ bindings[port] = append(bslice, portMapping.Binding)
+ }
+ }
+ return exposedPorts, bindings, nil
+}
+
+// PortMapping is a data object mapping a Port to a PortBinding
+type PortMapping struct {
+ Port Port
+ Binding PortBinding
+}
+
+func splitParts(rawport string) (string, string, string) {
+ parts := strings.Split(rawport, ":")
+ n := len(parts)
+ containerPort := parts[n-1]
+
+ switch n {
+ case 1:
+ return "", "", containerPort
+ case 2:
+ return "", parts[0], containerPort
+ case 3:
+ return parts[0], parts[1], containerPort
+ default:
+ return strings.Join(parts[:n-2], ":"), parts[n-2], containerPort
+ }
+}
+
+// ParsePortSpec parses a port specification string into a slice of PortMappings
+func ParsePortSpec(rawPort string) ([]PortMapping, error) {
+ var proto string
+ ip, hostPort, containerPort := splitParts(rawPort)
+ proto, containerPort = SplitProtoPort(containerPort)
+
+ if ip != "" && ip[0] == '[' {
+ // Strip [] from IPV6 addresses
+ rawIP, _, err := net.SplitHostPort(ip + ":")
+ if err != nil {
+ return nil, fmt.Errorf("invalid IP address %v: %w", ip, err)
+ }
+ ip = rawIP
+ }
+ if ip != "" && net.ParseIP(ip) == nil {
+ return nil, fmt.Errorf("invalid IP address: %s", ip)
+ }
+ if containerPort == "" {
+ return nil, fmt.Errorf("no port specified: %s<empty>", rawPort)
+ }
+
+ startPort, endPort, err := ParsePortRange(containerPort)
+ if err != nil {
+ return nil, fmt.Errorf("invalid containerPort: %s", containerPort)
+ }
+
+ var startHostPort, endHostPort uint64 = 0, 0
+ if len(hostPort) > 0 {
+ startHostPort, endHostPort, err = ParsePortRange(hostPort)
+ if err != nil {
+ return nil, fmt.Errorf("invalid hostPort: %s", hostPort)
+ }
+ }
+
+ if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) {
+ // Allow host port range iff containerPort is not a range.
+ // In this case, use the host port range as the dynamic
+ // host port range to allocate into.
+ if endPort != startPort {
+ return nil, fmt.Errorf("invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort)
+ }
+ }
+
+ if !validateProto(strings.ToLower(proto)) {
+ return nil, fmt.Errorf("invalid proto: %s", proto)
+ }
+
+ ports := []PortMapping{}
+ for i := uint64(0); i <= (endPort - startPort); i++ {
+ containerPort = strconv.FormatUint(startPort+i, 10)
+ if len(hostPort) > 0 {
+ hostPort = strconv.FormatUint(startHostPort+i, 10)
+ }
+ // Set hostPort to a range only if there is a single container port
+ // and a dynamic host port.
+ if startPort == endPort && startHostPort != endHostPort {
+ hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10))
+ }
+ port, err := NewPort(strings.ToLower(proto), containerPort)
+ if err != nil {
+ return nil, err
+ }
+
+ binding := PortBinding{
+ HostIP: ip,
+ HostPort: hostPort,
+ }
+ ports = append(ports, PortMapping{Port: port, Binding: binding})
+ }
+ return ports, nil
+}
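+
+// For example (hypothetical values, not part of this file), a full spec with
+// host IP, host port, container port, and protocol parses as:
+//
+//	m, _ := ParsePortSpec("127.0.0.1:8080:80/tcp")
+//	// m[0].Port    == Port("80/tcp")
+//	// m[0].Binding == PortBinding{HostIP: "127.0.0.1", HostPort: "8080"}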
diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go
new file mode 100644
index 0000000..e4b53e8
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/nat/parse.go
@@ -0,0 +1,33 @@
+package nat
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// ParsePortRange parses and validates the specified string as a port-range (8000-9000)
+func ParsePortRange(ports string) (uint64, uint64, error) {
+ if ports == "" {
+ return 0, 0, fmt.Errorf("empty string specified for ports")
+ }
+ if !strings.Contains(ports, "-") {
+ start, err := strconv.ParseUint(ports, 10, 16)
+ end := start
+ return start, end, err
+ }
+
+ parts := strings.Split(ports, "-")
+ start, err := strconv.ParseUint(parts[0], 10, 16)
+ if err != nil {
+ return 0, 0, err
+ }
+ end, err := strconv.ParseUint(parts[1], 10, 16)
+ if err != nil {
+ return 0, 0, err
+ }
+ if end < start {
+ return 0, 0, fmt.Errorf("invalid range specified for port: %s", ports)
+ }
+ return start, end, nil
+}
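+
+// For example (hypothetical values, not part of this file):
+//
+//	ParsePortRange("8000-9000") // 8000, 9000, nil
+//	ParsePortRange("80")        // 80, 80, nil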
diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go
new file mode 100644
index 0000000..b6eed14
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/nat/sort.go
@@ -0,0 +1,96 @@
+package nat
+
+import (
+ "sort"
+ "strings"
+)
+
+type portSorter struct {
+ ports []Port
+ by func(i, j Port) bool
+}
+
+func (s *portSorter) Len() int {
+ return len(s.ports)
+}
+
+func (s *portSorter) Swap(i, j int) {
+ s.ports[i], s.ports[j] = s.ports[j], s.ports[i]
+}
+
+func (s *portSorter) Less(i, j int) bool {
+ ip := s.ports[i]
+ jp := s.ports[j]
+
+ return s.by(ip, jp)
+}
+
+// Sort sorts a list of ports using the provided predicate.
+// The predicate should compare `i` and `j`, returning true if `i` is
+// considered to be less than `j`.
+func Sort(ports []Port, predicate func(i, j Port) bool) {
+ s := &portSorter{ports, predicate}
+ sort.Sort(s)
+}
+
+type portMapEntry struct {
+ port Port
+ binding PortBinding
+}
+
+type portMapSorter []portMapEntry
+
+func (s portMapSorter) Len() int { return len(s) }
+func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// Less sorts ports so that the order is:
+// 1. port with a larger explicitly-specified host port
+// 2. larger container port
+// 3. port with tcp protocol
+func (s portMapSorter) Less(i, j int) bool {
+ pi, pj := s[i].port, s[j].port
+ hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort)
+ return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp")
+}
+
+// SortPortMap sorts the list of ports and their respective mappings. Ports
+// with an explicit HostPort are placed first.
+func SortPortMap(ports []Port, bindings PortMap) {
+ s := portMapSorter{}
+ for _, p := range ports {
+ if binding, ok := bindings[p]; ok && len(binding) > 0 {
+ for _, b := range binding {
+ s = append(s, portMapEntry{port: p, binding: b})
+ }
+ bindings[p] = []PortBinding{}
+ } else {
+ s = append(s, portMapEntry{port: p})
+ }
+ }
+
+ sort.Sort(s)
+ var (
+ i int
+ pm = make(map[Port]struct{})
+ )
+ // reorder ports
+ for _, entry := range s {
+ if _, ok := pm[entry.port]; !ok {
+ ports[i] = entry.port
+ pm[entry.port] = struct{}{}
+ i++
+ }
+ // reorder bindings for this port
+ if _, ok := bindings[entry.port]; ok {
+ bindings[entry.port] = append(bindings[entry.port], entry.binding)
+ }
+ }
+}
+
+func toInt(s string) uint64 {
+ i, _, err := ParsePortRange(s)
+ if err != nil {
+ i = 0
+ }
+ return i
+}
diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/README.md
diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
new file mode 100644
index 0000000..99846ff
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
@@ -0,0 +1,81 @@
+package sockets
+
+import (
+ "errors"
+ "net"
+ "sync"
+)
+
+var errClosed = errors.New("use of closed network connection")
+
+// InmemSocket implements net.Listener using in-memory only connections.
+type InmemSocket struct {
+ chConn chan net.Conn
+ chClose chan struct{}
+ addr string
+ mu sync.Mutex
+}
+
+// dummyAddr is used to satisfy net.Addr for the in-mem socket.
+// It is just stored as a string and returns that string for all calls.
+type dummyAddr string
+
+// NewInmemSocket creates an in-memory only net.Listener
+// The addr argument can be any string, but is used to satisfy the `Addr()` part
+// of the net.Listener interface
+func NewInmemSocket(addr string, bufSize int) *InmemSocket {
+ return &InmemSocket{
+ chConn: make(chan net.Conn, bufSize),
+ chClose: make(chan struct{}),
+ addr: addr,
+ }
+}
+
+// Addr returns the socket's addr string to satisfy net.Listener
+func (s *InmemSocket) Addr() net.Addr {
+ return dummyAddr(s.addr)
+}
+
+// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn.
+func (s *InmemSocket) Accept() (net.Conn, error) {
+ select {
+ case conn := <-s.chConn:
+ return conn, nil
+ case <-s.chClose:
+ return nil, errClosed
+ }
+}
+
+// Close closes the listener. It will be unavailable for use once closed.
+func (s *InmemSocket) Close() error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ select {
+ case <-s.chClose:
+ default:
+ close(s.chClose)
+ }
+ return nil
+}
+
+// Dial is used to establish a connection with the in-mem server
+func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) {
+ srvConn, clientConn := net.Pipe()
+ select {
+ case s.chConn <- srvConn:
+ case <-s.chClose:
+ return nil, errClosed
+ }
+
+ return clientConn, nil
+}
+
+// Network returns the addr string, satisfies net.Addr
+func (a dummyAddr) Network() string {
+ return string(a)
+}
+
+// String returns the string form
+func (a dummyAddr) String() string {
+ return string(a)
+}
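+
+// A hypothetical sketch (not part of the vendored file): serve and dial
+// entirely in memory:
+//
+//	l := NewInmemSocket("test-addr", 1)
+//	go func() {
+//		conn, _ := l.Accept()
+//		conn.Write([]byte("hi"))
+//		conn.Close()
+//	}()
+//	conn, _ := l.Dial("inmem", "test-addr")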
diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go
new file mode 100644
index 0000000..c897cb0
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/proxy.go
@@ -0,0 +1,28 @@
+package sockets
+
+import (
+ "net"
+ "os"
+ "strings"
+)
+
+// GetProxyEnv allows access to the uppercase and the lowercase forms of
+// proxy-related variables. See the Go specification for details on these
+// variables. https://golang.org/pkg/net/http/
+func GetProxyEnv(key string) string {
+ proxyValue := os.Getenv(strings.ToUpper(key))
+ if proxyValue == "" {
+ return os.Getenv(strings.ToLower(key))
+ }
+ return proxyValue
+}
+
+// DialerFromEnvironment was previously used to configure a net.Dialer to route
+// connections through a SOCKS proxy.
+//
+// Deprecated: SOCKS proxies are now supported by configuring only
+// http.Transport.Proxy, and no longer require changing http.Transport.Dial.
+// Therefore, only sockets.ConfigureTransport() needs to be called, and any
+// sockets.DialerFromEnvironment() calls can be dropped.
+func DialerFromEnvironment(direct *net.Dialer) (*net.Dialer, error) {
+ return direct, nil
+}
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go
new file mode 100644
index 0000000..b0eae23
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/sockets.go
@@ -0,0 +1,37 @@
+// Package sockets provides helper functions to create and configure Unix or TCP sockets.
+package sockets
+
+import (
+ "errors"
+ "net"
+ "net/http"
+ "time"
+)
+
+const defaultTimeout = 10 * time.Second
+
+// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system.
+var ErrProtocolNotAvailable = errors.New("protocol not available")
+
+// ConfigureTransport configures the specified [http.Transport] according to the specified proto
+// and addr.
+//
+// If the proto is unix (using a unix socket to communicate) or npipe, compression
+// is disabled. For other protos, compression is enabled. If you want to manually
+// enable or disable compression, make sure you do so _after_ any call to
+// ConfigureTransport on the same [http.Transport].
+func ConfigureTransport(tr *http.Transport, proto, addr string) error {
+ switch proto {
+ case "unix":
+ return configureUnixTransport(tr, proto, addr)
+ case "npipe":
+ return configureNpipeTransport(tr, proto, addr)
+ default:
+ tr.Proxy = http.ProxyFromEnvironment
+ tr.DisableCompression = false
+ tr.DialContext = (&net.Dialer{
+ Timeout: defaultTimeout,
+ }).DialContext
+ }
+ return nil
+}
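+
+// A hypothetical sketch (not part of the vendored file): build an http.Client
+// that speaks HTTP over a unix socket (the socket path is an assumption):
+//
+//	tr := &http.Transport{}
+//	if err := ConfigureTransport(tr, "unix", "/var/run/docker.sock"); err != nil {
+//		// handle error
+//	}
+//	client := &http.Client{Transport: tr}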
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
new file mode 100644
index 0000000..78a34a9
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
@@ -0,0 +1,39 @@
+//go:build !windows
+
+package sockets
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "net/http"
+ "syscall"
+ "time"
+)
+
+const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path)
+
+func configureUnixTransport(tr *http.Transport, proto, addr string) error {
+ if len(addr) > maxUnixSocketPathSize {
+ return fmt.Errorf("unix socket path %q is too long", addr)
+ }
+ // No need for compression in local communications.
+ tr.DisableCompression = true
+ dialer := &net.Dialer{
+ Timeout: defaultTimeout,
+ }
+ tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
+ return dialer.DialContext(ctx, proto, addr)
+ }
+ return nil
+}
+
+func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
+ return ErrProtocolNotAvailable
+}
+
+// DialPipe connects to a Windows named pipe.
+// This is not supported on other OSes.
+func DialPipe(_ string, _ time.Duration) (net.Conn, error) {
+ return nil, syscall.EAFNOSUPPORT
+}
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
new file mode 100644
index 0000000..7acafc5
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
@@ -0,0 +1,28 @@
+package sockets
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/Microsoft/go-winio"
+)
+
+func configureUnixTransport(tr *http.Transport, proto, addr string) error {
+ return ErrProtocolNotAvailable
+}
+
+func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
+ // No need for compression in local communications.
+ tr.DisableCompression = true
+ tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
+ return winio.DialPipeContext(ctx, addr)
+ }
+ return nil
+}
+
+// DialPipe connects to a Windows named pipe.
+func DialPipe(addr string, timeout time.Duration) (net.Conn, error) {
+ return winio.DialPipe(addr, &timeout)
+}
diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
new file mode 100644
index 0000000..53cbb6c
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
@@ -0,0 +1,22 @@
+// Package sockets provides helper functions to create and configure Unix or TCP sockets.
+package sockets
+
+import (
+ "crypto/tls"
+ "net"
+)
+
+// NewTCPSocket creates a TCP socket listener with the specified address and
+// the specified tls configuration. If tlsConfig is set, it will encapsulate the
+// TCP listener inside a TLS one.
+func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) {
+ l, err := net.Listen("tcp", addr)
+ if err != nil {
+ return nil, err
+ }
+ if tlsConfig != nil {
+ tlsConfig.NextProtos = []string{"http/1.1"}
+ l = tls.NewListener(l, tlsConfig)
+ }
+ return l, nil
+}
diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go
new file mode 100644
index 0000000..b923352
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go
@@ -0,0 +1,126 @@
+//go:build !windows
+
+/*
+Package sockets is a simple unix domain socket wrapper.
+
+# Usage
+
+For example:
+
+ import (
+ "fmt"
+ "net"
+
+ "github.com/docker/go-connections/sockets"
+ )
+
+ func main() {
+ path := "/path/to/socket"
+ l, err := sockets.NewUnixSocketWithOpts(path,
+ sockets.WithChown(0, 0), sockets.WithChmod(0o660))
+ if err != nil {
+ panic(err)
+ }
+ echoStr := "hello"
+
+ go func() {
+ for {
+ conn, err := l.Accept()
+ if err != nil {
+ return
+ }
+ conn.Write([]byte(echoStr))
+ conn.Close()
+ }
+ }()
+
+ conn, err := net.Dial("unix", path)
+ if err != nil {
+ panic(err)
+ }
+
+ buf := make([]byte, 5)
+ if _, err := conn.Read(buf); err != nil {
+ panic(err)
+ } else if string(buf) != echoStr {
+ panic(fmt.Errorf("message may be lost"))
+ }
+ }
+*/
+package sockets
+
+import (
+ "net"
+ "os"
+ "syscall"
+)
+
+// SockOption sets up a socket file's creation options.
+type SockOption func(string) error
+
+// WithChown modifies the socket file's uid and gid
+func WithChown(uid, gid int) SockOption {
+ return func(path string) error {
+ if err := os.Chown(path, uid, gid); err != nil {
+ return err
+ }
+ return nil
+ }
+}
+
+// WithChmod modifies the socket file's access mode.
+func WithChmod(mask os.FileMode) SockOption {
+ return func(path string) error {
+ if err := os.Chmod(path, mask); err != nil {
+ return err
+ }
+ return nil
+ }
+}
+
+// NewUnixSocketWithOpts creates a unix socket with the specified options.
+// By default, socket permissions are 0000 (i.e.: no access for anyone); pass
+// WithChmod() and WithChown() to set the desired ownership and permissions.
+//
+// This function temporarily changes the process's "umask" to 0777 to work around
+// a race condition between creating the socket and setting its permissions. While
+// this should only be for a short duration, it may affect other processes that
+// create files/directories during that period.
+func NewUnixSocketWithOpts(path string, opts ...SockOption) (net.Listener, error) {
+ if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+
+ // net.Listen does not allow for permissions to be set. As a result, when
+ // specifying custom permissions ("WithChmod()"), there is a short time
+ // between creating the socket and applying the permissions, during which
+// the socket permissions are less restrictive than desired.
+ //
+ // To work around this limitation of net.Listen(), we temporarily set the
+ // umask to 0777, which forces the socket to be created with 000 permissions
+ // (i.e.: no access for anyone). After that, WithChmod() must be used to set
+ // the desired permissions.
+ //
+ // We don't use "defer" here, to reset the umask to its original value as soon
+ // as possible. Ideally we'd be able to detect if WithChmod() was passed as
+ // an option, and skip changing umask if default permissions are used.
+ origUmask := syscall.Umask(0o777)
+ l, err := net.Listen("unix", path)
+ syscall.Umask(origUmask)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, op := range opts {
+ if err := op(path); err != nil {
+ _ = l.Close()
+ return nil, err
+ }
+ }
+
+ return l, nil
+}
+
+// NewUnixSocket creates a unix socket with the specified path and group.
+func NewUnixSocket(path string, gid int) (net.Listener, error) {
+ return NewUnixSocketWithOpts(path, WithChown(0, gid), WithChmod(0o660))
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool.go
new file mode 100644
index 0000000..f84c624
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool.go
@@ -0,0 +1,16 @@
+package tlsconfig
+
+import (
+ "crypto/x509"
+ "runtime"
+)
+
+// SystemCertPool returns a copy of the system cert pool. It returns an error
+// if the pool failed to load, or an empty pool on Windows.
+func SystemCertPool() (*x509.CertPool, error) {
+ certpool, err := x509.SystemCertPool()
+ if err != nil && runtime.GOOS == "windows" {
+ return x509.NewCertPool(), nil
+ }
+ return certpool, err
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go
new file mode 100644
index 0000000..606c98a
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go
@@ -0,0 +1,261 @@
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
+//
+// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
+// A Config may be reused; the tls package will also not modify it.
+package tlsconfig
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "os"
+)
+
+// Options represents the information needed to create client and server TLS configurations.
+type Options struct {
+ CAFile string
+
+ // If either CertFile or KeyFile is empty, Client() will not load them,
+ // preventing the client from authenticating to the server.
+ // However, Server() requires them and will error out if they are empty.
+ CertFile string
+ KeyFile string
+
+ // client-only option
+ InsecureSkipVerify bool
+ // server-only option
+ ClientAuth tls.ClientAuthType
+ // If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS
+ // creds will include exclusively the roots in that CA file. If no CA file is provided,
+ // the system pool will be used.
+ ExclusiveRootPools bool
+ MinVersion uint16
+ // If Passphrase is set, it will be used to decrypt a TLS private key
+ // if the key is encrypted.
+ //
+ // Deprecated: Use of encrypted TLS private keys has been deprecated, and
+ // will be removed in a future release. Golang has deprecated support for
+ // legacy PEM encryption (as specified in RFC 1423), as it is insecure by
+ // design (see https://go-review.googlesource.com/c/go/+/264159).
+ Passphrase string
+}
+
+// Extra (server-side) accepted CBC cipher suites - will phase out in the future
+var acceptedCBCCiphers = []uint16{
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+}
+
+// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
+// options struct but wants to use a commonly accepted set of TLS cipher suites, with
+// known weak algorithms removed.
+var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
+
+// ServerDefault returns a secure-enough TLS configuration for servers.
+func ServerDefault(ops ...func(*tls.Config)) *tls.Config {
+ tlsConfig := &tls.Config{
+ // Avoid fallback by default to SSL protocols < TLS1.2
+ MinVersion: tls.VersionTLS12,
+ PreferServerCipherSuites: true,
+ CipherSuites: DefaultServerAcceptedCiphers,
+ }
+
+ for _, op := range ops {
+ op(tlsConfig)
+ }
+
+ return tlsConfig
+}
+
+// ClientDefault returns a secure-enough TLS configuration for clients.
+func ClientDefault(ops ...func(*tls.Config)) *tls.Config {
+ tlsConfig := &tls.Config{
+ // Prefer TLS1.2 as the client minimum
+ MinVersion: tls.VersionTLS12,
+ CipherSuites: clientCipherSuites,
+ }
+
+ for _, op := range ops {
+ op(tlsConfig)
+ }
+
+ return tlsConfig
+}
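+
+// A hypothetical sketch (not part of the vendored file): take the client
+// defaults but raise the minimum version to TLS 1.3 via an option function:
+//
+//	cfg := ClientDefault(func(c *tls.Config) {
+//		c.MinVersion = tls.VersionTLS13
+//	})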
+
+// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
+func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) {
+ // If we should verify the server, we need to load a trusted ca
+ var (
+ certPool *x509.CertPool
+ err error
+ )
+ if exclusivePool {
+ certPool = x509.NewCertPool()
+ } else {
+ certPool, err = SystemCertPool()
+ if err != nil {
+ return nil, fmt.Errorf("failed to read system certificates: %v", err)
+ }
+ }
+ pemData, err := os.ReadFile(caFile)
+ if err != nil {
+ return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err)
+ }
+ if !certPool.AppendCertsFromPEM(pemData) {
+ return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
+ }
+ return certPool, nil
+}
+
+// allTLSVersions lists all the TLS versions and is used by the code that validates
+// a uint16 value as a TLS version.
+var allTLSVersions = map[uint16]struct{}{
+ tls.VersionTLS10: {},
+ tls.VersionTLS11: {},
+ tls.VersionTLS12: {},
+ tls.VersionTLS13: {},
+}
+
+// isValidMinVersion checks that the input value is a valid TLS minimum version.
+func isValidMinVersion(version uint16) bool {
+ _, ok := allTLSVersions[version]
+ return ok
+}
+
+// adjustMinVersion sets the MinVersion on `config`, the input configuration.
+// It assumes the current MinVersion on the `config` is the lowest allowed.
+func adjustMinVersion(options Options, config *tls.Config) error {
+ if options.MinVersion > 0 {
+ if !isValidMinVersion(options.MinVersion) {
+ return fmt.Errorf("invalid minimum TLS version: %x", options.MinVersion)
+ }
+ if options.MinVersion < config.MinVersion {
+ return fmt.Errorf("requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion)
+ }
+ config.MinVersion = options.MinVersion
+ }
+
+ return nil
+}
+
+// IsErrEncryptedKey returns true if 'err' is an incorrect-password error
+// encountered when trying to decrypt a TLS private key.
+//
+// Deprecated: Use of encrypted TLS private keys has been deprecated, and
+// will be removed in a future release. Golang has deprecated support for
+// legacy PEM encryption (as specified in RFC 1423), as it is insecure by
+// design (see https://go-review.googlesource.com/c/go/+/264159).
+func IsErrEncryptedKey(err error) bool {
+ return errors.Is(err, x509.IncorrectPasswordError)
+}
+
+// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format.
+// If the private key is encrypted, 'passphrase' is used to decrypt the
+// private key.
+func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) {
+ // this section makes some small changes to code from notary/tuf/utils/x509.go
+ pemBlock, _ := pem.Decode(keyBytes)
+ if pemBlock == nil {
+ return nil, fmt.Errorf("no valid private key found")
+ }
+
+ var err error
+ if x509.IsEncryptedPEMBlock(pemBlock) { //nolint:staticcheck // Ignore SA1019 (IsEncryptedPEMBlock is deprecated)
+ keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) //nolint:staticcheck // Ignore SA1019 (DecryptPEMBlock is deprecated)
+ if err != nil {
+ return nil, fmt.Errorf("private key is encrypted, but could not decrypt it: %w", err)
+ }
+ keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes})
+ }
+
+ return keyBytes, nil
+}
+
+// getCert returns a Certificate from the CertFile and KeyFile in 'options'.
+// If the key is encrypted, the Passphrase in 'options' will be used to
+// decrypt it.
+func getCert(options Options) ([]tls.Certificate, error) {
+ if options.CertFile == "" && options.KeyFile == "" {
+ return nil, nil
+ }
+
+ cert, err := os.ReadFile(options.CertFile)
+ if err != nil {
+ return nil, err
+ }
+
+ prKeyBytes, err := os.ReadFile(options.KeyFile)
+ if err != nil {
+ return nil, err
+ }
+
+ prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase)
+ if err != nil {
+ return nil, err
+ }
+
+ tlsCert, err := tls.X509KeyPair(cert, prKeyBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ return []tls.Certificate{tlsCert}, nil
+}
+
+// Client returns a TLS configuration meant to be used by a client.
+func Client(options Options) (*tls.Config, error) {
+ tlsConfig := ClientDefault()
+ tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify
+ if !options.InsecureSkipVerify && options.CAFile != "" {
+ CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.RootCAs = CAs
+ }
+
+ tlsCerts, err := getCert(options)
+ if err != nil {
+ return nil, fmt.Errorf("could not load X509 key pair: %w", err)
+ }
+ tlsConfig.Certificates = tlsCerts
+
+ if err := adjustMinVersion(options, tlsConfig); err != nil {
+ return nil, err
+ }
+
+ return tlsConfig, nil
+}
+
+// Server returns a TLS configuration meant to be used by a server.
+func Server(options Options) (*tls.Config, error) {
+ tlsConfig := ServerDefault()
+ tlsConfig.ClientAuth = options.ClientAuth
+ tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, fmt.Errorf("could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err)
+ }
+ return nil, fmt.Errorf("error reading X509 key pair - make sure the key is not encrypted (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{tlsCert}
+ if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" {
+ CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.ClientCAs = CAs
+ }
+
+ if err := adjustMinVersion(options, tlsConfig); err != nil {
+ return nil, err
+ }
+
+ return tlsConfig, nil
+}
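For orientation, here is a minimal sketch of how the Client helper above is typically wired up. The file paths are hypothetical placeholders, and the block is an editorial illustration, not part of the vendored diff:

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// Hypothetical paths; substitute your own PKI material.
	opts := tlsconfig.Options{
		CAFile:             "ca.pem",
		CertFile:           "cert.pem",
		KeyFile:            "key.pem",
		ExclusiveRootPools: true, // trust only the roots in ca.pem
		MinVersion:         tls.VersionTLS12,
	}

	clientCfg, err := tlsconfig.Client(opts)
	if err != nil {
		log.Fatalf("building client TLS config: %v", err)
	}

	// A common destination for the resulting config.
	httpClient := &http.Client{
		Transport: &http.Transport{TLSClientConfig: clientCfg},
	}
	_ = httpClient
}
```

Server(options) follows the same shape, but requires CertFile and KeyFile, and only consults CAFile for ClientCAs when ClientAuth asks for client-certificate verification.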
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
new file mode 100644
index 0000000..a82f9fa
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
@@ -0,0 +1,14 @@
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+package tlsconfig
+
+import (
+ "crypto/tls"
+)
+
+// Client TLS cipher suites (CBC ciphers are dropped from the client's preferred suite set)
+var clientCipherSuites = []uint16{
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+}
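Both ServerDefault and ClientDefault accept functional options, so callers can tweak individual fields without rebuilding the whole config. A small illustrative sketch (editorial, not part of the diff):

```go
package main

import (
	"crypto/tls"
	"fmt"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// Raise the minimum version above the TLS 1.2 default via a functional option.
	cfg := tlsconfig.ServerDefault(func(c *tls.Config) {
		c.MinVersion = tls.VersionTLS13
	})
	fmt.Printf("min version: %#x\n", cfg.MinVersion) // 0x304
}
```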
diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md
new file mode 100644
index 0000000..9ea86d7
--- /dev/null
+++ b/vendor/github.com/docker/go-units/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# Contributing to go-units
+
+Want to hack on go-units? Awesome! Here are instructions to get you started.
+
+go-units is a part of the [Docker](https://www.docker.com) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read Docker's
+[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
+[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
+[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
+[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions).
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE
new file mode 100644
index 0000000..b55b37b
--- /dev/null
+++ b/vendor/github.com/docker/go-units/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS
new file mode 100644
index 0000000..4aac7c7
--- /dev/null
+++ b/vendor/github.com/docker/go-units/MAINTAINERS
@@ -0,0 +1,46 @@
+# go-units maintainers file
+#
+# This file describes who runs the docker/go-units project and how.
+# This is a living document - if you see something out of date or missing, speak up!
+#
+# It is structured to be consumable by both humans and programs.
+# To extract its contents programmatically, use any TOML-compliant parser.
+#
+# This file is compiled into the MAINTAINERS file in docker/opensource.
+#
+[Org]
+ [Org."Core maintainers"]
+ people = [
+ "akihirosuda",
+ "dnephin",
+ "thajeztah",
+ "vdemeester",
+ ]
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+ # ADD YOURSELF HERE IN ALPHABETICAL ORDER
+
+ [people.akihirosuda]
+ Name = "Akihiro Suda"
+ Email = "akihiro.suda.cz@hco.ntt.co.jp"
+ GitHub = "AkihiroSuda"
+
+ [people.dnephin]
+ Name = "Daniel Nephin"
+ Email = "dnephin@gmail.com"
+ GitHub = "dnephin"
+
+ [people.thajeztah]
+ Name = "Sebastiaan van Stijn"
+ Email = "github@gone.nl"
+ GitHub = "thaJeztah"
+
+ [people.vdemeester]
+ Name = "Vincent Demeester"
+ Email = "vincent@sbr.pm"
+ GitHub = "vdemeester" \ No newline at end of file
diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md
new file mode 100644
index 0000000..4f70a4e
--- /dev/null
+++ b/vendor/github.com/docker/go-units/README.md
@@ -0,0 +1,16 @@
+[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units)
+
+# Introduction
+
+go-units is a library to transform human friendly measurements into machine friendly values.
+
+## Usage
+
+See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.
+
+## Copyright and license
+
+Copyright © 2015 Docker, Inc.
+
+go-units is licensed under the Apache License, Version 2.0.
+See [LICENSE](LICENSE) for the full text of the license.
diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml
new file mode 100644
index 0000000..af9d605
--- /dev/null
+++ b/vendor/github.com/docker/go-units/circle.yml
@@ -0,0 +1,11 @@
+dependencies:
+ post:
+ # install golint
+ - go get golang.org/x/lint/golint
+
+test:
+ pre:
+ # run analysis before tests
+ - go vet ./...
+ - test -z "$(golint ./... | tee /dev/stderr)"
+ - test -z "$(gofmt -s -l . | tee /dev/stderr)"
diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go
new file mode 100644
index 0000000..48dd874
--- /dev/null
+++ b/vendor/github.com/docker/go-units/duration.go
@@ -0,0 +1,35 @@
+// Package units provides helper functions to parse and print size and time units
+// in human-readable format.
+package units
+
+import (
+ "fmt"
+ "time"
+)
+
+// HumanDuration returns a human-readable approximation of a duration
+// (eg. "About a minute", "4 hours ago", etc.).
+func HumanDuration(d time.Duration) string {
+ if seconds := int(d.Seconds()); seconds < 1 {
+ return "Less than a second"
+ } else if seconds == 1 {
+ return "1 second"
+ } else if seconds < 60 {
+ return fmt.Sprintf("%d seconds", seconds)
+ } else if minutes := int(d.Minutes()); minutes == 1 {
+ return "About a minute"
+ } else if minutes < 60 {
+ return fmt.Sprintf("%d minutes", minutes)
+ } else if hours := int(d.Hours() + 0.5); hours == 1 {
+ return "About an hour"
+ } else if hours < 48 {
+ return fmt.Sprintf("%d hours", hours)
+ } else if hours < 24*7*2 {
+ return fmt.Sprintf("%d days", hours/24)
+ } else if hours < 24*30*2 {
+ return fmt.Sprintf("%d weeks", hours/24/7)
+ } else if hours < 24*365*2 {
+ return fmt.Sprintf("%d months", hours/24/30)
+ }
+ return fmt.Sprintf("%d years", int(d.Hours())/24/365)
+}
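A short illustrative sketch of the thresholds above (editorial, not part of the vendored file):

```go
package main

import (
	"fmt"
	"time"

	units "github.com/docker/go-units"
)

func main() {
	fmt.Println(units.HumanDuration(500 * time.Millisecond)) // Less than a second
	fmt.Println(units.HumanDuration(47 * time.Second))       // 47 seconds
	fmt.Println(units.HumanDuration(3 * time.Hour))          // 3 hours
	fmt.Println(units.HumanDuration(90 * 24 * time.Hour))    // 3 months
}
```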
diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go
new file mode 100644
index 0000000..c245a89
--- /dev/null
+++ b/vendor/github.com/docker/go-units/size.go
@@ -0,0 +1,154 @@
+package units
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// See: http://en.wikipedia.org/wiki/Binary_prefix
+const (
+ // Decimal
+
+ KB = 1000
+ MB = 1000 * KB
+ GB = 1000 * MB
+ TB = 1000 * GB
+ PB = 1000 * TB
+
+ // Binary
+
+ KiB = 1024
+ MiB = 1024 * KiB
+ GiB = 1024 * MiB
+ TiB = 1024 * GiB
+ PiB = 1024 * TiB
+)
+
+type unitMap map[byte]int64
+
+var (
+ decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB}
+ binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB}
+)
+
+var (
+ decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+ binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+)
+
+func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) {
+ i := 0
+ unitsLimit := len(_map) - 1
+ for size >= base && i < unitsLimit {
+ size = size / base
+ i++
+ }
+ return size, _map[i]
+}
+
+// CustomSize returns a human-readable approximation of a size
+// using custom format.
+func CustomSize(format string, size float64, base float64, _map []string) string {
+ size, unit := getSizeAndUnit(size, base, _map)
+ return fmt.Sprintf(format, size, unit)
+}
+
+// HumanSizeWithPrecision allows the size to be formatted with any precision,
+// instead of the 4-digit precision used by HumanSize.
+func HumanSizeWithPrecision(size float64, precision int) string {
+ size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs)
+ return fmt.Sprintf("%.*g%s", precision, size, unit)
+}
+
+// HumanSize returns a human-readable approximation of a size
+// capped at 4 valid numbers (eg. "2.746 MB", "796 KB").
+func HumanSize(size float64) string {
+ return HumanSizeWithPrecision(size, 4)
+}
+
+// BytesSize returns a human-readable size in bytes, kibibytes,
+// mebibytes, gibibytes, or tebibytes (eg. "44KiB", "17MiB").
+func BytesSize(size float64) string {
+ return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs)
+}
+
+// FromHumanSize returns an integer from a human-readable specification of a
+// size using SI standard (eg. "44kB", "17MB").
+func FromHumanSize(size string) (int64, error) {
+ return parseSize(size, decimalMap)
+}
+
+// RAMInBytes parses a human-readable string representing an amount of RAM
+// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
+// returns the number of bytes, or -1 if the string is unparseable.
+// Units are case-insensitive, and the 'b' suffix is optional.
+func RAMInBytes(size string) (int64, error) {
+ return parseSize(size, binaryMap)
+}
+
+// parseSize parses the human-readable size string into the amount it represents.
+func parseSize(sizeStr string, uMap unitMap) (int64, error) {
+ // TODO: rewrite to use strings.Cut if there's a space
+ // once Go < 1.18 is deprecated.
+ sep := strings.LastIndexAny(sizeStr, "01234567890. ")
+ if sep == -1 {
+ // There should be at least a digit.
+ return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+ }
+ var num, sfx string
+ if sizeStr[sep] != ' ' {
+ num = sizeStr[:sep+1]
+ sfx = sizeStr[sep+1:]
+ } else {
+ // Omit the space separator.
+ num = sizeStr[:sep]
+ sfx = sizeStr[sep+1:]
+ }
+
+ size, err := strconv.ParseFloat(num, 64)
+ if err != nil {
+ return -1, err
+ }
+ // Backward compatibility: reject negative sizes.
+ if size < 0 {
+ return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+ }
+
+ if len(sfx) == 0 {
+ return int64(size), nil
+ }
+
+ // Process the suffix.
+
+ if len(sfx) > 3 { // Too long.
+ goto badSuffix
+ }
+ sfx = strings.ToLower(sfx)
+ // Trivial case: b suffix.
+ if sfx[0] == 'b' {
+ if len(sfx) > 1 { // no extra characters allowed after b.
+ goto badSuffix
+ }
+ return int64(size), nil
+ }
+ // A suffix from the map.
+ if mul, ok := uMap[sfx[0]]; ok {
+ size *= float64(mul)
+ } else {
+ goto badSuffix
+ }
+
+ // The suffix may have extra "b" or "ib" (e.g. KiB or MB).
+ switch {
+ case len(sfx) == 2 && sfx[1] != 'b':
+ goto badSuffix
+ case len(sfx) == 3 && sfx[1:] != "ib":
+ goto badSuffix
+ }
+
+ return int64(size), nil
+
+badSuffix:
+ return -1, fmt.Errorf("invalid suffix: '%s'", sfx)
+}
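The decimal/binary split is easiest to see side by side; an illustrative sketch (not part of the diff):

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// FromHumanSize applies decimal (SI) multipliers; RAMInBytes applies binary ones.
	si, _ := units.FromHumanSize("32kb") // 32 * 1000 = 32000
	bin, _ := units.RAMInBytes("32kb")   // 32 * 1024 = 32768
	fmt.Println(si, bin)

	// Formatting goes the other way.
	fmt.Println(units.HumanSize(2746000))  // 2.746MB
	fmt.Println(units.BytesSize(17825792)) // 17MiB
}
```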
diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go
new file mode 100644
index 0000000..fca0400
--- /dev/null
+++ b/vendor/github.com/docker/go-units/ulimit.go
@@ -0,0 +1,123 @@
+package units
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Ulimit is a human friendly version of Rlimit.
+type Ulimit struct {
+ Name string
+ Hard int64
+ Soft int64
+}
+
+// Rlimit specifies the resource limits, such as max open files.
+type Rlimit struct {
+ Type int `json:"type,omitempty"`
+ Hard uint64 `json:"hard,omitempty"`
+ Soft uint64 `json:"soft,omitempty"`
+}
+
+const (
+ // Magic numbers for making the syscall.
+ // Some of these are defined in the syscall package, but not all.
+ // Also, since the Windows client doesn't get access to the syscall package,
+ // we need to define these here.
+ rlimitAs = 9
+ rlimitCore = 4
+ rlimitCPU = 0
+ rlimitData = 2
+ rlimitFsize = 1
+ rlimitLocks = 10
+ rlimitMemlock = 8
+ rlimitMsgqueue = 12
+ rlimitNice = 13
+ rlimitNofile = 7
+ rlimitNproc = 6
+ rlimitRss = 5
+ rlimitRtprio = 14
+ rlimitRttime = 15
+ rlimitSigpending = 11
+ rlimitStack = 3
+)
+
+var ulimitNameMapping = map[string]int{
+ //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
+ "core": rlimitCore,
+ "cpu": rlimitCPU,
+ "data": rlimitData,
+ "fsize": rlimitFsize,
+ "locks": rlimitLocks,
+ "memlock": rlimitMemlock,
+ "msgqueue": rlimitMsgqueue,
+ "nice": rlimitNice,
+ "nofile": rlimitNofile,
+ "nproc": rlimitNproc,
+ "rss": rlimitRss,
+ "rtprio": rlimitRtprio,
+ "rttime": rlimitRttime,
+ "sigpending": rlimitSigpending,
+ "stack": rlimitStack,
+}
+
+// ParseUlimit parses and returns a Ulimit from the specified string.
+func ParseUlimit(val string) (*Ulimit, error) {
+ parts := strings.SplitN(val, "=", 2)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("invalid ulimit argument: %s", val)
+ }
+
+ if _, exists := ulimitNameMapping[parts[0]]; !exists {
+ return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
+ }
+
+ var (
+ soft int64
+ hard = &soft // default to soft in case no hard was set
+ temp int64
+ err error
+ )
+ switch limitVals := strings.Split(parts[1], ":"); len(limitVals) {
+ case 2:
+ temp, err = strconv.ParseInt(limitVals[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ hard = &temp
+ fallthrough
+ case 1:
+ soft, err = strconv.ParseInt(limitVals[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
+ }
+
+ if *hard != -1 {
+ if soft == -1 {
+ return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard)
+ }
+ if soft > *hard {
+ return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard)
+ }
+ }
+
+ return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil
+}
+
+// GetRlimit returns the RLimit corresponding to Ulimit.
+func (u *Ulimit) GetRlimit() (*Rlimit, error) {
+ t, exists := ulimitNameMapping[u.Name]
+ if !exists {
+ return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
+ }
+
+ return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
+}
+
+func (u *Ulimit) String() string {
+ return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
+}
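Finally, an illustrative sketch of the ulimit round trip (editorial, not part of the diff):

```go
package main

import (
	"fmt"
	"log"

	units "github.com/docker/go-units"
)

func main() {
	// Format is "name=soft[:hard]"; with no hard value, hard defaults to soft.
	u, err := units.ParseUlimit("nofile=1024:2048")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u) // nofile=1024:2048

	r, err := u.GetRlimit()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("type=%d soft=%d hard=%d\n", r.Type, r.Soft, r.Hard) // type=7 soft=1024 hard=2048
}
```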