From 4b2d609a0efcc1d9b2f1a08f954d067ad1d9cd1e Mon Sep 17 00:00:00 2001 From: mo khan Date: Wed, 14 May 2025 13:18:54 -0600 Subject: test: use playwright to test out an OIDC login --- vendor/dario.cat/mergo/FUNDING.json | 7 + vendor/dario.cat/mergo/README.md | 5 - vendor/dario.cat/mergo/SECURITY.md | 4 +- vendor/github.com/Azure/go-ansiterm/SECURITY.md | 41 + .../Azure/go-ansiterm/osc_string_state.go | 18 +- .../github.com/deckarep/golang-set/v2/.gitignore | 23 + vendor/github.com/deckarep/golang-set/v2/LICENSE | 22 + vendor/github.com/deckarep/golang-set/v2/README.md | 190 + .../github.com/deckarep/golang-set/v2/iterator.go | 58 + .../deckarep/golang-set/v2/new_improved.jpeg | Bin 0 -> 120935 bytes vendor/github.com/deckarep/golang-set/v2/set.go | 255 ++ vendor/github.com/deckarep/golang-set/v2/sorted.go | 42 + .../deckarep/golang-set/v2/threadsafe.go | 299 ++ .../deckarep/golang-set/v2/threadunsafe.go | 332 ++ vendor/github.com/docker/docker/AUTHORS | 4 + vendor/github.com/docker/docker/api/common.go | 2 +- vendor/github.com/docker/docker/api/swagger.yaml | 93 +- .../docker/docker/api/types/image/image_inspect.go | 5 +- .../docker/docker/api/types/image/opts.go | 5 + .../docker/docker/api/types/registry/registry.go | 28 +- .../docker/docker/api/types/system/info.go | 11 +- .../docker/docker/client/container_commit.go | 2 +- .../docker/docker/client/image_create.go | 2 +- .../docker/docker/client/image_inspect.go | 11 + .../docker/docker/client/image_inspect_opts.go | 12 + .../github.com/docker/docker/client/image_pull.go | 2 +- .../github.com/docker/docker/client/image_push.go | 5 +- .../github.com/docker/docker/client/image_tag.go | 2 +- vendor/github.com/docker/docker/client/request.go | 2 +- .../docker/docker/pkg/archive/archive.go | 1507 ------- .../docker/pkg/archive/archive_deprecated.go | 259 ++ .../docker/docker/pkg/archive/archive_linux.go | 107 - .../docker/docker/pkg/archive/archive_other.go | 7 - .../docker/docker/pkg/archive/archive_unix.go | 126 - .../docker/docker/pkg/archive/archive_windows.go | 69 - .../docker/docker/pkg/archive/changes.go | 430 -- .../docker/pkg/archive/changes_deprecated.go | 56 + .../docker/docker/pkg/archive/changes_linux.go | 281 -- .../docker/docker/pkg/archive/changes_other.go | 95 - .../docker/docker/pkg/archive/changes_unix.go | 43 - .../docker/docker/pkg/archive/changes_windows.go | 33 - .../github.com/docker/docker/pkg/archive/copy.go | 497 --- .../docker/docker/pkg/archive/copy_deprecated.go | 130 + .../docker/docker/pkg/archive/copy_unix.go | 11 - .../docker/docker/pkg/archive/copy_windows.go | 9 - .../docker/docker/pkg/archive/dev_freebsd.go | 7 - .../docker/docker/pkg/archive/dev_unix.go | 9 - .../github.com/docker/docker/pkg/archive/diff.go | 258 -- .../docker/docker/pkg/archive/diff_deprecated.go | 37 + .../docker/docker/pkg/archive/diff_unix.go | 21 - .../docker/docker/pkg/archive/diff_windows.go | 6 - .../github.com/docker/docker/pkg/archive/path.go | 20 - .../docker/docker/pkg/archive/path_deprecated.go | 10 + .../docker/docker/pkg/archive/path_unix.go | 9 - .../docker/docker/pkg/archive/path_windows.go | 22 - .../github.com/docker/docker/pkg/archive/time.go | 38 - .../docker/docker/pkg/archive/time_nonwindows.go | 41 - .../docker/docker/pkg/archive/time_windows.go | 32 - .../github.com/docker/docker/pkg/archive/utils.go | 42 + .../docker/docker/pkg/archive/whiteouts.go | 23 - .../docker/pkg/archive/whiteouts_deprecated.go | 10 + .../github.com/docker/docker/pkg/archive/wrap.go | 59 - .../docker/docker/pkg/archive/wrap_deprecated.go | 
14 + .../docker/docker/pkg/archive/xattr_supported.go | 52 - .../docker/pkg/archive/xattr_supported_linux.go | 5 - .../docker/pkg/archive/xattr_supported_unix.go | 7 - .../docker/docker/pkg/archive/xattr_unsupported.go | 11 - .../docker/docker/pkg/idtools/idtools.go | 78 +- .../docker/docker/pkg/idtools/idtools_unix.go | 166 - .../docker/docker/pkg/idtools/idtools_windows.go | 12 - vendor/github.com/ebitengine/purego/dlfcn.go | 8 +- .../github.com/ebitengine/purego/dlfcn_darwin.go | 5 - .../ebitengine/purego/internal/fakecgo/symbols.go | 40 +- vendor/github.com/go-ole/go-ole/SECURITY.md | 13 + vendor/github.com/go-ole/go-ole/appveyor.yml | 50 +- vendor/github.com/go-ole/go-ole/com.go | 42 + .../github.com/go-ole/go-ole/idispatch_windows.go | 3 +- vendor/github.com/go-ole/go-ole/variant.go | 2 +- vendor/github.com/go-stack/stack/LICENSE.md | 21 + vendor/github.com/go-stack/stack/README.md | 38 + vendor/github.com/go-stack/stack/stack.go | 400 ++ vendor/github.com/lufia/plan9stats/README.md | 8 + vendor/github.com/lufia/plan9stats/cpu.go | 5 +- vendor/github.com/lufia/plan9stats/disk.go | 116 + vendor/github.com/lufia/plan9stats/host.go | 82 +- vendor/github.com/lufia/plan9stats/int.go | 9 + vendor/github.com/moby/go-archive/.gitattributes | 2 + vendor/github.com/moby/go-archive/.gitignore | 1 + vendor/github.com/moby/go-archive/.golangci.yml | 33 + vendor/github.com/moby/go-archive/LICENSE | 202 + vendor/github.com/moby/go-archive/archive.go | 1169 +++++ vendor/github.com/moby/go-archive/archive_linux.go | 107 + vendor/github.com/moby/go-archive/archive_other.go | 7 + vendor/github.com/moby/go-archive/archive_unix.go | 86 + .../github.com/moby/go-archive/archive_windows.go | 62 + vendor/github.com/moby/go-archive/changes.go | 430 ++ vendor/github.com/moby/go-archive/changes_linux.go | 274 ++ vendor/github.com/moby/go-archive/changes_other.go | 95 + vendor/github.com/moby/go-archive/changes_unix.go | 43 + .../github.com/moby/go-archive/changes_windows.go | 33 + .../moby/go-archive/compression/compression.go | 263 ++ .../go-archive/compression/compression_detect.go | 65 + vendor/github.com/moby/go-archive/copy.go | 496 +++ vendor/github.com/moby/go-archive/copy_unix.go | 11 + vendor/github.com/moby/go-archive/copy_windows.go | 9 + vendor/github.com/moby/go-archive/dev_freebsd.go | 9 + vendor/github.com/moby/go-archive/dev_unix.go | 9 + vendor/github.com/moby/go-archive/diff.go | 261 ++ vendor/github.com/moby/go-archive/diff_unix.go | 21 + vendor/github.com/moby/go-archive/diff_windows.go | 6 + vendor/github.com/moby/go-archive/path.go | 20 + vendor/github.com/moby/go-archive/path_unix.go | 9 + vendor/github.com/moby/go-archive/path_windows.go | 22 + .../moby/go-archive/tarheader/tarheader.go | 67 + .../moby/go-archive/tarheader/tarheader_unix.go | 46 + .../moby/go-archive/tarheader/tarheader_windows.go | 12 + vendor/github.com/moby/go-archive/time.go | 38 + .../github.com/moby/go-archive/time_nonwindows.go | 41 + vendor/github.com/moby/go-archive/time_windows.go | 32 + vendor/github.com/moby/go-archive/whiteouts.go | 23 + vendor/github.com/moby/go-archive/wrap.go | 59 + .../github.com/moby/go-archive/xattr_supported.go | 52 + .../moby/go-archive/xattr_supported_linux.go | 5 + .../moby/go-archive/xattr_supported_unix.go | 7 + .../moby/go-archive/xattr_unsupported.go | 11 + .../moby/sys/sequential/sequential_unix.go | 27 +- .../moby/sys/sequential/sequential_windows.go | 89 +- vendor/github.com/moby/sys/user/idtools.go | 141 + vendor/github.com/moby/sys/user/idtools_unix.go | 143 + 
vendor/github.com/moby/sys/user/idtools_windows.go | 13 + vendor/github.com/moby/sys/user/user.go | 1 - vendor/github.com/moby/term/term_unix.go | 2 +- .../playwright-go/.gitattributes | 3 + .../playwright-community/playwright-go/.gitignore | 34 + .../playwright-community/playwright-go/.gitmodules | 3 + .../playwright-go/.golangci.yaml | 6 + .../playwright-community/playwright-go/.nojekyll | 0 .../playwright-community/playwright-go/404.html | 25 + .../playwright-go/CONTRIBUTING.md | 39 + .../playwright-go/Dockerfile.example | 25 + .../playwright-community/playwright-go/LICENSE | 21 + .../playwright-community/playwright-go/README.md | 148 + .../playwright-community/playwright-go/_config.yml | 21 + .../playwright-go/apiresponse_assertions.go | 75 + .../playwright-community/playwright-go/artifact.go | 70 + .../playwright-go/assertions.go | 146 + .../playwright-go/binding_call.go | 87 + .../playwright-community/playwright-go/browser.go | 274 ++ .../playwright-go/browser_context.go | 914 ++++ .../playwright-go/browser_type.go | 181 + .../playwright-go/cdp_session.go | 38 + .../playwright-community/playwright-go/channel.go | 92 + .../playwright-go/channel_owner.go | 122 + .../playwright-community/playwright-go/clock.go | 111 + .../playwright-go/cmd/playwright/main.go | 25 + .../playwright-go/connection.go | 401 ++ .../playwright-go/console_message.go | 47 + .../playwright-community/playwright-go/dialog.go | 48 + .../playwright-community/playwright-go/download.go | 56 + .../playwright-go/element_handle.go | 403 ++ .../playwright-community/playwright-go/errors.go | 58 + .../playwright-go/event_emitter.go | 163 + .../playwright-community/playwright-go/fetch.go | 451 ++ .../playwright-go/file_chooser.go | 44 + .../playwright-community/playwright-go/frame.go | 792 ++++ .../playwright-go/frame_locator.go | 130 + .../playwright-go/generated-enums.go | 404 ++ .../playwright-go/generated-interfaces.go | 4658 ++++++++++++++++++++ .../playwright-go/generated-structs.go | 4364 ++++++++++++++++++ .../playwright-community/playwright-go/glob.go | 170 + .../playwright-go/har_router.go | 110 + .../playwright-community/playwright-go/helpers.go | 628 +++ .../playwright-community/playwright-go/input.go | 117 + .../playwright-go/input_files_helper.go | 202 + .../playwright-go/internal/safe/map.go | 90 + .../playwright-go/js_handle.go | 421 ++ .../playwright-community/playwright-go/jsonPipe.go | 64 + .../playwright-go/local_utils.go | 165 + .../playwright-community/playwright-go/locator.go | 914 ++++ .../playwright-go/locator_assertions.go | 568 +++ .../playwright-go/locator_helpers.go | 133 + .../playwright-community/playwright-go/network.go | 62 + .../playwright-go/objectFactory.go | 74 + .../playwright-community/playwright-go/page.go | 1384 ++++++ .../playwright-go/page_assertions.go | 70 + .../playwright-go/playwright.go | 64 + .../playwright-community/playwright-go/request.go | 274 ++ .../playwright-community/playwright-go/response.go | 162 + .../playwright-community/playwright-go/route.go | 270 ++ .../playwright-community/playwright-go/run.go | 409 ++ .../playwright-community/playwright-go/run_unix.go | 10 + .../playwright-community/playwright-go/run_win.go | 10 + .../playwright-go/selectors.go | 88 + .../playwright-community/playwright-go/stream.go | 68 + .../playwright-community/playwright-go/tracing.go | 164 + .../playwright-go/transport.go | 141 + .../playwright-go/type_helpers.go | 72 + .../playwright-community/playwright-go/video.go | 97 + .../playwright-community/playwright-go/waiter.go | 181 + 
.../playwright-go/web_error.go | 21 + .../playwright-go/websocket.go | 134 + .../playwright-go/websocket_route.go | 220 + .../playwright-community/playwright-go/worker.go | 78 + .../playwright-go/writable_stream.go | 44 + vendor/github.com/power-devops/perfstat/config.go | 1 + vendor/github.com/power-devops/perfstat/cpustat.go | 40 + .../github.com/power-devops/perfstat/diskstat.go | 1 + vendor/github.com/power-devops/perfstat/doc.go | 13 +- vendor/github.com/power-devops/perfstat/fsstat.go | 1 + vendor/github.com/power-devops/perfstat/helpers.go | 57 +- .../github.com/power-devops/perfstat/lparstat.go | 14 + vendor/github.com/power-devops/perfstat/lvmstat.go | 1 + vendor/github.com/power-devops/perfstat/memstat.go | 1 + vendor/github.com/power-devops/perfstat/netstat.go | 1 + .../github.com/power-devops/perfstat/procstat.go | 1 + vendor/github.com/power-devops/perfstat/sysconf.go | 1 + .../github.com/power-devops/perfstat/systemcfg.go | 59 +- .../github.com/power-devops/perfstat/types_disk.go | 4 +- .../github.com/power-devops/perfstat/types_fs.go | 2 +- .../github.com/power-devops/perfstat/types_lpar.go | 61 + vendor/github.com/power-devops/perfstat/uptime.go | 1 + .../shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go | 16 +- .../shirou/gopsutil/v4/cpu/cpu_darwin.go | 15 +- .../shirou/gopsutil/v4/cpu/cpu_dragonfly.go | 11 +- .../shirou/gopsutil/v4/cpu/cpu_freebsd.go | 6 +- .../github.com/shirou/gopsutil/v4/cpu/cpu_linux.go | 2 +- .../shirou/gopsutil/v4/cpu/cpu_netbsd.go | 8 +- .../shirou/gopsutil/v4/cpu/cpu_openbsd.go | 11 +- .../github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go | 7 +- .../shirou/gopsutil/v4/cpu/cpu_solaris.go | 36 +- .../shirou/gopsutil/v4/cpu/cpu_windows.go | 28 +- .../shirou/gopsutil/v4/internal/common/binary.go | 8 +- .../shirou/gopsutil/v4/internal/common/common.go | 11 +- .../gopsutil/v4/internal/common/common_darwin.go | 11 +- .../gopsutil/v4/internal/common/common_linux.go | 20 +- .../gopsutil/v4/internal/common/common_testing.go | 14 + .../gopsutil/v4/internal/common/common_windows.go | 22 +- .../shirou/gopsutil/v4/mem/mem_darwin.go | 6 +- .../shirou/gopsutil/v4/mem/mem_fallback.go | 6 +- .../shirou/gopsutil/v4/mem/mem_freebsd.go | 11 +- .../github.com/shirou/gopsutil/v4/mem/mem_linux.go | 2 +- .../shirou/gopsutil/v4/mem/mem_netbsd.go | 4 +- .../shirou/gopsutil/v4/mem/mem_openbsd.go | 7 +- .../github.com/shirou/gopsutil/v4/mem/mem_plan9.go | 3 +- .../shirou/gopsutil/v4/mem/mem_solaris.go | 24 +- .../shirou/gopsutil/v4/mem/mem_windows.go | 9 +- vendor/github.com/shirou/gopsutil/v4/net/net.go | 6 +- .../github.com/shirou/gopsutil/v4/net/net_aix.go | 25 +- .../shirou/gopsutil/v4/net/net_aix_cgo.go | 4 +- .../shirou/gopsutil/v4/net/net_aix_nocgo.go | 8 +- .../shirou/gopsutil/v4/net/net_darwin.go | 22 +- .../shirou/gopsutil/v4/net/net_fallback.go | 14 +- .../shirou/gopsutil/v4/net/net_freebsd.go | 12 +- .../github.com/shirou/gopsutil/v4/net/net_linux.go | 23 +- .../shirou/gopsutil/v4/net/net_openbsd.go | 24 +- .../shirou/gopsutil/v4/net/net_solaris.go | 17 +- .../github.com/shirou/gopsutil/v4/net/net_unix.go | 6 +- .../shirou/gopsutil/v4/net/net_windows.go | 50 +- .../shirou/gopsutil/v4/process/process.go | 29 +- .../shirou/gopsutil/v4/process/process_bsd.go | 26 +- .../shirou/gopsutil/v4/process/process_darwin.go | 52 +- .../shirou/gopsutil/v4/process/process_fallback.go | 86 +- .../shirou/gopsutil/v4/process/process_freebsd.go | 32 +- .../shirou/gopsutil/v4/process/process_linux.go | 4 +- .../shirou/gopsutil/v4/process/process_openbsd.go | 44 +- 
.../shirou/gopsutil/v4/process/process_plan9.go | 86 +- .../shirou/gopsutil/v4/process/process_posix.go | 4 +- .../shirou/gopsutil/v4/process/process_solaris.go | 63 +- .../shirou/gopsutil/v4/process/process_windows.go | 146 +- .../gopsutil/v4/process/process_windows_32bit.go | 74 +- .../gopsutil/v4/process/process_windows_64bit.go | 36 +- vendor/github.com/tklauser/go-sysconf/.cirrus.yml | 10 +- .../github.com/tklauser/go-sysconf/sysconf_bsd.go | 1 - .../tklauser/go-sysconf/sysconf_darwin.go | 37 +- .../tklauser/go-sysconf/sysconf_generic.go | 1 - .../tklauser/go-sysconf/sysconf_linux.go | 20 +- .../tklauser/go-sysconf/sysconf_netbsd.go | 18 +- .../tklauser/go-sysconf/sysconf_posix.go | 1 - .../tklauser/go-sysconf/sysconf_unsupported.go | 1 - .../tklauser/go-sysconf/zsysconf_defs_darwin.go | 2 - .../tklauser/go-sysconf/zsysconf_defs_dragonfly.go | 1 - .../tklauser/go-sysconf/zsysconf_defs_freebsd.go | 1 - .../tklauser/go-sysconf/zsysconf_defs_linux.go | 1 - .../tklauser/go-sysconf/zsysconf_defs_netbsd.go | 1 - .../tklauser/go-sysconf/zsysconf_defs_openbsd.go | 1 - .../tklauser/go-sysconf/zsysconf_defs_solaris.go | 1 - .../go-sysconf/zsysconf_values_freebsd_386.go | 1 - .../go-sysconf/zsysconf_values_freebsd_amd64.go | 1 - .../go-sysconf/zsysconf_values_freebsd_arm.go | 1 - .../go-sysconf/zsysconf_values_freebsd_arm64.go | 1 - .../go-sysconf/zsysconf_values_freebsd_riscv64.go | 1 - .../go-sysconf/zsysconf_values_linux_386.go | 1 - .../go-sysconf/zsysconf_values_linux_amd64.go | 1 - .../go-sysconf/zsysconf_values_linux_arm.go | 1 - .../go-sysconf/zsysconf_values_linux_arm64.go | 1 - .../go-sysconf/zsysconf_values_linux_loong64.go | 1 - .../go-sysconf/zsysconf_values_linux_mips.go | 1 - .../go-sysconf/zsysconf_values_linux_mips64.go | 1 - .../go-sysconf/zsysconf_values_linux_mips64le.go | 1 - .../go-sysconf/zsysconf_values_linux_mipsle.go | 1 - .../go-sysconf/zsysconf_values_linux_ppc64.go | 1 - .../go-sysconf/zsysconf_values_linux_ppc64le.go | 1 - .../go-sysconf/zsysconf_values_linux_riscv64.go | 1 - .../go-sysconf/zsysconf_values_linux_s390x.go | 1 - .../go-sysconf/zsysconf_values_netbsd_386.go | 1 - .../go-sysconf/zsysconf_values_netbsd_amd64.go | 1 - .../go-sysconf/zsysconf_values_netbsd_arm.go | 1 - .../go-sysconf/zsysconf_values_netbsd_arm64.go | 1 - vendor/github.com/tklauser/numcpus/.cirrus.yml | 20 +- vendor/github.com/tklauser/numcpus/numcpus.go | 23 + vendor/github.com/tklauser/numcpus/numcpus_bsd.go | 1 - .../github.com/tklauser/numcpus/numcpus_linux.go | 104 +- .../tklauser/numcpus/numcpus_list_unsupported.go | 33 + .../github.com/tklauser/numcpus/numcpus_solaris.go | 1 - .../tklauser/numcpus/numcpus_unsupported.go | 1 - .../instrumentation/net/http/otelhttp/client.go | 6 +- .../instrumentation/net/http/otelhttp/common.go | 7 - .../instrumentation/net/http/otelhttp/handler.go | 58 +- .../http/otelhttp/internal/request/body_wrapper.go | 3 + .../net/http/otelhttp/internal/request/gen.go | 10 + .../internal/request/resp_writer_wrapper.go | 14 +- .../net/http/otelhttp/internal/semconv/env.go | 204 +- .../net/http/otelhttp/internal/semconv/gen.go | 14 + .../net/http/otelhttp/internal/semconv/httpconv.go | 225 +- .../net/http/otelhttp/internal/semconv/util.go | 28 +- .../net/http/otelhttp/internal/semconv/v1.20.0.go | 135 +- .../http/otelhttp/internal/semconvutil/netconv.go | 11 +- .../net/http/otelhttp/start_time_context.go | 29 + .../instrumentation/net/http/otelhttp/transport.go | 58 +- .../instrumentation/net/http/otelhttp/version.go | 2 +- vendor/modules.txt | 58 +- 331 files 
changed, 31134 insertions(+), 5165 deletions(-) create mode 100644 vendor/dario.cat/mergo/FUNDING.json create mode 100644 vendor/github.com/Azure/go-ansiterm/SECURITY.md create mode 100644 vendor/github.com/deckarep/golang-set/v2/.gitignore create mode 100644 vendor/github.com/deckarep/golang-set/v2/LICENSE create mode 100644 vendor/github.com/deckarep/golang-set/v2/README.md create mode 100644 vendor/github.com/deckarep/golang-set/v2/iterator.go create mode 100644 vendor/github.com/deckarep/golang-set/v2/new_improved.jpeg create mode 100644 vendor/github.com/deckarep/golang-set/v2/set.go create mode 100644 vendor/github.com/deckarep/golang-set/v2/sorted.go create mode 100644 vendor/github.com/deckarep/golang-set/v2/threadsafe.go create mode 100644 vendor/github.com/deckarep/golang-set/v2/threadunsafe.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_other.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_other.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/copy.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/dev_freebsd.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/dev_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/diff.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/diff_deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/diff_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/diff_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/path.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/path_deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/path_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/path_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/time.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/time_nonwindows.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/time_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/utils.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/whiteouts.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/whiteouts_deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/wrap.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/wrap_deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/xattr_supported.go 
delete mode 100644 vendor/github.com/docker/docker/pkg/archive/xattr_supported_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/xattr_supported_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/xattr_unsupported.go delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go create mode 100644 vendor/github.com/go-ole/go-ole/SECURITY.md create mode 100644 vendor/github.com/go-stack/stack/LICENSE.md create mode 100644 vendor/github.com/go-stack/stack/README.md create mode 100644 vendor/github.com/go-stack/stack/stack.go create mode 100644 vendor/github.com/lufia/plan9stats/disk.go create mode 100644 vendor/github.com/moby/go-archive/.gitattributes create mode 100644 vendor/github.com/moby/go-archive/.gitignore create mode 100644 vendor/github.com/moby/go-archive/.golangci.yml create mode 100644 vendor/github.com/moby/go-archive/LICENSE create mode 100644 vendor/github.com/moby/go-archive/archive.go create mode 100644 vendor/github.com/moby/go-archive/archive_linux.go create mode 100644 vendor/github.com/moby/go-archive/archive_other.go create mode 100644 vendor/github.com/moby/go-archive/archive_unix.go create mode 100644 vendor/github.com/moby/go-archive/archive_windows.go create mode 100644 vendor/github.com/moby/go-archive/changes.go create mode 100644 vendor/github.com/moby/go-archive/changes_linux.go create mode 100644 vendor/github.com/moby/go-archive/changes_other.go create mode 100644 vendor/github.com/moby/go-archive/changes_unix.go create mode 100644 vendor/github.com/moby/go-archive/changes_windows.go create mode 100644 vendor/github.com/moby/go-archive/compression/compression.go create mode 100644 vendor/github.com/moby/go-archive/compression/compression_detect.go create mode 100644 vendor/github.com/moby/go-archive/copy.go create mode 100644 vendor/github.com/moby/go-archive/copy_unix.go create mode 100644 vendor/github.com/moby/go-archive/copy_windows.go create mode 100644 vendor/github.com/moby/go-archive/dev_freebsd.go create mode 100644 vendor/github.com/moby/go-archive/dev_unix.go create mode 100644 vendor/github.com/moby/go-archive/diff.go create mode 100644 vendor/github.com/moby/go-archive/diff_unix.go create mode 100644 vendor/github.com/moby/go-archive/diff_windows.go create mode 100644 vendor/github.com/moby/go-archive/path.go create mode 100644 vendor/github.com/moby/go-archive/path_unix.go create mode 100644 vendor/github.com/moby/go-archive/path_windows.go create mode 100644 vendor/github.com/moby/go-archive/tarheader/tarheader.go create mode 100644 vendor/github.com/moby/go-archive/tarheader/tarheader_unix.go create mode 100644 vendor/github.com/moby/go-archive/tarheader/tarheader_windows.go create mode 100644 vendor/github.com/moby/go-archive/time.go create mode 100644 vendor/github.com/moby/go-archive/time_nonwindows.go create mode 100644 vendor/github.com/moby/go-archive/time_windows.go create mode 100644 vendor/github.com/moby/go-archive/whiteouts.go create mode 100644 vendor/github.com/moby/go-archive/wrap.go create mode 100644 vendor/github.com/moby/go-archive/xattr_supported.go create mode 100644 vendor/github.com/moby/go-archive/xattr_supported_linux.go create mode 100644 vendor/github.com/moby/go-archive/xattr_supported_unix.go create mode 100644 vendor/github.com/moby/go-archive/xattr_unsupported.go create mode 100644 vendor/github.com/moby/sys/user/idtools.go create mode 100644 vendor/github.com/moby/sys/user/idtools_unix.go create mode 100644 
vendor/github.com/moby/sys/user/idtools_windows.go create mode 100644 vendor/github.com/playwright-community/playwright-go/.gitattributes create mode 100644 vendor/github.com/playwright-community/playwright-go/.gitignore create mode 100644 vendor/github.com/playwright-community/playwright-go/.gitmodules create mode 100644 vendor/github.com/playwright-community/playwright-go/.golangci.yaml create mode 100644 vendor/github.com/playwright-community/playwright-go/.nojekyll create mode 100644 vendor/github.com/playwright-community/playwright-go/404.html create mode 100644 vendor/github.com/playwright-community/playwright-go/CONTRIBUTING.md create mode 100644 vendor/github.com/playwright-community/playwright-go/Dockerfile.example create mode 100644 vendor/github.com/playwright-community/playwright-go/LICENSE create mode 100644 vendor/github.com/playwright-community/playwright-go/README.md create mode 100644 vendor/github.com/playwright-community/playwright-go/_config.yml create mode 100644 vendor/github.com/playwright-community/playwright-go/apiresponse_assertions.go create mode 100644 vendor/github.com/playwright-community/playwright-go/artifact.go create mode 100644 vendor/github.com/playwright-community/playwright-go/assertions.go create mode 100644 vendor/github.com/playwright-community/playwright-go/binding_call.go create mode 100644 vendor/github.com/playwright-community/playwright-go/browser.go create mode 100644 vendor/github.com/playwright-community/playwright-go/browser_context.go create mode 100644 vendor/github.com/playwright-community/playwright-go/browser_type.go create mode 100644 vendor/github.com/playwright-community/playwright-go/cdp_session.go create mode 100644 vendor/github.com/playwright-community/playwright-go/channel.go create mode 100644 vendor/github.com/playwright-community/playwright-go/channel_owner.go create mode 100644 vendor/github.com/playwright-community/playwright-go/clock.go create mode 100644 vendor/github.com/playwright-community/playwright-go/cmd/playwright/main.go create mode 100644 vendor/github.com/playwright-community/playwright-go/connection.go create mode 100644 vendor/github.com/playwright-community/playwright-go/console_message.go create mode 100644 vendor/github.com/playwright-community/playwright-go/dialog.go create mode 100644 vendor/github.com/playwright-community/playwright-go/download.go create mode 100644 vendor/github.com/playwright-community/playwright-go/element_handle.go create mode 100644 vendor/github.com/playwright-community/playwright-go/errors.go create mode 100644 vendor/github.com/playwright-community/playwright-go/event_emitter.go create mode 100644 vendor/github.com/playwright-community/playwright-go/fetch.go create mode 100644 vendor/github.com/playwright-community/playwright-go/file_chooser.go create mode 100644 vendor/github.com/playwright-community/playwright-go/frame.go create mode 100644 vendor/github.com/playwright-community/playwright-go/frame_locator.go create mode 100644 vendor/github.com/playwright-community/playwright-go/generated-enums.go create mode 100644 vendor/github.com/playwright-community/playwright-go/generated-interfaces.go create mode 100644 vendor/github.com/playwright-community/playwright-go/generated-structs.go create mode 100644 vendor/github.com/playwright-community/playwright-go/glob.go create mode 100644 vendor/github.com/playwright-community/playwright-go/har_router.go create mode 100644 vendor/github.com/playwright-community/playwright-go/helpers.go create mode 100644 
vendor/github.com/playwright-community/playwright-go/input.go create mode 100644 vendor/github.com/playwright-community/playwright-go/input_files_helper.go create mode 100644 vendor/github.com/playwright-community/playwright-go/internal/safe/map.go create mode 100644 vendor/github.com/playwright-community/playwright-go/js_handle.go create mode 100644 vendor/github.com/playwright-community/playwright-go/jsonPipe.go create mode 100644 vendor/github.com/playwright-community/playwright-go/local_utils.go create mode 100644 vendor/github.com/playwright-community/playwright-go/locator.go create mode 100644 vendor/github.com/playwright-community/playwright-go/locator_assertions.go create mode 100644 vendor/github.com/playwright-community/playwright-go/locator_helpers.go create mode 100644 vendor/github.com/playwright-community/playwright-go/network.go create mode 100644 vendor/github.com/playwright-community/playwright-go/objectFactory.go create mode 100644 vendor/github.com/playwright-community/playwright-go/page.go create mode 100644 vendor/github.com/playwright-community/playwright-go/page_assertions.go create mode 100644 vendor/github.com/playwright-community/playwright-go/playwright.go create mode 100644 vendor/github.com/playwright-community/playwright-go/request.go create mode 100644 vendor/github.com/playwright-community/playwright-go/response.go create mode 100644 vendor/github.com/playwright-community/playwright-go/route.go create mode 100644 vendor/github.com/playwright-community/playwright-go/run.go create mode 100644 vendor/github.com/playwright-community/playwright-go/run_unix.go create mode 100644 vendor/github.com/playwright-community/playwright-go/run_win.go create mode 100644 vendor/github.com/playwright-community/playwright-go/selectors.go create mode 100644 vendor/github.com/playwright-community/playwright-go/stream.go create mode 100644 vendor/github.com/playwright-community/playwright-go/tracing.go create mode 100644 vendor/github.com/playwright-community/playwright-go/transport.go create mode 100644 vendor/github.com/playwright-community/playwright-go/type_helpers.go create mode 100644 vendor/github.com/playwright-community/playwright-go/video.go create mode 100644 vendor/github.com/playwright-community/playwright-go/waiter.go create mode 100644 vendor/github.com/playwright-community/playwright-go/web_error.go create mode 100644 vendor/github.com/playwright-community/playwright-go/websocket.go create mode 100644 vendor/github.com/playwright-community/playwright-go/websocket_route.go create mode 100644 vendor/github.com/playwright-community/playwright-go/worker.go create mode 100644 vendor/github.com/playwright-community/playwright-go/writable_stream.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/common_testing.go create mode 100644 vendor/github.com/tklauser/numcpus/numcpus_list_unsupported.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go (limited to 'vendor') diff --git a/vendor/dario.cat/mergo/FUNDING.json b/vendor/dario.cat/mergo/FUNDING.json new file mode 100644 index 0000000..0585e1f --- /dev/null +++ b/vendor/dario.cat/mergo/FUNDING.json @@ -0,0 +1,7 @@ +{ + "drips": { + "ethereum": { + "ownedBy": "0x6160020e7102237aC41bdb156e94401692D76930" + } + } +} diff 
--git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md index 0b3c488..0e4a59a 100644 --- a/vendor/dario.cat/mergo/README.md +++ b/vendor/dario.cat/mergo/README.md @@ -85,7 +85,6 @@ Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/depend * [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) * [go-micro/go-micro](https://github.com/go-micro/go-micro) * [grafana/loki](https://github.com/grafana/loki) -* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) * [masterminds/sprig](github.com/Masterminds/sprig) * [moby/moby](https://github.com/moby/moby) * [slackhq/nebula](https://github.com/slackhq/nebula) @@ -191,10 +190,6 @@ func main() { } ``` -Note: if test are failing due missing package, please execute: - - go get gopkg.in/yaml.v3 - ### Transformers Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? diff --git a/vendor/dario.cat/mergo/SECURITY.md b/vendor/dario.cat/mergo/SECURITY.md index a5de61f..3788fcc 100644 --- a/vendor/dario.cat/mergo/SECURITY.md +++ b/vendor/dario.cat/mergo/SECURITY.md @@ -4,8 +4,8 @@ | Version | Supported | | ------- | ------------------ | -| 0.3.x | :white_check_mark: | -| < 0.3 | :x: | +| 1.x.x | :white_check_mark: | +| < 1.0 | :x: | ## Security contact information diff --git a/vendor/github.com/Azure/go-ansiterm/SECURITY.md b/vendor/github.com/Azure/go-ansiterm/SECURITY.md new file mode 100644 index 0000000..e138ec5 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. 
buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). + + diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go index 593b10a..194d5e9 100644 --- a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go +++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go @@ -11,21 +11,13 @@ func (oscState oscStringState) Handle(b byte) (s state, e error) { return nextState, err } - switch { - case isOscStringTerminator(b): + // There are several control characters and sequences which can + // terminate an OSC string. Most of them are handled by the baseState + // handler. The ANSI_BEL character is a special case which behaves as a + // terminator only for an OSC string. + if b == ANSI_BEL { return oscState.parser.ground, nil } return oscState, nil } - -// See below for OSC string terminators for linux -// http://man7.org/linux/man-pages/man4/console_codes.4.html -func isOscStringTerminator(b byte) bool { - - if b == ANSI_BEL || b == 0x5C { - return true - } - - return false -} diff --git a/vendor/github.com/deckarep/golang-set/v2/.gitignore b/vendor/github.com/deckarep/golang-set/v2/.gitignore new file mode 100644 index 0000000..4eb156d --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/v2/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +.idea \ No newline at end of file diff --git a/vendor/github.com/deckarep/golang-set/v2/LICENSE b/vendor/github.com/deckarep/golang-set/v2/LICENSE new file mode 100644 index 0000000..efd4827 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/v2/LICENSE @@ -0,0 +1,22 @@ +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 - 2022 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/deckarep/golang-set/v2/README.md b/vendor/github.com/deckarep/golang-set/v2/README.md new file mode 100644 index 0000000..bb691b1 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/v2/README.md @@ -0,0 +1,190 @@ +![example workflow](https://github.com/deckarep/golang-set/actions/workflows/ci.yml/badge.svg) +[![Go Report Card](https://goreportcard.com/badge/github.com/deckarep/golang-set/v2)](https://goreportcard.com/report/github.com/deckarep/golang-set/v2) +[![GoDoc](https://godoc.org/github.com/deckarep/golang-set/v2?status.svg)](http://godoc.org/github.com/deckarep/golang-set/v2) + +# golang-set + +The missing `generic` set collection for the Go language. Until Go has sets built-in...use this. + +## Psst +* Hi there, 👋! Do you use or have interest in the [Zig programming language](https://ziglang.org/) created by Andrew Kelley? If so, the golang-set project has a new sibling project: [ziglang-set](https://github.com/deckarep/ziglang-set)! Come check it out! + +## Update 12/3/2024 +* Packaged version: `2.7.0` fixes a long-standing bug with *JSON Unmarshaling*. A large refactor in the interest of performance +introduced this bug, and there was no way around it but to revert the code back to how it was previously. The performance +difference was likely negligible to begin with. JSON Marshaling and Unmarshaling are now properly supported again without +needing workarounds. + +## Update 3/5/2023 +* Packaged version: the `2.2.0` release includes a refactor to minimize pointer indirection, better method documentation standards, and a few constructor convenience methods to increase ergonomics when appending items with `Append` or creating a new set from an existing `Map`. +* supports `new generic` syntax +* Go `1.18.0` or higher +* Workflow tested on Go `1.20` + +![With Generics](new_improved.jpeg) + +Coming from Python, one of the things I miss is the superbly wonderful set collection. This is my attempt to mimic the primary features of the set collection from Python. +You can of course argue that there is no need for a set in Go, otherwise the creators would have added one to the standard library. To those I say: simply ignore this repository and carry on; and to the rest who find this useful, please help make it better with suggestions or PRs. + +## Install + +Use `go get` to install this package. + +```shell +go get github.com/deckarep/golang-set/v2 +``` + +## Features + +* *NEW* [Generics](https://go.dev/doc/tutorial/generics) based implementation (requires [Go 1.18](https://go.dev/blog/go1.18beta1) or higher) +* One common *interface* to both implementations + * a **non threadsafe** implementation favoring *performance* + * a **threadsafe** implementation favoring *concurrent* use +* Feature-complete set implementation modeled after [Python's set implementation](https://docs.python.org/3/library/stdtypes.html#set).
+* Exhaustive unit-test and benchmark suite + +## Trusted by + +This package is trusted by many companies and thousands of open-source packages. Here are just a few sample users of this package. + +* Notable projects/companies using this package + * Ethereum + * Docker + * 1Password + * Hashicorp + +## Star History + +[![Star History Chart](https://api.star-history.com/svg?repos=deckarep/golang-set&type=Date)](https://star-history.com/#deckarep/golang-set&Date) + + +## Usage + +The code below demonstrates how a Set collection can better manage data and minimize boilerplate and needless loops in code. This package now fully supports *generic* syntax, so you are able to instantiate a collection for any [comparable](https://flaviocopes.com/golang-comparing-values/) type. + +What is considered comparable in Go? +* `Booleans`, `integers`, `strings`, `floats` or basically primitive types. +* `Pointers` +* `Arrays` +* `Structs` if *all of their fields* are also comparable independently + +Using this library is as simple as creating either a threadsafe or non-threadsafe set and providing a `comparable` type for instantiation of the collection. + +```go +// Syntax example, doesn't compile. +mySet := mapset.NewSet[T]() // where T is some concrete comparable type. + +// Therefore this code creates an int set +mySet := mapset.NewSet[int]() + +// Or perhaps you want a string set +mySet := mapset.NewSet[string]() + +type myStruct struct { + name string + age uint8 +} + +// Alternatively a set of structs +mySet := mapset.NewSet[myStruct]() + +// Lastly a set that can hold anything using the any or empty interface keyword: interface{}. This effectively removes type safety. +mySet := mapset.NewSet[any]() +``` + +## Comprehensive Example + +```go +package main + +import ( + "fmt" + mapset "github.com/deckarep/golang-set/v2" +) + +func main() { + // Create a string-based set of required classes. + required := mapset.NewSet[string]() + required.Add("cooking") + required.Add("english") + required.Add("math") + required.Add("biology") + + // Create a string-based set of science classes. + sciences := mapset.NewSet[string]() + sciences.Add("biology") + sciences.Add("chemistry") + + // Create a string-based set of electives. + electives := mapset.NewSet[string]() + electives.Add("welding") + electives.Add("music") + electives.Add("automotive") + + // Create a string-based set of bonus programming classes. + bonus := mapset.NewSet[string]() + bonus.Add("beginner go") + bonus.Add("python for dummies") +} +``` + +Create a set of all unique classes. +Sets will *automatically* deduplicate the same data. + +```go + all := required. + Union(sciences). + Union(electives). + Union(bonus) + + fmt.Println(all) +``` + +Output: +```sh +Set{cooking, english, math, chemistry, welding, biology, music, automotive, beginner go, python for dummies} +``` + +Is cooking considered a science class? +```go +result := sciences.Contains("cooking") +fmt.Println(result) +``` + +Output: +```sh +false +``` + +Show me all classes that are not science classes, since I don't enjoy science. +```go +notScience := all.Difference(sciences) +fmt.Println(notScience) +``` + +```sh +Set{ music, automotive, beginner go, python for dummies, cooking, english, math, welding } +``` + +Which science classes are also required classes? +```go +reqScience := sciences.Intersect(required) +fmt.Println(reqScience) +``` + +Output: +```sh +Set{biology} +``` + +How many bonus classes do you offer?
+```go +fmt.Println(bonus.Cardinality()) +``` +Output: +```sh +2 +``` + +Thanks for visiting! + +-deckarep diff --git a/vendor/github.com/deckarep/golang-set/v2/iterator.go b/vendor/github.com/deckarep/golang-set/v2/iterator.go new file mode 100644 index 0000000..fc14e70 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/v2/iterator.go @@ -0,0 +1,58 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 - 2022 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package mapset + +// Iterator defines an iterator over a Set; its C channel can be used to range over the Set's +// elements. +type Iterator[T comparable] struct { + C <-chan T + stop chan struct{} +} + +// Stop stops the Iterator; no further elements will be received on C, and C will be closed. +func (i *Iterator[T]) Stop() { + // Allows for Stop() to be called multiple times + // (close() panics when called on an already closed channel) + defer func() { + recover() + }() + + close(i.stop) + + // Exhaust any remaining elements. + for range i.C { + } +} + +// newIterator returns a new Iterator instance together with its item and stop channels.
+func newIterator[T comparable]() (*Iterator[T], chan<- T, <-chan struct{}) { + itemChan := make(chan T) + stopChan := make(chan struct{}) + return &Iterator[T]{ + C: itemChan, + stop: stopChan, + }, itemChan, stopChan +} diff --git a/vendor/github.com/deckarep/golang-set/v2/new_improved.jpeg b/vendor/github.com/deckarep/golang-set/v2/new_improved.jpeg new file mode 100644 index 0000000..429752a Binary files /dev/null and b/vendor/github.com/deckarep/golang-set/v2/new_improved.jpeg differ diff --git a/vendor/github.com/deckarep/golang-set/v2/set.go b/vendor/github.com/deckarep/golang-set/v2/set.go new file mode 100644 index 0000000..292089d --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/v2/set.go @@ -0,0 +1,255 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 - 2022 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Package mapset implements a simple and generic set collection. +// Items stored within it are unordered and unique. It supports +// typical set operations: membership testing, intersection, union, +// difference, symmetric difference and cloning. +// +// Package mapset provides two implementations of the Set +// interface. The default implementation is safe for concurrent +// access, but a non-thread-safe implementation is also provided for +// programs that can benefit from the slight speed improvement and +// that can enforce mutual exclusion through other means. +package mapset + +// Set is the primary interface provided by the mapset package. It +// represents an unordered set of data and a large number of +// operations that can be applied to that set. +type Set[T comparable] interface { + // Add adds an element to the set. Returns whether + // the item was added. + Add(val T) bool + + // Append adds multiple elements to the set. Returns + // the number of elements added. + Append(val ...T) int + + // Cardinality returns the number of elements in the set. + Cardinality() int + + // Clear removes all elements from the set, leaving + // the empty set. + Clear() + + // Clone returns a clone of the set using the same + // implementation, duplicating all keys. + Clone() Set[T] + + // Contains returns whether the given items + // are all in the set. + Contains(val ...T) bool + + // ContainsOne returns whether the given item + // is in the set. + // + // Contains may cause the argument to escape to the heap.
+ // See: https://github.com/deckarep/golang-set/issues/118 + ContainsOne(val T) bool + + // ContainsAny returns whether at least one of the + // given items is in the set. + ContainsAny(val ...T) bool + + // Difference returns the difference between this set + // and other. The returned set will contain + // all elements of this set that are not also + // elements of other. + + // Note that the argument to Difference + // must be of the same type as the receiver + // of the method. Otherwise, Difference will + // panic. + Difference(other Set[T]) Set[T] + + // Equal determines if two sets are equal to each + // other. If they have the same cardinality + // and contain the same elements, they are + // considered equal. The order in which + // the elements were added is irrelevant. + + // Note that the argument to Equal must be + // of the same type as the receiver of the + // method. Otherwise, Equal will panic. + Equal(other Set[T]) bool + + // Intersect returns a new set containing only the elements + // that exist in both sets. + + // Note that the argument to Intersect + // must be of the same type as the receiver + // of the method. Otherwise, Intersect will + // panic. + Intersect(other Set[T]) Set[T] + + // IsEmpty determines whether the set contains no elements. + IsEmpty() bool + + // IsProperSubset determines if every element in this set is in + // the other set but the two sets are not equal. + + // Note that the argument to IsProperSubset + // must be of the same type as the receiver + // of the method. Otherwise, IsProperSubset + // will panic. + IsProperSubset(other Set[T]) bool + + // IsProperSuperset determines if every element in the other set + // is in this set but the two sets are not + // equal. + + // Note that the argument to IsProperSuperset + // must be of the same type as the receiver + // of the method. Otherwise, IsProperSuperset will + // panic. + IsProperSuperset(other Set[T]) bool + + // IsSubset determines if every element in this set is in + // the other set. + + // Note that the argument to IsSubset + // must be of the same type as the receiver + // of the method. Otherwise, IsSubset will + // panic. + IsSubset(other Set[T]) bool + + // IsSuperset determines if every element in the other set + // is in this set. + + // Note that the argument to IsSuperset + // must be of the same type as the receiver + // of the method. Otherwise, IsSuperset will + // panic. + IsSuperset(other Set[T]) bool + + // Each iterates over the elements and executes the passed func against each element. + // If the passed func returns true, iteration stops at that point. + Each(func(T) bool) + + // Iter returns a channel of elements that you can + // range over. + Iter() <-chan T + + // Iterator returns an Iterator object that you can + // use to range over the set. + Iterator() *Iterator[T] + + // Remove removes a single element from the set. + Remove(i T) + + // RemoveAll removes multiple elements from the set. + RemoveAll(i ...T) + + // String provides a convenient string representation + // of the current state of the set. + String() string + + // SymmetricDifference returns a new set with all elements which are + // in either this set or the other set but not in both. + + // Note that the argument to SymmetricDifference + // must be of the same type as the receiver + // of the method. Otherwise, SymmetricDifference + // will panic. + SymmetricDifference(other Set[T]) Set[T] + + // Union returns a new set with all elements in both sets.
+ // + // Note that the argument to Union must be of the + // same type as the receiver of the method. + // Otherwise, Union will panic. + Union(other Set[T]) Set[T] + + // Pop removes and returns an arbitrary item from the set. + Pop() (T, bool) + + // ToSlice returns the members of the set as a slice. + ToSlice() []T + + // MarshalJSON will marshal the set into a JSON-based representation. + MarshalJSON() ([]byte, error) + + // UnmarshalJSON will unmarshal a JSON-based byte slice into a full Set data structure. + // For this to work, set subtypes must implement the Marshal/Unmarshal interface. + UnmarshalJSON(b []byte) error +} + +// NewSet creates and returns a new set with the given elements. +// Operations on the resulting set are thread-safe. +func NewSet[T comparable](vals ...T) Set[T] { + s := newThreadSafeSetWithSize[T](len(vals)) + for _, item := range vals { + s.Add(item) + } + return s +} + +// NewSetWithSize creates and returns a reference to an empty set with a specified +// capacity. Operations on the resulting set are thread-safe. +func NewSetWithSize[T comparable](cardinality int) Set[T] { + s := newThreadSafeSetWithSize[T](cardinality) + return s +} + +// NewThreadUnsafeSet creates and returns a new set with the given elements. +// Operations on the resulting set are not thread-safe. +func NewThreadUnsafeSet[T comparable](vals ...T) Set[T] { + s := newThreadUnsafeSetWithSize[T](len(vals)) + for _, item := range vals { + s.Add(item) + } + return s +} + +// NewThreadUnsafeSetWithSize creates and returns a reference to an empty set with +// a specified capacity. Operations on the resulting set are not thread-safe. +func NewThreadUnsafeSetWithSize[T comparable](cardinality int) Set[T] { + s := newThreadUnsafeSetWithSize[T](cardinality) + return s +} + +// NewSetFromMapKeys creates and returns a new set with the given keys of the map. +// Operations on the resulting set are thread-safe. +func NewSetFromMapKeys[T comparable, V any](val map[T]V) Set[T] { + s := NewSetWithSize[T](len(val)) + + for k := range val { + s.Add(k) + } + + return s +} + +// NewThreadUnsafeSetFromMapKeys creates and returns a new set with the given keys of the map. +// Operations on the resulting set are not thread-safe. +func NewThreadUnsafeSetFromMapKeys[T comparable, V any](val map[T]V) Set[T] { + s := NewThreadUnsafeSetWithSize[T](len(val)) + + for k := range val { + s.Add(k) + } + + return s +} diff --git a/vendor/github.com/deckarep/golang-set/v2/sorted.go b/vendor/github.com/deckarep/golang-set/v2/sorted.go new file mode 100644 index 0000000..8ee2e70 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/v2/sorted.go @@ -0,0 +1,42 @@ +//go:build go1.21 +// +build go1.21 + +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 - 2023 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package mapset + +import ( + "cmp" + "slices" +) + +// Sorted returns a sorted slice of a set of any ordered type in ascending order. +// When sorting floating-point numbers, NaNs are ordered before other values. +func Sorted[E cmp.Ordered](set Set[E]) []E { + s := set.ToSlice() + slices.Sort(s) + return s +} diff --git a/vendor/github.com/deckarep/golang-set/v2/threadsafe.go b/vendor/github.com/deckarep/golang-set/v2/threadsafe.go new file mode 100644 index 0000000..93f20c8 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/v2/threadsafe.go @@ -0,0 +1,299 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 - 2022 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package mapset + +import "sync" + +type threadSafeSet[T comparable] struct { + sync.RWMutex + uss *threadUnsafeSet[T] +} + +func newThreadSafeSet[T comparable]() *threadSafeSet[T] { + return &threadSafeSet[T]{ + uss: newThreadUnsafeSet[T](), + } +} + +func newThreadSafeSetWithSize[T comparable](cardinality int) *threadSafeSet[T] { + return &threadSafeSet[T]{ + uss: newThreadUnsafeSetWithSize[T](cardinality), + } +} + +func (t *threadSafeSet[T]) Add(v T) bool { + t.Lock() + ret := t.uss.Add(v) + t.Unlock() + return ret +} + +func (t *threadSafeSet[T]) Append(v ...T) int { + t.Lock() + ret := t.uss.Append(v...) + t.Unlock() + return ret +} + +func (t *threadSafeSet[T]) Contains(v ...T) bool { + t.RLock() + ret := t.uss.Contains(v...) + t.RUnlock() + + return ret +} + +func (t *threadSafeSet[T]) ContainsOne(v T) bool { + t.RLock() + ret := t.uss.ContainsOne(v) + t.RUnlock() + + return ret +} + +func (t *threadSafeSet[T]) ContainsAny(v ...T) bool { + t.RLock() + ret := t.uss.ContainsAny(v...) 
+ t.RUnlock() + + return ret +} + +func (t *threadSafeSet[T]) IsEmpty() bool { + return t.Cardinality() == 0 +} + +func (t *threadSafeSet[T]) IsSubset(other Set[T]) bool { + o := other.(*threadSafeSet[T]) + + t.RLock() + o.RLock() + + ret := t.uss.IsSubset(o.uss) + t.RUnlock() + o.RUnlock() + return ret +} + +func (t *threadSafeSet[T]) IsProperSubset(other Set[T]) bool { + o := other.(*threadSafeSet[T]) + + t.RLock() + defer t.RUnlock() + o.RLock() + defer o.RUnlock() + + return t.uss.IsProperSubset(o.uss) +} + +func (t *threadSafeSet[T]) IsSuperset(other Set[T]) bool { + return other.IsSubset(t) +} + +func (t *threadSafeSet[T]) IsProperSuperset(other Set[T]) bool { + return other.IsProperSubset(t) +} + +func (t *threadSafeSet[T]) Union(other Set[T]) Set[T] { + o := other.(*threadSafeSet[T]) + + t.RLock() + o.RLock() + + unsafeUnion := t.uss.Union(o.uss).(*threadUnsafeSet[T]) + ret := &threadSafeSet[T]{uss: unsafeUnion} + t.RUnlock() + o.RUnlock() + return ret +} + +func (t *threadSafeSet[T]) Intersect(other Set[T]) Set[T] { + o := other.(*threadSafeSet[T]) + + t.RLock() + o.RLock() + + unsafeIntersection := t.uss.Intersect(o.uss).(*threadUnsafeSet[T]) + ret := &threadSafeSet[T]{uss: unsafeIntersection} + t.RUnlock() + o.RUnlock() + return ret +} + +func (t *threadSafeSet[T]) Difference(other Set[T]) Set[T] { + o := other.(*threadSafeSet[T]) + + t.RLock() + o.RLock() + + unsafeDifference := t.uss.Difference(o.uss).(*threadUnsafeSet[T]) + ret := &threadSafeSet[T]{uss: unsafeDifference} + t.RUnlock() + o.RUnlock() + return ret +} + +func (t *threadSafeSet[T]) SymmetricDifference(other Set[T]) Set[T] { + o := other.(*threadSafeSet[T]) + + t.RLock() + o.RLock() + + unsafeDifference := t.uss.SymmetricDifference(o.uss).(*threadUnsafeSet[T]) + ret := &threadSafeSet[T]{uss: unsafeDifference} + t.RUnlock() + o.RUnlock() + return ret +} + +func (t *threadSafeSet[T]) Clear() { + t.Lock() + t.uss.Clear() + t.Unlock() +} + +func (t *threadSafeSet[T]) Remove(v T) { + t.Lock() + delete(*t.uss, v) + t.Unlock() +} + +func (t *threadSafeSet[T]) RemoveAll(i ...T) { + t.Lock() + t.uss.RemoveAll(i...) 
+ t.Unlock()
+}
+
+func (t *threadSafeSet[T]) Cardinality() int {
+ t.RLock()
+ defer t.RUnlock()
+ return len(*t.uss)
+}
+
+func (t *threadSafeSet[T]) Each(cb func(T) bool) {
+ t.RLock()
+ for elem := range *t.uss {
+ if cb(elem) {
+ break
+ }
+ }
+ t.RUnlock()
+}
+
+func (t *threadSafeSet[T]) Iter() <-chan T {
+ ch := make(chan T)
+ go func() {
+ t.RLock()
+
+ for elem := range *t.uss {
+ ch <- elem
+ }
+ close(ch)
+ t.RUnlock()
+ }()
+
+ return ch
+}
+
+func (t *threadSafeSet[T]) Iterator() *Iterator[T] {
+ iterator, ch, stopCh := newIterator[T]()
+
+ go func() {
+ t.RLock()
+ L:
+ for elem := range *t.uss {
+ select {
+ case <-stopCh:
+ break L
+ case ch <- elem:
+ }
+ }
+ close(ch)
+ t.RUnlock()
+ }()
+
+ return iterator
+}
+
+func (t *threadSafeSet[T]) Equal(other Set[T]) bool {
+ o := other.(*threadSafeSet[T])
+
+ t.RLock()
+ o.RLock()
+
+ ret := t.uss.Equal(o.uss)
+ t.RUnlock()
+ o.RUnlock()
+ return ret
+}
+
+func (t *threadSafeSet[T]) Clone() Set[T] {
+ t.RLock()
+
+ unsafeClone := t.uss.Clone().(*threadUnsafeSet[T])
+ ret := &threadSafeSet[T]{uss: unsafeClone}
+ t.RUnlock()
+ return ret
+}
+
+func (t *threadSafeSet[T]) String() string {
+ t.RLock()
+ ret := t.uss.String()
+ t.RUnlock()
+ return ret
+}
+
+func (t *threadSafeSet[T]) Pop() (T, bool) {
+ t.Lock()
+ defer t.Unlock()
+ return t.uss.Pop()
+}
+
+func (t *threadSafeSet[T]) ToSlice() []T {
+ keys := make([]T, 0, t.Cardinality())
+ t.RLock()
+ for elem := range *t.uss {
+ keys = append(keys, elem)
+ }
+ t.RUnlock()
+ return keys
+}
+
+func (t *threadSafeSet[T]) MarshalJSON() ([]byte, error) {
+ t.RLock()
+ b, err := t.uss.MarshalJSON()
+ t.RUnlock()
+
+ return b, err
+}
+
+func (t *threadSafeSet[T]) UnmarshalJSON(p []byte) error {
+ // UnmarshalJSON mutates the underlying set, so it must take the
+ // write lock rather than the read lock.
+ t.Lock()
+ err := t.uss.UnmarshalJSON(p)
+ t.Unlock()
+
+ return err
+}
diff --git a/vendor/github.com/deckarep/golang-set/v2/threadunsafe.go b/vendor/github.com/deckarep/golang-set/v2/threadunsafe.go
new file mode 100644
index 0000000..7e3243b
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/v2/threadunsafe.go
@@ -0,0 +1,332 @@
+/*
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 - 2022 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package mapset
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+type threadUnsafeSet[T comparable] map[T]struct{}
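[Editor's note: the following standalone snippet is an illustration only, not part of the vendored files. It assumes the package is imported under its module path github.com/deckarep/golang-set/v2, uses placeholder scope strings, and exercises only APIs defined in this patch: NewSet, Intersect, IsSubset, and the go1.21 Sorted helper from sorted.go.]

package main

import (
 "fmt"

 mapset "github.com/deckarep/golang-set/v2"
)

func main() {
 // NewSet returns the thread-safe implementation, which wraps the
 // threadUnsafeSet map type defined in this file.
 required := mapset.NewSet("openid", "profile", "email")
 granted := mapset.NewSet("openid", "email")

 // Intersect keeps only the elements present in both sets.
 common := required.Intersect(granted)
 fmt.Println(common.IsSubset(required)) // true

 // Sorted (go1.21+) returns the elements as an ascending slice.
 fmt.Println(mapset.Sorted(common)) // [email openid]
}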
+
+// Assert that the concrete type threadUnsafeSet adheres to the Set interface.
+var _ Set[string] = (*threadUnsafeSet[string])(nil)
+
+func newThreadUnsafeSet[T comparable]() *threadUnsafeSet[T] {
+ t := make(threadUnsafeSet[T])
+ return &t
+}
+
+func newThreadUnsafeSetWithSize[T comparable](cardinality int) *threadUnsafeSet[T] {
+ t := make(threadUnsafeSet[T], cardinality)
+ return &t
+}
+
+func (s threadUnsafeSet[T]) Add(v T) bool {
+ prevLen := len(s)
+ s[v] = struct{}{}
+ return prevLen != len(s)
+}
+
+func (s *threadUnsafeSet[T]) Append(v ...T) int {
+ prevLen := len(*s)
+ for _, val := range v {
+ (*s)[val] = struct{}{}
+ }
+ return len(*s) - prevLen
+}
+
+// private version of Add which doesn't return a value
+func (s *threadUnsafeSet[T]) add(v T) {
+ (*s)[v] = struct{}{}
+}
+
+func (s *threadUnsafeSet[T]) Cardinality() int {
+ return len(*s)
+}
+
+func (s *threadUnsafeSet[T]) Clear() {
+ // Constructions like this are optimised by the compiler and replaced by
+ // the mapclear() function, defined in
+ // https://github.com/golang/go/blob/29bbca5c2c1ad41b2a9747890d183b6dd3a4ace4/src/runtime/map.go#L993
+ for key := range *s {
+ delete(*s, key)
+ }
+}
+
+func (s *threadUnsafeSet[T]) Clone() Set[T] {
+ clonedSet := newThreadUnsafeSetWithSize[T](s.Cardinality())
+ for elem := range *s {
+ clonedSet.add(elem)
+ }
+ return clonedSet
+}
+
+func (s *threadUnsafeSet[T]) Contains(v ...T) bool {
+ for _, val := range v {
+ if _, ok := (*s)[val]; !ok {
+ return false
+ }
+ }
+ return true
+}
+
+func (s *threadUnsafeSet[T]) ContainsOne(v T) bool {
+ _, ok := (*s)[v]
+ return ok
+}
+
+func (s *threadUnsafeSet[T]) ContainsAny(v ...T) bool {
+ for _, val := range v {
+ if _, ok := (*s)[val]; ok {
+ return true
+ }
+ }
+ return false
+}
+
+// private version of Contains for a single element v
+func (s *threadUnsafeSet[T]) contains(v T) (ok bool) {
+ _, ok = (*s)[v]
+ return ok
+}
+
+func (s *threadUnsafeSet[T]) Difference(other Set[T]) Set[T] {
+ o := other.(*threadUnsafeSet[T])
+
+ diff := newThreadUnsafeSet[T]()
+ for elem := range *s {
+ if !o.contains(elem) {
+ diff.add(elem)
+ }
+ }
+ return diff
+}
+
+func (s *threadUnsafeSet[T]) Each(cb func(T) bool) {
+ for elem := range *s {
+ if cb(elem) {
+ break
+ }
+ }
+}
+
+func (s *threadUnsafeSet[T]) Equal(other Set[T]) bool {
+ o := other.(*threadUnsafeSet[T])
+
+ if s.Cardinality() != other.Cardinality() {
+ return false
+ }
+ for elem := range *s {
+ if !o.contains(elem) {
+ return false
+ }
+ }
+ return true
+}
+
+func (s *threadUnsafeSet[T]) Intersect(other Set[T]) Set[T] {
+ o := other.(*threadUnsafeSet[T])
+
+ intersection := newThreadUnsafeSet[T]()
+ // loop over the smaller set
+ if s.Cardinality() < other.Cardinality() {
+ for elem := range *s {
+ if o.contains(elem) {
+ intersection.add(elem)
+ }
+ }
+ } else {
+ for elem := range *o {
+ if s.contains(elem) {
+ intersection.add(elem)
+ }
+ }
+ }
+ return intersection
+}
+
+func (s *threadUnsafeSet[T]) IsEmpty() bool {
+ return s.Cardinality() == 0
+}
+
+func (s *threadUnsafeSet[T]) IsProperSubset(other Set[T]) bool {
+ return s.Cardinality() < other.Cardinality() && s.IsSubset(other)
+}
+
+func (s *threadUnsafeSet[T]) IsProperSuperset(other Set[T]) bool {
+ return s.Cardinality() > other.Cardinality() && s.IsSuperset(other)
+}
+
+func (s *threadUnsafeSet[T]) IsSubset(other Set[T]) bool {
+ o := other.(*threadUnsafeSet[T])
+ if s.Cardinality() > other.Cardinality() {
+ return false
+ }
+ for elem := range *s {
+ if !o.contains(elem) {
+ return false
+ }
+ }
+ return true
+}
+
+func (s *threadUnsafeSet[T]) IsSuperset(other Set[T]) bool {
+ return other.IsSubset(s)
+}
+
+func (s
*threadUnsafeSet[T]) Iter() <-chan T {
+ ch := make(chan T)
+ go func() {
+ for elem := range *s {
+ ch <- elem
+ }
+ close(ch)
+ }()
+
+ return ch
+}
+
+func (s *threadUnsafeSet[T]) Iterator() *Iterator[T] {
+ iterator, ch, stopCh := newIterator[T]()
+
+ go func() {
+ L:
+ for elem := range *s {
+ select {
+ case <-stopCh:
+ break L
+ case ch <- elem:
+ }
+ }
+ close(ch)
+ }()
+
+ return iterator
+}
+
+// Pop removes and returns an arbitrary item if the set is not empty;
+// otherwise it returns the zero value of T and false.
+func (s *threadUnsafeSet[T]) Pop() (v T, ok bool) {
+ for item := range *s {
+ delete(*s, item)
+ return item, true
+ }
+ return v, false
+}
+
+func (s threadUnsafeSet[T]) Remove(v T) {
+ delete(s, v)
+}
+
+func (s threadUnsafeSet[T]) RemoveAll(i ...T) {
+ for _, elem := range i {
+ delete(s, elem)
+ }
+}
+
+func (s threadUnsafeSet[T]) String() string {
+ items := make([]string, 0, len(s))
+
+ for elem := range s {
+ items = append(items, fmt.Sprintf("%v", elem))
+ }
+ return fmt.Sprintf("Set{%s}", strings.Join(items, ", "))
+}
+
+func (s *threadUnsafeSet[T]) SymmetricDifference(other Set[T]) Set[T] {
+ o := other.(*threadUnsafeSet[T])
+
+ sd := newThreadUnsafeSet[T]()
+ for elem := range *s {
+ if !o.contains(elem) {
+ sd.add(elem)
+ }
+ }
+ for elem := range *o {
+ if !s.contains(elem) {
+ sd.add(elem)
+ }
+ }
+ return sd
+}
+
+func (s threadUnsafeSet[T]) ToSlice() []T {
+ keys := make([]T, 0, s.Cardinality())
+ for elem := range s {
+ keys = append(keys, elem)
+ }
+
+ return keys
+}
+
+func (s threadUnsafeSet[T]) Union(other Set[T]) Set[T] {
+ o := other.(*threadUnsafeSet[T])
+
+ n := s.Cardinality()
+ if o.Cardinality() > n {
+ n = o.Cardinality()
+ }
+ unionedSet := make(threadUnsafeSet[T], n)
+
+ for elem := range s {
+ unionedSet.add(elem)
+ }
+ for elem := range *o {
+ unionedSet.add(elem)
+ }
+ return &unionedSet
+}
+
+// MarshalJSON creates a JSON array from the set; it marshals all elements.
+func (s threadUnsafeSet[T]) MarshalJSON() ([]byte, error) {
+ items := make([]string, 0, s.Cardinality())
+
+ for elem := range s {
+ b, err := json.Marshal(elem)
+ if err != nil {
+ return nil, err
+ }
+
+ items = append(items, string(b))
+ }
+
+ return []byte(fmt.Sprintf("[%s]", strings.Join(items, ","))), nil
+}
+
+// UnmarshalJSON recreates a set from a JSON array; it only decodes
+// primitive types.
+func (s *threadUnsafeSet[T]) UnmarshalJSON(b []byte) error {
+ var i []T
+ err := json.Unmarshal(b, &i)
+ if err != nil {
+ return err
+ }
+ s.Append(i...)
+
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS
index 88032de..a2e972e 100644
--- a/vendor/github.com/docker/docker/AUTHORS
+++ b/vendor/github.com/docker/docker/AUTHORS
@@ -293,6 +293,7 @@ Brandon Liu
Brandon Philips
Brandon Rhodes
Brendan Dixon
+Brendon Smith
Brennan Kinney <5098581+polarathene@users.noreply.github.com>
Brent Salisbury
Brett Higgins
@@ -347,6 +348,7 @@ Casey Bisson
Catalin Pirvu
Ce Gao
Cedric Davies
+Cesar Talledo
Cezar Sa Espinola
Chad Swenson
Chance Zibolski
@@ -1281,6 +1283,7 @@ Krasi Georgiev
Krasimir Georgiev
Kris-Mikael Krister
Kristian Haugene
+Kristian Heljas
Kristina Zabunova
Krystian Wojcicki
Kunal Kushwaha
@@ -1712,6 +1715,7 @@ Patrick Hemmer
Patrick St.
laurent Patrick Stapleton Patrik Cyvoct +Patrik Leifert pattichen Paul "TBBle" Hampson Paul diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index 2c62cd4..d75c43d 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of the current REST API. - DefaultVersion = "1.48" + DefaultVersion = "1.49" // MinSupportedAPIVersion is the minimum API version that can be supported // by the API server, specified as "major.minor". Note that the daemon diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index 646032d..1183aaf 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.48" +basePath: "/v1.49" info: title: "Docker Engine API" - version: "1.48" + version: "1.49" x-logo: url: "https://docs.docker.com/assets/images/logo-docker-main.png" description: | @@ -55,8 +55,8 @@ info: the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.48) is used. - For example, calling `/info` is the same as calling `/v1.48/info`. Using the + If you omit the version-prefix, the current version of the API (v1.49) is used. + For example, calling `/info` is the same as calling `/v1.49/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, @@ -6856,6 +6856,8 @@ definitions: description: "The network pool size" type: "integer" example: "24" + FirewallBackend: + $ref: "#/definitions/FirewallInfo" Warnings: description: | List of warnings / informational messages about missing features, or @@ -6939,6 +6941,37 @@ definitions: default: "plugins.moby" example: "plugins.moby" + FirewallInfo: + description: | + Information about the daemon's firewalling configuration. + + This field is currently only used on Linux, and omitted on other platforms. + type: "object" + x-nullable: true + properties: + Driver: + description: | + The name of the firewall backend driver. + type: "string" + example: "nftables" + Info: + description: | + Information about the firewall backend, provided as + "label" / "value" pairs. + +


+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["ReloadedAt", "2025-01-01T00:00:00Z"] + # PluginsInfo is a temp struct holding Plugins name # registered with docker daemon. It is used by Info struct PluginsInfo: @@ -6984,32 +7017,6 @@ definitions: type: "object" x-nullable: true properties: - AllowNondistributableArtifactsCIDRs: - description: | - List of IP ranges to which nondistributable artifacts can be pushed, - using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - -


-
- > **Deprecated**: Pushing nondistributable artifacts is now always enabled
- > and this field is always `null`. This field will be removed in a API v1.49.
- type: "array"
- items:
- type: "string"
- example: []
- AllowNondistributableArtifactsHostnames:
- description: |
- List of registry hostnames to which nondistributable artifacts can be
- pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`.
-


-
- > **Deprecated**: Pushing nondistributable artifacts is now always enabled
- > and this field is always `null`. This field will be removed in a API v1.49.
- type: "array"
- items:
- type: "string"
- example: []
InsecureRegistryCIDRs:
description: |
List of IP ranges of insecure registries, using the CIDR syntax
@@ -7179,13 +7186,6 @@ definitions:
description: "Actual commit ID of external tool."
type: "string"
example: "cfb82a876ecc11b5ca0977d1733adbe58599088a"
- Expected:
- description: |
- Commit ID of external tool expected by dockerd as set at build time.
-
- **Deprecated**: This field is deprecated and will be omitted in a API v1.49.
- type: "string"
- example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4"

SwarmInfo:
description: |
@@ -10491,13 +10491,9 @@ paths:

### Image tarball format

- An image tarball contains one directory per image layer (named using its long ID), each containing these files:
-
- - `VERSION`: currently `1.0` - the file format version
- - `json`: detailed layer information, similar to `docker inspect layer_id`
- - `layer.tar`: A tarfile containing the filesystem changes in this layer
+ An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content).

- The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions.
+ Additionally, it includes the manifest.json file associated with the backwards-compatible docker save format.

If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs.
@@ -10537,6 +10533,7 @@ paths:
If not provided, the full multi-platform image will be saved.

Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
+ tags: ["Image"]
/images/get:
get:
summary: "Export several images"
@@ -10571,6 +10568,16 @@ paths:
type: "array"
items:
type: "string"
+ - name: "platform"
+ type: "string"
+ in: "query"
+ description: |
+ JSON encoded OCI platform describing a platform which will be used
+ to select a platform-specific image to be saved if the image is
+ multi-platform.
+ If not provided, the full multi-platform image will be saved.
+
+ Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
tags: ["Image"]
/images/load:
post:
diff --git a/vendor/github.com/docker/docker/api/types/image/image_inspect.go b/vendor/github.com/docker/docker/api/types/image/image_inspect.go
index 78e81f0..40d1f97 100644
--- a/vendor/github.com/docker/docker/api/types/image/image_inspect.go
+++ b/vendor/github.com/docker/docker/api/types/image/image_inspect.go
@@ -128,11 +128,12 @@ type InspectResponse struct {
// compatibility.
Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"`

- // Manifests is a list of image manifests available in this image. It
+ // Manifests is a list of image manifests available in this image. It
// provides a more detailed view of the platform-specific image manifests or
// other image-attached data like build attestations.
//
- // Only available if the daemon provides a multi-platform image store.
+ // Only available if the daemon provides a multi-platform image store, the client
+ // requests manifests, AND does not request a specific platform.
//
// WARNING: This is experimental and may change at any time without any backward
// compatibility.
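[Editor's note: read together with the client option added later in this patch (ImageInspectWithPlatform in image_inspect_opts.go), the doc change above means requesting a specific platform and requesting manifests are mutually exclusive on inspect. The sketch below is an illustration only, not part of the patch; it assumes a daemon negotiating API v1.49+, uses a placeholder image name, and calls only client APIs present in this vendor tree.]

package main

import (
 "context"
 "fmt"

 "github.com/docker/docker/client"
 ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
 ctx := context.Background()
 cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
 if err != nil {
  panic(err)
 }
 defer cli.Close()

 // Pin the inspect to one variant of a multi-platform image. Per the
 // doc comment above, requesting a platform means Manifests will not
 // be populated in the response.
 resp, err := cli.ImageInspect(ctx, "alpine:latest",
  client.ImageInspectWithPlatform(&ocispec.Platform{OS: "linux", Architecture: "arm64"}),
 )
 if err != nil {
  panic(err)
 }
 fmt.Println(resp.ID, resp.Os, resp.Architecture)
}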
diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go index 919510f..57800e0 100644 --- a/vendor/github.com/docker/docker/api/types/image/opts.go +++ b/vendor/github.com/docker/docker/api/types/image/opts.go @@ -106,6 +106,11 @@ type LoadOptions struct { type InspectOptions struct { // Manifests returns the image manifests. Manifests bool + + // Platform selects the specific platform of a multi-platform image to inspect. + // + // This option is only available for API version 1.49 and up. + Platform *ocispec.Platform } // SaveOptions holds parameters to save images. diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index 8117cb0..14c82aa 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -1,3 +1,6 @@ +// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: +//go:build go1.23 + package registry // import "github.com/docker/docker/api/types/registry" import ( @@ -15,23 +18,26 @@ type ServiceConfig struct { InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` Mirrors []string + + // ExtraFields is for internal use to include deprecated fields on older API versions. + ExtraFields map[string]any `json:"-"` } // MarshalJSON implements a custom marshaler to include legacy fields // in API responses. -func (sc ServiceConfig) MarshalJSON() ([]byte, error) { - tmp := map[string]interface{}{ - "InsecureRegistryCIDRs": sc.InsecureRegistryCIDRs, - "IndexConfigs": sc.IndexConfigs, - "Mirrors": sc.Mirrors, - } - if sc.AllowNondistributableArtifactsCIDRs != nil { - tmp["AllowNondistributableArtifactsCIDRs"] = nil +func (sc *ServiceConfig) MarshalJSON() ([]byte, error) { + type tmp ServiceConfig + base, err := json.Marshal((*tmp)(sc)) + if err != nil { + return nil, err } - if sc.AllowNondistributableArtifactsHostnames != nil { - tmp["AllowNondistributableArtifactsHostnames"] = nil + var merged map[string]any + _ = json.Unmarshal(base, &merged) + + for k, v := range sc.ExtraFields { + merged[k] = v } - return json.Marshal(tmp) + return json.Marshal(merged) } // NetIPNet is the net.IPNet type, which can be marshalled and diff --git a/vendor/github.com/docker/docker/api/types/system/info.go b/vendor/github.com/docker/docker/api/types/system/info.go index 8a2444d..27173d4 100644 --- a/vendor/github.com/docker/docker/api/types/system/info.go +++ b/vendor/github.com/docker/docker/api/types/system/info.go @@ -73,6 +73,7 @@ type Info struct { SecurityOptions []string ProductLicense string `json:",omitempty"` DefaultAddressPools []NetworkAddressPool `json:",omitempty"` + FirewallBackend *FirewallInfo `json:"FirewallBackend,omitempty"` CDISpecDirs []string Containerd *ContainerdInfo `json:",omitempty"` @@ -143,7 +144,7 @@ type Commit struct { // Expected is the commit ID of external tool expected by dockerd as set at build time. // // Deprecated: this field is no longer used in API v1.49, but kept for backward-compatibility with older API versions. - Expected string + Expected string `json:",omitempty"` } // NetworkAddressPool is a temp struct used by [Info] struct. @@ -151,3 +152,11 @@ type NetworkAddressPool struct { Base string Size int } + +// FirewallInfo describes the firewall backend. 
+type FirewallInfo struct {
+ // Driver is the name of the firewall backend driver.
+ Driver string `json:"Driver"`
+ // Info is a list of label/value pairs, containing information related to the firewall.
+ Info [][2]string `json:"Info,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go
index 9b46a1f..4838ac7 100644
--- a/vendor/github.com/docker/docker/client/container_commit.go
+++ b/vendor/github.com/docker/docker/client/container_commit.go
@@ -32,7 +32,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, containerID string, opti
if tagged, ok := ref.(reference.Tagged); ok {
tag = tagged.Tag()
}
- repository = reference.FamiliarName(ref)
+ repository = ref.Name()
}

query := url.Values{}
diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go
index 0357051..1aa061e 100644
--- a/vendor/github.com/docker/docker/client/image_create.go
+++ b/vendor/github.com/docker/docker/client/image_create.go
@@ -21,7 +21,7 @@ func (cli *Client) ImageCreate(ctx context.Context, parentReference string, opti
}

query := url.Values{}
- query.Set("fromImage", reference.FamiliarName(ref))
+ query.Set("fromImage", ref.Name())
query.Set("tag", getAPITagFromNamedRef(ref))
if options.Platform != "" {
query.Set("platform", strings.ToLower(options.Platform))
diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go
index 1161195..d88f0f1 100644
--- a/vendor/github.com/docker/docker/client/image_inspect.go
+++ b/vendor/github.com/docker/docker/client/image_inspect.go
@@ -32,6 +32,17 @@ func (cli *Client) ImageInspect(ctx context.Context, imageID string, inspectOpts
query.Set("manifests", "1")
}

+ if opts.apiOptions.Platform != nil {
+ if err := cli.NewVersionError(ctx, "1.49", "platform"); err != nil {
+ return image.InspectResponse{}, err
+ }
+ platform, err := encodePlatform(opts.apiOptions.Platform)
+ if err != nil {
+ return image.InspectResponse{}, err
+ }
+ query.Set("platform", platform)
+ }
+
resp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil)
defer ensureReaderClosed(resp)
if err != nil {
diff --git a/vendor/github.com/docker/docker/client/image_inspect_opts.go b/vendor/github.com/docker/docker/client/image_inspect_opts.go
index 2607f36..655cbf0 100644
--- a/vendor/github.com/docker/docker/client/image_inspect_opts.go
+++ b/vendor/github.com/docker/docker/client/image_inspect_opts.go
@@ -4,6 +4,7 @@ import (
"bytes"

"github.com/docker/docker/api/types/image"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// ImageInspectOption is a type representing functional options for the image inspect operation.
@@ -36,6 +37,17 @@ func ImageInspectWithManifests(manifests bool) ImageInspectOption {
})
}

+// ImageInspectWithPlatform sets the platform API option for the image inspect operation.
+// This option is only available for API version 1.49 and up.
+// With this option set, the image inspect operation will return information for the
+// specified platform variant of the multi-platform image.
+func ImageInspectWithPlatform(platform *ocispec.Platform) ImageInspectOption {
+ return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error {
+ clientOpts.apiOptions.Platform = platform
+ return nil
+ })
+}
+
// ImageInspectWithAPIOpts sets the API options for the image inspect operation.
func ImageInspectWithAPIOpts(opts image.InspectOptions) ImageInspectOption { return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error { diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go index 4286942..f5fe85d 100644 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -26,7 +26,7 @@ func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.P } query := url.Values{} - query.Set("fromImage", reference.FamiliarName(ref)) + query.Set("fromImage", ref.Name()) if !options.All { query.Set("tag", getAPITagFromNamedRef(ref)) } diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go index b340bc4..1a343f4 100644 --- a/vendor/github.com/docker/docker/client/image_push.go +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -29,7 +29,6 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu return nil, errors.New("cannot push a digest reference") } - name := reference.FamiliarName(ref) query := url.Values{} if !options.All { ref = reference.TagNameOnly(ref) @@ -52,13 +51,13 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu query.Set("platform", string(pJson)) } - resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) + resp, err := cli.tryImagePush(ctx, ref.Name(), query, options.RegistryAuth) if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) if privilegeErr != nil { return nil, privilegeErr } - resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader) + resp, err = cli.tryImagePush(ctx, ref.Name(), query, newAuthHeader) } if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go index ea6b4a1..25c7360 100644 --- a/vendor/github.com/docker/docker/client/image_tag.go +++ b/vendor/github.com/docker/docker/client/image_tag.go @@ -26,7 +26,7 @@ func (cli *Client) ImageTag(ctx context.Context, source, target string) error { ref = reference.TagNameOnly(ref) query := url.Values{} - query.Set("repo", reference.FamiliarName(ref)) + query.Set("repo", ref.Name()) if tagged, ok := ref.(reference.Tagged); ok { query.Set("tag", tagged.Tag()) } diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index 2b913aa..4cc6435 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -237,7 +237,7 @@ func (cli *Client) checkResponseErr(serverResp *http.Response) (retErr error) { } var daemonErr error - if serverResp.Header.Get("Content-Type") == "application/json" && (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) { + if serverResp.Header.Get("Content-Type") == "application/json" { var errorResponse types.ErrorResponse if err := json.Unmarshal(body, &errorResponse); err != nil { return errors.Wrap(err, "Error reading JSON") diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go deleted file mode 100644 index 9bbb11c..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ /dev/null @@ -1,1507 +0,0 @@ -// Package archive provides helper functions for dealing with archive files. 
-package archive - -import ( - "archive/tar" - "bufio" - "bytes" - "compress/bzip2" - "compress/gzip" - "context" - "encoding/binary" - "errors" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "runtime" - "runtime/debug" - "strconv" - "strings" - "sync" - "sync/atomic" - "syscall" - "time" - - "github.com/containerd/log" - "github.com/docker/docker/pkg/idtools" - "github.com/klauspost/compress/zstd" - "github.com/moby/patternmatcher" - "github.com/moby/sys/sequential" -) - -// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a -// tar, but that do not have their own header entry. -// -// The permissions mask is stored in a constant instead of locally to ensure that magic numbers do not -// proliferate in the codebase. The default value 0755 has been selected based on the default umask of 0022, and -// a convention of mkdir(1) calling mkdir(2) with permissions of 0777, resulting in a final value of 0755. -// -// This value is currently implementation-defined, and not captured in any cross-runtime specification. Thus, it is -// subject to change in Moby at any time -- image authors who require consistent or known directory permissions -// should explicitly control them by ensuring that header entries exist for any applicable path. -const ImpliedDirectoryMode = 0o755 - -type ( - // Compression is the state represents if compressed or not. - Compression int - // WhiteoutFormat is the format of whiteouts unpacked - WhiteoutFormat int - - // TarOptions wraps the tar options. - TarOptions struct { - IncludeFiles []string - ExcludePatterns []string - Compression Compression - NoLchown bool - IDMap idtools.IdentityMapping - ChownOpts *idtools.Identity - IncludeSourceDir bool - // WhiteoutFormat is the expected on disk format for whiteout files. - // This format will be converted to the standard format on pack - // and from the standard format on unpack. - WhiteoutFormat WhiteoutFormat - // When unpacking, specifies whether overwriting a directory with a - // non-directory is allowed and vice versa. - NoOverwriteDirNonDir bool - // For each include when creating an archive, the included name will be - // replaced with the matching name from this map. - RebaseNames map[string]string - InUserNS bool - // Allow unpacking to succeed in spite of failures to set extended - // attributes on the unpacked files due to the destination filesystem - // not supporting them or a lack of permissions. Extended attributes - // were probably in the archive for a reason, so set this option at - // your own peril. - BestEffortXattrs bool - } -) - -// Archiver implements the Archiver interface and allows the reuse of most utility functions of -// this package with a pluggable Untar function. Also, to facilitate the passing of specific id -// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. -type Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - IDMapping idtools.IdentityMapping -} - -// NewDefaultArchiver returns a new Archiver without any IdentityMapping -func NewDefaultArchiver() *Archiver { - return &Archiver{Untar: Untar} -} - -// breakoutError is used to differentiate errors related to breaking out -// When testing archive breakout in the unit tests, this error is expected -// in order for the test to pass. -type breakoutError error - -const ( - Uncompressed Compression = 0 // Uncompressed represents the uncompressed. 
- Bzip2 Compression = 1 // Bzip2 is bzip2 compression algorithm. - Gzip Compression = 2 // Gzip is gzip compression algorithm. - Xz Compression = 3 // Xz is xz compression algorithm. - Zstd Compression = 4 // Zstd is zstd compression algorithm. -) - -const ( - AUFSWhiteoutFormat WhiteoutFormat = 0 // AUFSWhiteoutFormat is the default format for whiteouts - OverlayWhiteoutFormat WhiteoutFormat = 1 // OverlayWhiteoutFormat formats whiteout according to the overlay standard. -) - -// IsArchivePath checks if the (possibly compressed) file at the given path -// starts with a tar file header. -func IsArchivePath(path string) bool { - file, err := os.Open(path) - if err != nil { - return false - } - defer file.Close() - rdr, err := DecompressStream(file) - if err != nil { - return false - } - defer rdr.Close() - r := tar.NewReader(rdr) - _, err = r.Next() - return err == nil -} - -const ( - zstdMagicSkippableStart = 0x184D2A50 - zstdMagicSkippableMask = 0xFFFFFFF0 -) - -var ( - bzip2Magic = []byte{0x42, 0x5A, 0x68} - gzipMagic = []byte{0x1F, 0x8B, 0x08} - xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} - zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} -) - -type matcher = func([]byte) bool - -func magicNumberMatcher(m []byte) matcher { - return func(source []byte) bool { - return bytes.HasPrefix(source, m) - } -} - -// zstdMatcher detects zstd compression algorithm. -// Zstandard compressed data is made of one or more frames. -// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. -// See https://datatracker.ietf.org/doc/html/rfc8878#section-3 for more details. -func zstdMatcher() matcher { - return func(source []byte) bool { - if bytes.HasPrefix(source, zstdMagic) { - // Zstandard frame - return true - } - // skippable frame - if len(source) < 8 { - return false - } - // magic number from 0x184D2A50 to 0x184D2A5F. - if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { - return true - } - return false - } -} - -// DetectCompression detects the compression algorithm of the source. 
-func DetectCompression(source []byte) Compression { - compressionMap := map[Compression]matcher{ - Bzip2: magicNumberMatcher(bzip2Magic), - Gzip: magicNumberMatcher(gzipMagic), - Xz: magicNumberMatcher(xzMagic), - Zstd: zstdMatcher(), - } - for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { - fn := compressionMap[compression] - if fn(source) { - return compression - } - } - return Uncompressed -} - -func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { - args := []string{"xz", "-d", "-c", "-q"} - - return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) -} - -func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { - if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { - noPigz, err := strconv.ParseBool(noPigzEnv) - if err != nil { - log.G(ctx).WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") - } - if noPigz { - log.G(ctx).Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) - return gzip.NewReader(buf) - } - } - - unpigzPath, err := exec.LookPath("unpigz") - if err != nil { - log.G(ctx).Debugf("unpigz binary not found, falling back to go gzip library") - return gzip.NewReader(buf) - } - - log.G(ctx).Debugf("Using %s to decompress", unpigzPath) - - return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) -} - -type readCloserWrapper struct { - io.Reader - closer func() error - closed atomic.Bool -} - -func (r *readCloserWrapper) Close() error { - if !r.closed.CompareAndSwap(false, true) { - log.G(context.TODO()).Error("subsequent attempt to close readCloserWrapper") - if log.GetLevel() >= log.DebugLevel { - log.G(context.TODO()).Errorf("stack trace: %s", string(debug.Stack())) - } - - return nil - } - if r.closer != nil { - return r.closer() - } - return nil -} - -var bufioReader32KPool = &sync.Pool{ - New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) }, -} - -type bufferedReader struct { - buf *bufio.Reader -} - -func newBufferedReader(r io.Reader) *bufferedReader { - buf := bufioReader32KPool.Get().(*bufio.Reader) - buf.Reset(r) - return &bufferedReader{buf} -} - -func (r *bufferedReader) Read(p []byte) (int, error) { - if r.buf == nil { - return 0, io.EOF - } - n, err := r.buf.Read(p) - if err == io.EOF { - r.buf.Reset(nil) - bufioReader32KPool.Put(r.buf) - r.buf = nil - } - return n, err -} - -func (r *bufferedReader) Peek(n int) ([]byte, error) { - if r.buf == nil { - return nil, io.EOF - } - return r.buf.Peek(n) -} - -// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. -func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - buf := newBufferedReader(archive) - bs, err := buf.Peek(10) - if err != nil && err != io.EOF { - // Note: we'll ignore any io.EOF error because there are some odd - // cases where the layer.tar file will be empty (zero bytes) and - // that results in an io.EOF from the Peek() call. So, in those - // cases we'll just treat it as a non-compressed stream and - // that means just create an empty layer. 
- // See Issue 18170 - return nil, err - } - - compression := DetectCompression(bs) - switch compression { - case Uncompressed: - return &readCloserWrapper{ - Reader: buf, - }, nil - case Gzip: - ctx, cancel := context.WithCancel(context.Background()) - - gzReader, err := gzDecompress(ctx, buf) - if err != nil { - cancel() - return nil, err - } - return &readCloserWrapper{ - Reader: gzReader, - closer: func() error { - cancel() - return gzReader.Close() - }, - }, nil - case Bzip2: - bz2Reader := bzip2.NewReader(buf) - return &readCloserWrapper{ - Reader: bz2Reader, - }, nil - case Xz: - ctx, cancel := context.WithCancel(context.Background()) - - xzReader, err := xzDecompress(ctx, buf) - if err != nil { - cancel() - return nil, err - } - - return &readCloserWrapper{ - Reader: xzReader, - closer: func() error { - cancel() - return xzReader.Close() - }, - }, nil - case Zstd: - zstdReader, err := zstd.NewReader(buf) - if err != nil { - return nil, err - } - return &readCloserWrapper{ - Reader: zstdReader, - closer: func() error { - zstdReader.Close() - return nil - }, - }, nil - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -type nopWriteCloser struct { - io.Writer -} - -func (nopWriteCloser) Close() error { return nil } - -// CompressStream compresses the dest with specified compression algorithm. -func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { - switch compression { - case Uncompressed: - return nopWriteCloser{dest}, nil - case Gzip: - return gzip.NewWriter(dest), nil - case Bzip2, Xz: - // archive/bzip2 does not support writing, and there is no xz support at all - // However, this is not a problem as docker only currently generates gzipped tars - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to -// modify the contents or header of an entry in the archive. If the file already -// exists in the archive the TarModifierFunc will be called with the Header and -// a reader which will return the files content. If the file does not exist both -// header and content will be nil. -type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) - -// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the -// tar stream are modified if they match any of the keys in mods. 
-func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { - pipeReader, pipeWriter := io.Pipe() - - go func() { - tarReader := tar.NewReader(inputTarStream) - tarWriter := tar.NewWriter(pipeWriter) - defer inputTarStream.Close() - defer tarWriter.Close() - - modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { - header, data, err := modifier(name, original, tarReader) - switch { - case err != nil: - return err - case header == nil: - return nil - } - - if header.Name == "" { - header.Name = name - } - header.Size = int64(len(data)) - if err := tarWriter.WriteHeader(header); err != nil { - return err - } - if len(data) != 0 { - if _, err := tarWriter.Write(data); err != nil { - return err - } - } - return nil - } - - var err error - var originalHeader *tar.Header - for { - originalHeader, err = tarReader.Next() - if err == io.EOF { - break - } - if err != nil { - pipeWriter.CloseWithError(err) - return - } - - modifier, ok := mods[originalHeader.Name] - if !ok { - // No modifiers for this file, copy the header and data - if err := tarWriter.WriteHeader(originalHeader); err != nil { - pipeWriter.CloseWithError(err) - return - } - if err := copyWithBuffer(tarWriter, tarReader); err != nil { - pipeWriter.CloseWithError(err) - return - } - continue - } - delete(mods, originalHeader.Name) - - if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - - // Apply the modifiers that haven't matched any files in the archive - for name, modifier := range mods { - if err := modify(name, nil, modifier, nil); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - - pipeWriter.Close() - }() - return pipeReader -} - -// Extension returns the extension of a file that uses the specified compression algorithm. -func (compression *Compression) Extension() string { - switch *compression { - case Uncompressed: - return "tar" - case Bzip2: - return "tar.bz2" - case Gzip: - return "tar.gz" - case Xz: - return "tar.xz" - case Zstd: - return "tar.zst" - } - return "" -} - -// assert that we implement [tar.FileInfoNames]. -// -// TODO(thaJeztah): disabled to allow compiling on < go1.23. un-comment once we drop support for older versions of go. -// var _ tar.FileInfoNames = (*nosysFileInfo)(nil) - -// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to -// prevent tar.FileInfoHeader from introspecting it and potentially calling into -// glibc. -// -// It implements [tar.FileInfoNames] to further prevent [tar.FileInfoHeader] -// from performing any lookups on go1.23 and up. see https://go.dev/issue/50102 -type nosysFileInfo struct { - os.FileInfo -} - -// Uname stubs out looking up username. It implements [tar.FileInfoNames] -// to prevent [tar.FileInfoHeader] from loading libraries to perform -// username lookups. -func (fi nosysFileInfo) Uname() (string, error) { - return "", nil -} - -// Gname stubs out looking up group-name. It implements [tar.FileInfoNames] -// to prevent [tar.FileInfoHeader] from loading libraries to perform -// username lookups. -func (fi nosysFileInfo) Gname() (string, error) { - return "", nil -} - -func (fi nosysFileInfo) Sys() interface{} { - // A Sys value of type *tar.Header is safe as it is system-independent. - // The tar.FileInfoHeader function copies the fields into the returned - // header without performing any OS lookups. 
- if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok { - return sys - } - return nil -} - -// sysStat, if non-nil, populates hdr from system-dependent fields of fi. -var sysStat func(fi os.FileInfo, hdr *tar.Header) error - -// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi. -// -// Compared to the archive/tar.FileInfoHeader function, this function is safe to -// call from a chrooted process as it does not populate fields which would -// require operating system lookups. It behaves identically to -// tar.FileInfoHeader when fi is a FileInfo value returned from -// tar.Header.FileInfo(). -// -// When fi is a FileInfo for a native file, such as returned from os.Stat() and -// os.Lstat(), the returned Header value differs from one returned from -// tar.FileInfoHeader in the following ways. The Uname and Gname fields are not -// set as OS lookups would be required to populate them. The AccessTime and -// ChangeTime fields are not currently set (not yet implemented) although that -// is subject to change. Callers which require the AccessTime or ChangeTime -// fields to be zeroed should explicitly zero them out in the returned Header -// value to avoid any compatibility issues in the future. -func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link) - if err != nil { - return nil, err - } - if sysStat != nil { - return hdr, sysStat(fi, hdr) - } - return hdr, nil -} - -// FileInfoHeader creates a populated Header from fi. -// -// Compared to the archive/tar package, this function fills in less information -// but is safe to call from a chrooted process. The AccessTime and ChangeTime -// fields are not set in the returned header, ModTime is truncated to one-second -// precision, and the Uname and Gname fields are only set when fi is a FileInfo -// value returned from tar.Header.FileInfo(). -func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := FileInfoHeaderNoLookups(fi, link) - if err != nil { - return nil, err - } - hdr.Format = tar.FormatPAX - hdr.ModTime = hdr.ModTime.Truncate(time.Second) - hdr.AccessTime = time.Time{} - hdr.ChangeTime = time.Time{} - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - hdr.Name = canonicalTarName(name, fi.IsDir()) - return hdr, nil -} - -const paxSchilyXattr = "SCHILY.xattr." - -// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem -// to a tar header -func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { - const ( - // Values based on linux/include/uapi/linux/capability.h - xattrCapsSz2 = 20 - versionOffset = 3 - vfsCapRevision2 = 2 - vfsCapRevision3 = 3 - ) - capability, _ := lgetxattr(path, "security.capability") - if capability != nil { - if capability[versionOffset] == vfsCapRevision3 { - // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no - // sense outside the user namespace the archive is built in. 
- capability[versionOffset] = vfsCapRevision2 - capability = capability[:xattrCapsSz2] - } - if hdr.PAXRecords == nil { - hdr.PAXRecords = make(map[string]string) - } - hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability) - } - return nil -} - -type tarWhiteoutConverter interface { - ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) - ConvertRead(*tar.Header, string) (bool, error) -} - -type tarAppender struct { - TarWriter *tar.Writer - - // for hardlink mapping - SeenFiles map[uint64]string - IdentityMapping idtools.IdentityMapping - ChownOpts *idtools.Identity - - // For packing and unpacking whiteout files in the - // non standard format. The whiteout files defined - // by the AUFS standard are used as the tar whiteout - // standard. - WhiteoutConverter tarWhiteoutConverter -} - -func newTarAppender(idMapping idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { - return &tarAppender{ - SeenFiles: make(map[uint64]string), - TarWriter: tar.NewWriter(writer), - IdentityMapping: idMapping, - ChownOpts: chownOpts, - } -} - -// canonicalTarName provides a platform-independent and consistent POSIX-style -// path for files and directories to be archived regardless of the platform. -func canonicalTarName(name string, isDir bool) string { - name = filepath.ToSlash(name) - - // suffix with '/' for directories - if isDir && !strings.HasSuffix(name, "/") { - name += "/" - } - return name -} - -// addTarFile adds to the tar archive a file from `path` as `name` -func (ta *tarAppender) addTarFile(path, name string) error { - fi, err := os.Lstat(path) - if err != nil { - return err - } - - var link string - if fi.Mode()&os.ModeSymlink != 0 { - var err error - link, err = os.Readlink(path) - if err != nil { - return err - } - } - - hdr, err := FileInfoHeader(name, fi, link) - if err != nil { - return err - } - if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { - return err - } - - // if it's not a directory and has more than 1 link, - // it's hard linked, so set the type flag accordingly - if !fi.IsDir() && hasHardlinks(fi) { - inode, err := getInodeFromStat(fi.Sys()) - if err != nil { - return err - } - // a link should have a name that it links too - // and that linked name should be first in the tar archive - if oldpath, ok := ta.SeenFiles[inode]; ok { - hdr.Typeflag = tar.TypeLink - hdr.Linkname = oldpath - hdr.Size = 0 // This Must be here for the writer math to add up! - } else { - ta.SeenFiles[inode] = name - } - } - - // check whether the file is overlayfs whiteout - // if yes, skip re-mapping container ID mappings. - isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 - - // handle re-mapping container ID mappings back to host ID mappings before - // writing tar headers/files. 
We skip whiteout files because they were written - // by the kernel and already have proper ownership relative to the host - if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { - fileIDPair, err := getFileUIDGID(fi.Sys()) - if err != nil { - return err - } - hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) - if err != nil { - return err - } - } - - // explicitly override with ChownOpts - if ta.ChownOpts != nil { - hdr.Uid = ta.ChownOpts.UID - hdr.Gid = ta.ChownOpts.GID - } - - if ta.WhiteoutConverter != nil { - wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) - if err != nil { - return err - } - - // If a new whiteout file exists, write original hdr, then - // replace hdr with wo to be written after. Whiteouts should - // always be written after the original. Note the original - // hdr may have been updated to be a whiteout with returning - // a whiteout header - if wo != nil { - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - return fmt.Errorf("tar: cannot use whiteout for non-empty file") - } - hdr = wo - } - } - - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - // We use sequential file access to avoid depleting the standby list on - // Windows. On Linux, this equates to a regular os.Open. - file, err := sequential.Open(path) - if err != nil { - return err - } - - err = copyWithBuffer(ta.TarWriter, file) - file.Close() - if err != nil { - return err - } - } - - return nil -} - -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, opts *TarOptions) error { - var ( - Lchown = true - inUserns, bestEffortXattrs bool - chownOpts *idtools.Identity - ) - - // TODO(thaJeztah): make opts a required argument. - if opts != nil { - Lchown = !opts.NoLchown - inUserns = opts.InUserNS // TODO(thaJeztah): consider deprecating opts.InUserNS and detect locally. - chownOpts = opts.ChownOpts - bestEffortXattrs = opts.BestEffortXattrs - } - - // hdr.Mode is in linux format, which we can use for sycalls, - // but for os.Foo() calls we need the mode converted to os.FileMode, - // so use hdrInfo.Mode() (they differ for e.g. setuid bits) - hdrInfo := hdr.FileInfo() - - switch hdr.Typeflag { - case tar.TypeDir: - // Create directory unless it exists as a directory already. - // In that case we just want to merge the two - if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { - if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { - return err - } - } - - case tar.TypeReg: - // Source is regular file. We use sequential file access to avoid depleting - // the standby list on Windows. On Linux, this equates to a regular os.OpenFile. 
- file, err := sequential.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) - if err != nil { - return err - } - if err := copyWithBuffer(file, reader); err != nil { - _ = file.Close() - return err - } - _ = file.Close() - - case tar.TypeBlock, tar.TypeChar: - if inUserns { // cannot create devices in a userns - log.G(context.TODO()).WithFields(log.Fields{"path": path, "type": hdr.Typeflag}).Debug("skipping device nodes in a userns") - return nil - } - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeFifo: - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - if inUserns && errors.Is(err, syscall.EPERM) { - // In most cases, cannot create a fifo if running in user namespace - log.G(context.TODO()).WithFields(log.Fields{"error": err, "path": path, "type": hdr.Typeflag}).Debug("creating fifo node in a userns") - return nil - } - return err - } - - case tar.TypeLink: - // #nosec G305 -- The target path is checked for path traversal. - targetPath := filepath.Join(extractDir, hdr.Linkname) - // check for hardlink breakout - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) - } - if err := os.Link(targetPath, path); err != nil { - return err - } - - case tar.TypeSymlink: - // path -> hdr.Linkname = targetPath - // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file - targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // #nosec G305 -- The target path is checked for path traversal. - - // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because - // that symlink would first have to be created, which would be caught earlier, at this very check: - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) - } - if err := os.Symlink(hdr.Linkname, path); err != nil { - return err - } - - case tar.TypeXGlobalHeader: - log.G(context.TODO()).Debug("PAX Global Extended Headers found and ignored") - return nil - - default: - return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) - } - - // Lchown is not supported on Windows. - if Lchown && runtime.GOOS != "windows" { - if chownOpts == nil { - chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} - } - if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { - var msg string - if inUserns && errors.Is(err, syscall.EINVAL) { - msg = " (try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)" - } - return fmt.Errorf("failed to Lchown %q for UID %d, GID %d%s: %w", path, hdr.Uid, hdr.Gid, msg, err) - } - } - - var xattrErrs []string - for key, value := range hdr.PAXRecords { - xattr, ok := strings.CutPrefix(key, paxSchilyXattr) - if !ok { - continue - } - if err := lsetxattr(path, xattr, []byte(value), 0); err != nil { - if bestEffortXattrs && errors.Is(err, syscall.ENOTSUP) || errors.Is(err, syscall.EPERM) { - // EPERM occurs if modifying xattrs is not allowed. This can - // happen when running in userns with restrictions (ChromeOS). - xattrErrs = append(xattrErrs, err.Error()) - continue - } - return err - } - } - - if len(xattrErrs) > 0 { - log.G(context.TODO()).WithFields(log.Fields{ - "errors": xattrErrs, - }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") - } - - // There is no LChmod, so ignore mode for symlink. 
Also, this - // must happen after chown, as that can modify the file mode - if err := handleLChmod(hdr, path, hdrInfo); err != nil { - return err - } - - aTime := boundTime(latestTime(hdr.AccessTime, hdr.ModTime)) - mTime := boundTime(hdr.ModTime) - - // chtimes doesn't support a NOFOLLOW flag atm - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := chtimes(path, aTime, mTime); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := chtimes(path, aTime, mTime); err != nil { - return err - } - } else { - if err := lchtimes(path, aTime, mTime); err != nil { - return err - } - } - return nil -} - -// Tar creates an archive from the directory at `path`, and returns it as a -// stream of bytes. -func Tar(path string, compression Compression) (io.ReadCloser, error) { - return TarWithOptions(path, &TarOptions{Compression: compression}) -} - -// TarWithOptions creates an archive from the directory at `path`, only including files whose relative -// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. -func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - tb, err := NewTarballer(srcPath, options) - if err != nil { - return nil, err - } - go tb.Do() - return tb.Reader(), nil -} - -// Tarballer is a lower-level interface to TarWithOptions which gives the caller -// control over which goroutine the archiving operation executes on. -type Tarballer struct { - srcPath string - options *TarOptions - pm *patternmatcher.PatternMatcher - pipeReader *io.PipeReader - pipeWriter *io.PipeWriter - compressWriter io.WriteCloser - whiteoutConverter tarWhiteoutConverter -} - -// NewTarballer constructs a new tarballer. The arguments are the same as for -// TarWithOptions. -func NewTarballer(srcPath string, options *TarOptions) (*Tarballer, error) { - pm, err := patternmatcher.New(options.ExcludePatterns) - if err != nil { - return nil, err - } - - pipeReader, pipeWriter := io.Pipe() - - compressWriter, err := CompressStream(pipeWriter, options.Compression) - if err != nil { - return nil, err - } - - return &Tarballer{ - // Fix the source path to work with long path names. This is a no-op - // on platforms other than Windows. - srcPath: addLongPathPrefix(srcPath), - options: options, - pm: pm, - pipeReader: pipeReader, - pipeWriter: pipeWriter, - compressWriter: compressWriter, - whiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat), - }, nil -} - -// Reader returns the reader for the created archive. -func (t *Tarballer) Reader() io.ReadCloser { - return t.pipeReader -} - -// Do performs the archiving operation in the background. The resulting archive -// can be read from t.Reader(). Do should only be called once on each Tarballer -// instance. -func (t *Tarballer) Do() { - ta := newTarAppender( - t.options.IDMap, - t.compressWriter, - t.options.ChownOpts, - ) - ta.WhiteoutConverter = t.whiteoutConverter - - defer func() { - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - log.G(context.TODO()).Errorf("Can't close tar writer: %s", err) - } - if err := t.compressWriter.Close(); err != nil { - log.G(context.TODO()).Errorf("Can't close compress writer: %s", err) - } - if err := t.pipeWriter.Close(); err != nil { - log.G(context.TODO()).Errorf("Can't close pipe writer: %s", err) - } - }() - - // In general we log errors here but ignore them because - // during e.g. 
a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - - stat, err := os.Lstat(t.srcPath) - if err != nil { - return - } - - if !stat.IsDir() { - // We can't later join a non-dir with any includes because the - // 'walk' will error if "file/." is stat-ed and "file" is not a - // directory. So, we must split the source path and use the - // basename as the include. - if len(t.options.IncludeFiles) > 0 { - log.G(context.TODO()).Warn("Tar: Can't archive a file with includes") - } - - dir, base := SplitPathDirEntry(t.srcPath) - t.srcPath = dir - t.options.IncludeFiles = []string{base} - } - - if len(t.options.IncludeFiles) == 0 { - t.options.IncludeFiles = []string{"."} - } - - seen := make(map[string]bool) - - for _, include := range t.options.IncludeFiles { - rebaseName := t.options.RebaseNames[include] - - var ( - parentMatchInfo []patternmatcher.MatchInfo - parentDirs []string - ) - - walkRoot := getWalkRoot(t.srcPath, include) - filepath.WalkDir(walkRoot, func(filePath string, f os.DirEntry, err error) error { - if err != nil { - log.G(context.TODO()).Errorf("Tar: Can't stat file %s to tar: %s", t.srcPath, err) - return nil - } - - relFilePath, err := filepath.Rel(t.srcPath, filePath) - if err != nil || (!t.options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { - // Error getting relative path OR we are looking - // at the source directory path. Skip in both situations. - return nil - } - - if t.options.IncludeSourceDir && include == "." && relFilePath != "." { - relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) - } - - skip := false - - // If "include" is an exact match for the current file - // then even if there's an "excludePatterns" pattern that - // matches it, don't skip it. IOW, assume an explicit 'include' - // is asking for that file no matter what - which is true - // for some files, like .dockerignore and Dockerfile (sometimes) - if include != relFilePath { - for len(parentDirs) != 0 { - lastParentDir := parentDirs[len(parentDirs)-1] - if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { - break - } - parentDirs = parentDirs[:len(parentDirs)-1] - parentMatchInfo = parentMatchInfo[:len(parentMatchInfo)-1] - } - - var matchInfo patternmatcher.MatchInfo - if len(parentMatchInfo) != 0 { - skip, matchInfo, err = t.pm.MatchesUsingParentResults(relFilePath, parentMatchInfo[len(parentMatchInfo)-1]) - } else { - skip, matchInfo, err = t.pm.MatchesUsingParentResults(relFilePath, patternmatcher.MatchInfo{}) - } - if err != nil { - log.G(context.TODO()).Errorf("Error matching %s: %v", relFilePath, err) - return err - } - - if f.IsDir() { - parentDirs = append(parentDirs, relFilePath) - parentMatchInfo = append(parentMatchInfo, matchInfo) - } - } - - if skip { - // If we want to skip this file and its a directory - // then we should first check to see if there's an - // excludes pattern (e.g. !dir/file) that starts with this - // dir. If so then we can't skip this dir. - - // Its not a dir then so we can just return/skip. - if !f.IsDir() { - return nil - } - - // No exceptions (!...) 
in patterns so just skip dir - if !t.pm.Exclusions() { - return filepath.SkipDir - } - - dirSlash := relFilePath + string(filepath.Separator) - - for _, pat := range t.pm.Patterns() { - if !pat.Exclusion() { - continue - } - if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { - // found a match - so can't skip this dir - return nil - } - } - - // No matching exclusion dir so just skip dir - return filepath.SkipDir - } - - if seen[relFilePath] { - return nil - } - seen[relFilePath] = true - - // Rename the base resource. - if rebaseName != "" { - var replacement string - if rebaseName != string(filepath.Separator) { - // Special case the root directory to replace with an - // empty string instead so that we don't end up with - // double slashes in the paths. - replacement = rebaseName - } - - relFilePath = strings.Replace(relFilePath, include, replacement, 1) - } - - if err := ta.addTarFile(filePath, relFilePath); err != nil { - log.G(context.TODO()).Errorf("Can't add file %s to tar: %s", filePath, err) - // if pipe is broken, stop writing tar stream to it - if err == io.ErrClosedPipe { - return err - } - } - return nil - }) - } -} - -// Unpack unpacks the decompressedArchive to dest with options. -func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { - tr := tar.NewReader(decompressedArchive) - - var dirs []*tar.Header - whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) - - // Iterate through the files in the archive. -loop: - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return err - } - - // ignore XGlobalHeader early to avoid creating parent directories for them - if hdr.Typeflag == tar.TypeXGlobalHeader { - log.G(context.TODO()).Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) - continue - } - - // Normalize name, for safety and for a simple is-root check - // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: - // This keeps "..\" as-is, but normalizes "\..\" to "\". - hdr.Name = filepath.Clean(hdr.Name) - - for _, exclude := range options.ExcludePatterns { - if strings.HasPrefix(hdr.Name, exclude) { - continue loop - } - } - - // Ensure that the parent directory exists. - err = createImpliedDirectories(dest, hdr, options) - if err != nil { - return err - } - - // #nosec G305 -- The joined path is checked for path traversal. - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return err - } - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - - // If path exits we almost always just want to remove and replace it - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing directory with a non-directory from the archive. - return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) - } - - if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing non-directory with a directory from the archive. 
- return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) - } - - if fi.IsDir() && hdr.Name == "." { - continue - } - - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return err - } - } - } - - if err := remapIDs(options.IDMap, hdr); err != nil { - return err - } - - if whiteoutConverter != nil { - writeFile, err := whiteoutConverter.ConvertRead(hdr, path) - if err != nil { - return err - } - if !writeFile { - continue - } - } - - if err := createTarFile(path, dest, hdr, tr, options); err != nil { - return err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - } - - for _, hdr := range dirs { - // #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. - path := filepath.Join(dest, hdr.Name) - - if err := chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)); err != nil { - return err - } - } - return nil -} - -// createImpliedDirectories will create all parent directories of the current path with default permissions, if they do -// not already exist. This is possible as the tar format supports 'implicit' directories, where their existence is -// defined by the paths of files in the tar, but there are no header entries for the directories themselves, and thus -// we most both create them and choose metadata like permissions. -// -// The caller should have performed filepath.Clean(hdr.Name), so hdr.Name will now be in the filepath format for the OS -// on which the daemon is running. This precondition is required because this function assumes a OS-specific path -// separator when checking that a path is not the root. -func createImpliedDirectories(dest string, hdr *tar.Header, options *TarOptions) error { - // Not the root directory, ensure that the parent directory exists - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - // RootPair() is confined inside this loop as most cases will not require a call, so we can spend some - // unneeded function calls in the uncommon case to encapsulate logic -- implied directories are a niche - // usage that reduces the portability of an image. - rootIDs := options.IDMap.RootPair() - - err = idtools.MkdirAllAndChownNew(parentPath, ImpliedDirectoryMode, rootIDs) - if err != nil { - return err - } - } - } - - return nil -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -// -// FIXME: specify behavior when target path exists vs. doesn't exist. -func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, true) -} - -// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. 
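// A short usage sketch for Untar as documented above, from a caller outside
// this package; "layer.tar" and the destination path are illustrative only.
// Passing nil options is safe (the handler substitutes an empty TarOptions),
// and compression (gzip, bzip2, xz) is detected automatically.

package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	f, err := os.Open("layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	// Entries that would escape the destination are rejected during Unpack.
	if err := archive.Untar(f, "/tmp/rootfs", nil); err != nil {
		log.Fatal(err)
	}
}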
-func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - dest = filepath.Clean(dest) - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - r := tarArchive - if decompress { - decompressedArchive, err := DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return Unpack(r, dest, options) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func (archiver *Archiver) TarUntar(src, dst string) error { - archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) - if err != nil { - return err - } - defer archive.Close() - options := &TarOptions{ - IDMap: archiver.IDMapping, - } - return archiver.Untar(archive, dst, options) -} - -// UntarPath untar a file from path to a destination, src is the source tar file path. -func (archiver *Archiver) UntarPath(src, dst string) error { - archive, err := os.Open(src) - if err != nil { - return err - } - defer archive.Close() - options := &TarOptions{ - IDMap: archiver.IDMapping, - } - return archiver.Untar(archive, dst, options) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func (archiver *Archiver) CopyWithTar(src, dst string) error { - srcSt, err := os.Stat(src) - if err != nil { - return err - } - if !srcSt.IsDir() { - return archiver.CopyFileWithTar(src, dst) - } - - // if this Archiver is set up with ID mapping we need to create - // the new destination directory with the remapped root UID/GID pair - // as owner - rootIDs := archiver.IDMapping.RootPair() - // Create dst, copy src's content into it - if err := idtools.MkdirAllAndChownNew(dst, 0o755, rootIDs); err != nil { - return err - } - return archiver.TarUntar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { - srcSt, err := os.Stat(src) - if err != nil { - return err - } - - if srcSt.IsDir() { - return fmt.Errorf("Can't copy a directory") - } - - // Clean up the trailing slash. This must be done in an operating - // system specific manner. 
- if dst[len(dst)-1] == os.PathSeparator { - dst = filepath.Join(dst, filepath.Base(src)) - } - // Create the holding directory if necessary - if err := os.MkdirAll(filepath.Dir(dst), 0o700); err != nil { - return err - } - - r, w := io.Pipe() - errC := make(chan error, 1) - - go func() { - defer close(errC) - - errC <- func() error { - defer w.Close() - - srcF, err := os.Open(src) - if err != nil { - return err - } - defer srcF.Close() - - hdr, err := FileInfoHeaderNoLookups(srcSt, "") - if err != nil { - return err - } - hdr.Format = tar.FormatPAX - hdr.ModTime = hdr.ModTime.Truncate(time.Second) - hdr.AccessTime = time.Time{} - hdr.ChangeTime = time.Time{} - hdr.Name = filepath.Base(dst) - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - if err := remapIDs(archiver.IDMapping, hdr); err != nil { - return err - } - - tw := tar.NewWriter(w) - defer tw.Close() - if err := tw.WriteHeader(hdr); err != nil { - return err - } - if err := copyWithBuffer(tw, srcF); err != nil { - return err - } - return nil - }() - }() - defer func() { - if er := <-errC; err == nil && er != nil { - err = er - } - }() - - err = archiver.Untar(r, filepath.Dir(dst), nil) - if err != nil { - r.CloseWithError(err) - } - return err -} - -// IdentityMapping returns the IdentityMapping of the archiver. -func (archiver *Archiver) IdentityMapping() idtools.IdentityMapping { - return archiver.IDMapping -} - -func remapIDs(idMapping idtools.IdentityMapping, hdr *tar.Header) error { - ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) - hdr.Uid, hdr.Gid = ids.UID, ids.GID - return err -} - -// cmdStream executes a command, and returns its stdout as a stream. -// If the command fails to run or doesn't complete successfully, an error -// will be returned, including anything written on stderr. -func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { - cmd.Stdin = input - pipeR, pipeW := io.Pipe() - cmd.Stdout = pipeW - var errBuf bytes.Buffer - cmd.Stderr = &errBuf - - // Run the command and return the pipe - if err := cmd.Start(); err != nil { - return nil, err - } - - // Ensure the command has exited before we clean anything up - done := make(chan struct{}) - - // Copy stdout to the returned pipe - go func() { - if err := cmd.Wait(); err != nil { - pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) - } else { - pipeW.Close() - } - close(done) - }() - - return &readCloserWrapper{ - Reader: pipeR, - closer: func() error { - // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as - // cmd.Wait waits for any non-file stdout/stderr/stdin to close. - err := pipeR.Close() - <-done - return err - }, - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/archive_deprecated.go new file mode 100644 index 0000000..5bdbdef --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_deprecated.go @@ -0,0 +1,259 @@ +// Package archive provides helper functions for dealing with archive files. +package archive + +import ( + "archive/tar" + "io" + "os" + + "github.com/docker/docker/pkg/idtools" + "github.com/moby/go-archive" + "github.com/moby/go-archive/compression" + "github.com/moby/go-archive/tarheader" +) + +// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a +// tar, but that do not have their own header entry. 
+// +// Deprecated: use [archive.ImpliedDirectoryMode] instead. +const ImpliedDirectoryMode = archive.ImpliedDirectoryMode + +type ( + // Compression is the state represents if compressed or not. + // + // Deprecated: use [compression.Compression] instead. + Compression = compression.Compression + // WhiteoutFormat is the format of whiteouts unpacked + // + // Deprecated: use [archive.WhiteoutFormat] instead. + WhiteoutFormat = archive.WhiteoutFormat + + // TarOptions wraps the tar options. + // + // Deprecated: use [archive.TarOptions] instead. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression compression.Compression + NoLchown bool + IDMap idtools.IdentityMapping + ChownOpts *idtools.Identity + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat archive.WhiteoutFormat + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool + // Allow unpacking to succeed in spite of failures to set extended + // attributes on the unpacked files due to the destination filesystem + // not supporting them or a lack of permissions. Extended attributes + // were probably in the archive for a reason, so set this option at + // your own peril. + BestEffortXattrs bool + } +) + +// Archiver implements the Archiver interface and allows the reuse of most utility functions of +// this package with a pluggable Untar function. Also, to facilitate the passing of specific id +// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. +// +// Deprecated: use [archive.Archiver] instead. +type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + IDMapping idtools.IdentityMapping +} + +// NewDefaultArchiver returns a new Archiver without any IdentityMapping +// +// Deprecated: use [archive.NewDefaultArchiver] instead. +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar} +} + +const ( + Uncompressed = compression.None // Deprecated: use [compression.None] instead. + Bzip2 = compression.Bzip2 // Deprecated: use [compression.Bzip2] instead. + Gzip = compression.Gzip // Deprecated: use [compression.Gzip] instead. + Xz = compression.Xz // Deprecated: use [compression.Xz] instead. + Zstd = compression.Zstd // Deprecated: use [compression.Zstd] instead. +) + +const ( + AUFSWhiteoutFormat = archive.AUFSWhiteoutFormat // Deprecated: use [archive.AUFSWhiteoutFormat] instead. + OverlayWhiteoutFormat = archive.OverlayWhiteoutFormat // Deprecated: use [archive.OverlayWhiteoutFormat] instead. +) + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +// +// Deprecated: use [archive.IsArchivePath] instead. +func IsArchivePath(path string) bool { + return archive.IsArchivePath(path) +} + +// DetectCompression detects the compression algorithm of the source. +// +// Deprecated: use [compression.Detect] instead. +func DetectCompression(source []byte) archive.Compression { + return compression.Detect(source) +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. 
+// +// Deprecated: use [compression.DecompressStream] instead. +func DecompressStream(arch io.Reader) (io.ReadCloser, error) { + return compression.DecompressStream(arch) +} + +// CompressStream compresses the dest with specified compression algorithm. +// +// Deprecated: use [compression.CompressStream] instead. +func CompressStream(dest io.Writer, comp compression.Compression) (io.WriteCloser, error) { + return compression.CompressStream(dest, comp) +} + +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper. +// +// Deprecated: use [archive.TarModifierFunc] instead. +type TarModifierFunc = archive.TarModifierFunc + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. +// +// Deprecated: use [archive.ReplaceFileTarWrapper] instead. +func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]archive.TarModifierFunc) io.ReadCloser { + return archive.ReplaceFileTarWrapper(inputTarStream, mods) +} + +// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi. +// +// Deprecated: use [tarheader.FileInfoHeaderNoLookups] instead. +func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { + return tarheader.FileInfoHeaderNoLookups(fi, link) +} + +// FileInfoHeader creates a populated Header from fi. +// +// Deprecated: use [archive.FileInfoHeader] instead. +func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + return archive.FileInfoHeader(name, fi, link) +} + +// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem +// to a tar header +// +// Deprecated: use [archive.ReadSecurityXattrToTarHeader] instead. +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + return archive.ReadSecurityXattrToTarHeader(path, hdr) +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +// +// Deprecated: use [archive.Tar] instead. +func Tar(path string, compression archive.Compression) (io.ReadCloser, error) { + return archive.TarWithOptions(path, &archive.TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive with the given options. +// +// Deprecated: use [archive.TarWithOptions] instead. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + return archive.TarWithOptions(srcPath, toArchiveOpt(options)) +} + +// Tarballer is a lower-level interface to TarWithOptions. +// +// Deprecated: use [archive.Tarballer] instead. +type Tarballer = archive.Tarballer + +// NewTarballer constructs a new tarballer using TarWithOptions. +// +// Deprecated: use [archive.Tarballer] instead. +func NewTarballer(srcPath string, options *TarOptions) (*archive.Tarballer, error) { + return archive.NewTarballer(srcPath, toArchiveOpt(options)) +} + +// Unpack unpacks the decompressedArchive to dest with options. +// +// Deprecated: use [archive.Unpack] instead. +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + return archive.Unpack(decompressedArchive, dest, toArchiveOpt(options)) +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// +// Deprecated: use [archive.Untar] instead. 
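// A migration sketch matching the deprecation notes in this file: the old
// docker/docker/pkg/archive entry points are thin wrappers, so callers can
// switch to moby/go-archive directly. The repack helper is hypothetical.

package main

import (
	"io"
	"os"

	"github.com/moby/go-archive"
	"github.com/moby/go-archive/compression"
)

// repack tars a directory with gzip compression using the replacement
// package instead of the deprecated Tar/TarWithOptions shims above.
func repack(src string, dst io.Writer) error {
	rc, err := archive.TarWithOptions(src, &archive.TarOptions{
		Compression: compression.Gzip,
	})
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(dst, rc)
	return err
}

func main() {
	if err := repack(".", os.Stdout); err != nil {
		panic(err)
	}
}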
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return archive.Untar(tarArchive, dest, toArchiveOpt(options)) +} + +// UntarUncompressed reads a stream of bytes from `tarArchive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +// +// Deprecated: use [archive.UntarUncompressed] instead. +func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return archive.UntarUncompressed(tarArchive, dest, toArchiveOpt(options)) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + return (&archive.Archiver{ + Untar: func(reader io.Reader, s string, options *archive.TarOptions) error { + return archiver.Untar(reader, s, &TarOptions{ + IDMap: archiver.IDMapping, + }) + }, + IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping), + }).TarUntar(src, dst) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + return (&archive.Archiver{ + Untar: func(reader io.Reader, s string, options *archive.TarOptions) error { + return archiver.Untar(reader, s, &TarOptions{ + IDMap: archiver.IDMapping, + }) + }, + IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping), + }).UntarPath(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + return (&archive.Archiver{ + Untar: func(reader io.Reader, s string, options *archive.TarOptions) error { + return archiver.Untar(reader, s, nil) + }, + IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping), + }).CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + return (&archive.Archiver{ + Untar: func(reader io.Reader, s string, options *archive.TarOptions) error { + return archiver.Untar(reader, s, nil) + }, + IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping), + }).CopyFileWithTar(src, dst) +} + +// IdentityMapping returns the IdentityMapping of the archiver. 
+func (archiver *Archiver) IdentityMapping() idtools.IdentityMapping { + return archiver.IDMapping +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go deleted file mode 100644 index 7b6c3e0..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ /dev/null @@ -1,107 +0,0 @@ -package archive - -import ( - "archive/tar" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/moby/sys/userns" - "golang.org/x/sys/unix" -) - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - if format == OverlayWhiteoutFormat { - return overlayWhiteoutConverter{} - } - return nil -} - -type overlayWhiteoutConverter struct{} - -func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, _ error) { - // convert whiteouts to AUFS format - if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { - // we just rename the file and make it normal - dir, filename := filepath.Split(hdr.Name) - hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) - hdr.Mode = 0o600 - hdr.Typeflag = tar.TypeReg - hdr.Size = 0 - } - - if fi.Mode()&os.ModeDir == 0 { - // FIXME(thaJeztah): return a sentinel error instead of nil, nil - return nil, nil - } - - opaqueXattrName := "trusted.overlay.opaque" - if userns.RunningInUserNS() { - opaqueXattrName = "user.overlay.opaque" - } - - // convert opaque dirs to AUFS format by writing an empty file with the prefix - opaque, err := lgetxattr(path, opaqueXattrName) - if err != nil { - return nil, err - } - if len(opaque) != 1 || opaque[0] != 'y' { - // FIXME(thaJeztah): return a sentinel error instead of nil, nil - return nil, nil - } - delete(hdr.PAXRecords, paxSchilyXattr+opaqueXattrName) - - // create a header for the whiteout file - // it should inherit some properties from the parent, but be a regular file - return &tar.Header{ - Typeflag: tar.TypeReg, - Mode: hdr.Mode & int64(os.ModePerm), - Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), // #nosec G305 -- An archive is being created, not extracted. 
- Size: 0, - Uid: hdr.Uid, - Uname: hdr.Uname, - Gid: hdr.Gid, - Gname: hdr.Gname, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - }, nil -} - -func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { - base := filepath.Base(path) - dir := filepath.Dir(path) - - // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay - if base == WhiteoutOpaqueDir { - opaqueXattrName := "trusted.overlay.opaque" - if userns.RunningInUserNS() { - opaqueXattrName = "user.overlay.opaque" - } - - err := unix.Setxattr(dir, opaqueXattrName, []byte{'y'}, 0) - if err != nil { - return false, fmt.Errorf("setxattr('%s', %s=y): %w", dir, opaqueXattrName, err) - } - // don't write the file itself - return false, err - } - - // if a file was deleted and we are using overlay, we need to create a character device - if strings.HasPrefix(base, WhiteoutPrefix) { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - - if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { - return false, fmt.Errorf("failed to mknod('%s', S_IFCHR, 0): %w", originalPath, err) - } - if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { - return false, err - } - - // don't write the file itself - return false, nil - } - - return true, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go deleted file mode 100644 index 6495549..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_other.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build !linux - -package archive - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go deleted file mode 100644 index bc6b25a..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ /dev/null @@ -1,126 +0,0 @@ -//go:build !windows - -package archive - -import ( - "archive/tar" - "errors" - "os" - "path/filepath" - "runtime" - "strings" - "syscall" - - "github.com/docker/docker/pkg/idtools" - "golang.org/x/sys/unix" -) - -func init() { - sysStat = statUnix -} - -// addLongPathPrefix adds the Windows long path prefix to the path provided if -// it does not already have it. It is a no-op on platforms other than Windows. -func addLongPathPrefix(srcPath string) string { - return srcPath -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. On Linux, we -// can't use filepath.Join(srcPath,include) because this will clean away -// a trailing "." or "/" which may be important. -func getWalkRoot(srcPath string, include string) string { - return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. -func chmodTarEntry(perm os.FileMode) os.FileMode { - return perm // noop for unix as golang APIs provide perm bits correctly -} - -// statUnix populates hdr from system-dependent fields of fi without performing -// any OS lookups. -func statUnix(fi os.FileInfo, hdr *tar.Header) error { - // Devmajor and Devminor are only needed for special devices. 
- - // In FreeBSD, RDev for regular files is -1 (unless overridden by FS): - // https://cgit.freebsd.org/src/tree/sys/kern/vfs_default.c?h=stable/13#n1531 - // (NODEV is -1: https://cgit.freebsd.org/src/tree/sys/sys/param.h?h=stable/13#n241). - - // ZFS in particular does not override the default: - // https://cgit.freebsd.org/src/tree/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c?h=stable/13#n2027 - - // Since `Stat_t.Rdev` is uint64, the cast turns -1 into (2^64 - 1). - // Such large values cannot be encoded in a tar header. - if runtime.GOOS == "freebsd" && hdr.Typeflag != tar.TypeBlock && hdr.Typeflag != tar.TypeChar { - return nil - } - s, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return nil - } - - hdr.Uid = int(s.Uid) - hdr.Gid = int(s.Gid) - - if s.Mode&unix.S_IFBLK != 0 || - s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert - hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert - } - - return nil -} - -func getInodeFromStat(stat interface{}) (uint64, error) { - s, ok := stat.(*syscall.Stat_t) - if !ok { - // FIXME(thaJeztah): this should likely return an error; see https://github.com/moby/moby/pull/49493#discussion_r1979152897 - return 0, nil - } - return s.Ino, nil -} - -func getFileUIDGID(stat interface{}) (idtools.Identity, error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t") - } - return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo. -// -// Creating device nodes is not supported when running in a user namespace, -// produces a [syscall.EPERM] in most cases. -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - mode := uint32(hdr.Mode & 0o7777) - switch hdr.Typeflag { - case tar.TypeBlock: - mode |= unix.S_IFBLK - case tar.TypeChar: - mode |= unix.S_IFCHR - case tar.TypeFifo: - mode |= unix.S_IFIFO - } - - return mknod(path, mode, unix.Mkdev(uint32(hdr.Devmajor), uint32(hdr.Devminor))) -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go deleted file mode 100644 index fd2546e..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go +++ /dev/null @@ -1,69 +0,0 @@ -package archive - -import ( - "archive/tar" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/idtools" -) - -// longPathPrefix is the longpath prefix for Windows file paths. -const longPathPrefix = `\\?\` - -// addLongPathPrefix adds the Windows long path prefix to the path provided if -// it does not already have it. It is a no-op on platforms other than Windows. -// -// addLongPathPrefix is a copy of [github.com/docker/docker/pkg/longpath.AddPrefix]. 
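// A tiny standalone illustration of the long-path prefixing implemented
// below, following the same rules: already-prefixed paths pass through,
// UNC paths gain a `UNC` component, everything else gains `\\?\`. The
// sample paths are examples only.

package main

import (
	"fmt"
	"strings"
)

const longPathPrefix = `\\?\`

func addPrefix(p string) string {
	if strings.HasPrefix(p, longPathPrefix) {
		return p
	}
	if strings.HasPrefix(p, `\\`) {
		return longPathPrefix + `UNC` + p[1:] // \\host\share -> \\?\UNC\host\share
	}
	return longPathPrefix + p
}

func main() {
	fmt.Println(addPrefix(`C:\some\deep\path`)) // \\?\C:\some\deep\path
	fmt.Println(addPrefix(`\\host\share\file`)) // \\?\UNC\host\share\file
}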
-func addLongPathPrefix(srcPath string) string { - if strings.HasPrefix(srcPath, longPathPrefix) { - return srcPath - } - if strings.HasPrefix(srcPath, `\\`) { - // This is a UNC path, so we need to add 'UNC' to the path as well. - return longPathPrefix + `UNC` + srcPath[1:] - } - return longPathPrefix + srcPath -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. -func getWalkRoot(srcPath string, include string) string { - return filepath.Join(srcPath, include) -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. -func chmodTarEntry(perm os.FileMode) os.FileMode { - // Remove group- and world-writable bits. - perm &= 0o755 - - // Add the x bit: make everything +x on Windows - return perm | 0o111 -} - -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { - // do nothing. no notion of Rdev, Nlink in stat on Windows - return -} - -func getInodeFromStat(stat interface{}) (uint64, error) { - // do nothing. no notion of Inode in stat on Windows - return 0, nil -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - return nil -} - -func getFileUIDGID(stat interface{}) (idtools.Identity, error) { - // no notion of file ownership mapping yet on Windows - return idtools.Identity{UID: 0, GID: 0}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go deleted file mode 100644 index 1c0509d..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ /dev/null @@ -1,430 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "context" - "fmt" - "io" - "io/fs" - "os" - "path/filepath" - "sort" - "strings" - "time" - - "github.com/containerd/log" - "github.com/docker/docker/pkg/idtools" -) - -// ChangeType represents the change type. -type ChangeType int - -const ( - ChangeModify = 0 // ChangeModify represents the modify operation. - ChangeAdd = 1 // ChangeAdd represents the add operation. - ChangeDelete = 2 // ChangeDelete represents the delete operation. -) - -func (c ChangeType) String() string { - switch c { - case ChangeModify: - return "C" - case ChangeAdd: - return "A" - case ChangeDelete: - return "D" - } - return "" -} - -// Change represents a change, it wraps the change type and path. -// It describes changes of the files in the path respect to the -// parent layers. The change could be modify, add, delete. -// This is used for layer diff. -type Change struct { - Path string - Kind ChangeType -} - -func (change *Change) String() string { - return fmt.Sprintf("%s %s", change.Kind, change.Path) -} - -// for sort.Sort -type changesByPath []Change - -func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } -func (c changesByPath) Len() int { return len(c) } -func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } - -// Gnu tar doesn't have sub-second mtime precision. The go tar -// writer (1.10+) does when using PAX format, but we round times to seconds -// to ensure archives have the same hashes for backwards compatibility. 
-// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4. -// -// Non-sub-second is problematic when we apply changes via tar -// files. We handle this by comparing for exact times, *or* same -// second count and either a or b having exactly 0 nanoseconds -func sameFsTime(a, b time.Time) bool { - return a.Equal(b) || - (a.Unix() == b.Unix() && - (a.Nanosecond() == 0 || b.Nanosecond() == 0)) -} - -// Changes walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -func Changes(layers []string, rw string) ([]Change, error) { - return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) -} - -func aufsMetadataSkip(path string) (skip bool, err error) { - skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) - if err != nil { - skip = true - } - return skip, err -} - -func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { - f := filepath.Base(path) - - // If there is a whiteout, then the file was removed - if strings.HasPrefix(f, WhiteoutPrefix) { - originalFile := f[len(WhiteoutPrefix):] - return filepath.Join(filepath.Dir(path), originalFile), nil - } - - return "", nil -} - -type ( - skipChange func(string) (bool, error) - deleteChange func(string, string, os.FileInfo) (string, error) -) - -func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { - var ( - changes []Change - changedDirs = make(map[string]struct{}) - ) - - err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - path, err = filepath.Rel(rw, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - path = filepath.Join(string(os.PathSeparator), path) - - // Skip root - if path == string(os.PathSeparator) { - return nil - } - - if sc != nil { - if skip, err := sc(path); skip { - return err - } - } - - change := Change{ - Path: path, - } - - deletedFile, err := dc(rw, path, f) - if err != nil { - return err - } - - // Find out what kind of modification happened - if deletedFile != "" { - change.Path = deletedFile - change.Kind = ChangeDelete - } else { - // Otherwise, the file was added - change.Kind = ChangeAdd - - // ...Unless it already existed in a top layer, in which case, it's a modification - for _, layer := range layers { - stat, err := os.Stat(filepath.Join(layer, path)) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - // The file existed in the top layer, so that's a modification - - // However, if it's a directory, maybe it wasn't actually modified. - // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar - if stat.IsDir() && f.IsDir() { - if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { - // Both directories are the same, don't record the change - return nil - } - } - change.Kind = ChangeModify - break - } - } - } - - // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. - // This block is here to ensure the change is recorded even if the - // modify time, mode and size of the parent directory in the rw and ro layers are all equal. - // Check https://github.com/docker/docker/pull/13590 for details. 
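// A quick standalone check of the sameFsTime rule defined earlier in this
// file: equal timestamps match, and second-identical timestamps match when
// either side carries zero nanoseconds (the GNU-tar truncation case). The
// timestamps below are arbitrary examples.

package main

import (
	"fmt"
	"time"
)

func sameFsTime(a, b time.Time) bool {
	return a.Equal(b) ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func main() {
	base := time.Unix(1700000000, 0)
	fmt.Println(sameFsTime(base, base))                          // true
	fmt.Println(sameFsTime(base, base.Add(500*time.Nanosecond))) // true: same second, one side is 0ns
	fmt.Println(sameFsTime(base.Add(1), base.Add(2)))            // false: same second, both sides nonzero
}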
- if f.IsDir() { - changedDirs[path] = struct{}{} - } - if change.Kind == ChangeAdd || change.Kind == ChangeDelete { - parent := filepath.Dir(path) - if _, ok := changedDirs[parent]; !ok && parent != "/" { - changes = append(changes, Change{Path: parent, Kind: ChangeModify}) - changedDirs[parent] = struct{}{} - } - } - - // Record change - changes = append(changes, change) - return nil - }) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - return changes, nil -} - -// FileInfo describes the information of a file. -type FileInfo struct { - parent *FileInfo - name string - stat fs.FileInfo - children map[string]*FileInfo - capability []byte - added bool -} - -// LookUp looks up the file information of a file. -func (info *FileInfo) LookUp(path string) *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - parent := info - if path == string(os.PathSeparator) { - return info - } - - pathElements := strings.Split(path, string(os.PathSeparator)) - for _, elem := range pathElements { - if elem != "" { - child := parent.children[elem] - if child == nil { - return nil - } - parent = child - } - } - return parent -} - -func (info *FileInfo) path() string { - if info.parent == nil { - // As this runs on the daemon side, file paths are OS specific. - return string(os.PathSeparator) - } - return filepath.Join(info.parent.path(), info.name) -} - -func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - sizeAtEntry := len(*changes) - - if oldInfo == nil { - // add - change := Change{ - Path: info.path(), - Kind: ChangeAdd, - } - *changes = append(*changes, change) - info.added = true - } - - // We make a copy so we can modify it to detect additions - // also, we only recurse on the old dir if the new info is a directory - // otherwise any previous delete/change is considered recursive - oldChildren := make(map[string]*FileInfo) - if oldInfo != nil && info.isDir() { - for k, v := range oldInfo.children { - oldChildren[k] = v - } - } - - for name, newChild := range info.children { - oldChild := oldChildren[name] - if oldChild != nil { - // change? - oldStat := oldChild.stat - newStat := newChild.stat - // Note: We can't compare inode or ctime or blocksize here, because these change - // when copying a file into a container. However, that is not generally a problem - // because any content change will change mtime, and any status change should - // be visible when actually comparing the stat fields. The only time this - // breaks down is if some code intentionally hides a change by setting - // back mtime - if statDifferent(oldStat, newStat) || - !bytes.Equal(oldChild.capability, newChild.capability) { - change := Change{ - Path: newChild.path(), - Kind: ChangeModify, - } - *changes = append(*changes, change) - newChild.added = true - } - - // Remove from copy so we can detect deletions - delete(oldChildren, name) - } - - newChild.addChanges(oldChild, changes) - } - for _, oldChild := range oldChildren { - // delete - change := Change{ - Path: oldChild.path(), - Kind: ChangeDelete, - } - *changes = append(*changes, change) - } - - // If there were changes inside this directory, we need to add it, even if the directory - // itself wasn't changed. This is needed to properly save and restore filesystem permissions. - // As this runs on the daemon side, file paths are OS specific. 
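// (The append/copy pair below is the standard Go insert-at-index idiom:
// append grows the slice by one, copy shifts the tail one slot right, and
// the freed slot at sizeAtEntry receives the directory's entry so that it
// appears before the child entries recorded inside it.)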
- if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { - change := Change{ - Path: info.path(), - Kind: ChangeModify, - } - // Let's insert the directory entry before the recently added entries located inside this dir - *changes = append(*changes, change) // just to resize the slice, will be overwritten - copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) - (*changes)[sizeAtEntry] = change - } -} - -// Changes add changes to file information. -func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { - var changes []Change - - info.addChanges(oldInfo, &changes) - - return changes -} - -func newRootFileInfo() *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - root := &FileInfo{ - name: string(os.PathSeparator), - children: make(map[string]*FileInfo), - } - return root -} - -// ChangesDirs compares two directories and generates an array of Change objects describing the changes. -// If oldDir is "", then all files in newDir will be Add-Changes. -func ChangesDirs(newDir, oldDir string) ([]Change, error) { - var oldRoot, newRoot *FileInfo - if oldDir == "" { - emptyDir, err := os.MkdirTemp("", "empty") - if err != nil { - return nil, err - } - defer os.Remove(emptyDir) - oldDir = emptyDir - } - oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) - if err != nil { - return nil, err - } - - return newRoot.Changes(oldRoot), nil -} - -// ChangesSize calculates the size in bytes of the provided changes, based on newDir. -func ChangesSize(newDir string, changes []Change) int64 { - var ( - size int64 - sf = make(map[uint64]struct{}) - ) - for _, change := range changes { - if change.Kind == ChangeModify || change.Kind == ChangeAdd { - file := filepath.Join(newDir, change.Path) - fileInfo, err := os.Lstat(file) - if err != nil { - log.G(context.TODO()).Errorf("Can not stat %q: %s", file, err) - continue - } - - if fileInfo != nil && !fileInfo.IsDir() { - if hasHardlinks(fileInfo) { - inode := getIno(fileInfo) - if _, ok := sf[inode]; !ok { - size += fileInfo.Size() - sf[inode] = struct{}{} - } - } else { - size += fileInfo.Size() - } - } - } - } - return size -} - -// ExportChanges produces an Archive from the provided changes, relative to dir. -func ExportChanges(dir string, changes []Change, idMap idtools.IdentityMapping) (io.ReadCloser, error) { - reader, writer := io.Pipe() - go func() { - ta := newTarAppender(idMap, writer, nil) - - sort.Sort(changesByPath(changes)) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - for _, change := range changes { - if change.Kind == ChangeDelete { - whiteOutDir := filepath.Dir(change.Path) - whiteOutBase := filepath.Base(change.Path) - whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) - timestamp := time.Now() - hdr := &tar.Header{ - Name: whiteOut[1:], - Size: 0, - ModTime: timestamp, - AccessTime: timestamp, - ChangeTime: timestamp, - } - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - log.G(context.TODO()).Debugf("Can't write whiteout header: %s", err) - } - } else { - path := filepath.Join(dir, change.Path) - if err := ta.addTarFile(path, change.Path[1:]); err != nil { - log.G(context.TODO()).Debugf("Can't add file %s to tar: %s", path, err) - } - } - } - - // Make sure to check the error on Close. 
- if err := ta.TarWriter.Close(); err != nil { - log.G(context.TODO()).Debugf("Can't close layer: %s", err) - } - if err := writer.Close(); err != nil { - log.G(context.TODO()).Debugf("failed close Changes writer: %s", err) - } - }() - return reader, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/changes_deprecated.go new file mode 100644 index 0000000..48c7523 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_deprecated.go @@ -0,0 +1,56 @@ +package archive + +import ( + "io" + + "github.com/docker/docker/pkg/idtools" + "github.com/moby/go-archive" +) + +// ChangeType represents the change +// +// Deprecated: use [archive.ChangeType] instead. +type ChangeType = archive.ChangeType + +const ( + ChangeModify = archive.ChangeModify // Deprecated: use [archive.ChangeModify] instead. + ChangeAdd = archive.ChangeAdd // Deprecated: use [archive.ChangeAdd] instead. + ChangeDelete = archive.ChangeDelete // Deprecated: use [archive.ChangeDelete] instead. +) + +// Change represents a change. +// +// Deprecated: use [archive.Change] instead. +type Change = archive.Change + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +// +// Deprecated: use [archive.Changes] instead. +func Changes(layers []string, rw string) ([]archive.Change, error) { + return archive.Changes(layers, rw) +} + +// FileInfo describes the information of a file. +// +// Deprecated: use [archive.FileInfo] instead. +type FileInfo = archive.FileInfo + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// +// Deprecated: use [archive.ChangesDirs] instead. +func ChangesDirs(newDir, oldDir string) ([]archive.Change, error) { + return archive.ChangesDirs(newDir, oldDir) +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +// +// Deprecated: use [archive.ChangesSize] instead. +func ChangesSize(newDir string, changes []archive.Change) int64 { + return archive.ChangesSize(newDir, changes) +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []archive.Change, idMap idtools.IdentityMapping) (io.ReadCloser, error) { + return archive.ExportChanges(dir, changes, idtools.ToUserIdentityMapping(idMap)) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go deleted file mode 100644 index 9a041b0..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go +++ /dev/null @@ -1,281 +0,0 @@ -package archive - -import ( - "fmt" - "os" - "path/filepath" - "sort" - "strings" - "syscall" - "unsafe" - - "golang.org/x/sys/unix" -) - -// walker is used to implement collectFileInfoForChanges on linux. Where this -// method in general returns the entire contents of two directory trees, we -// optimize some FS calls out on linux. In particular, we take advantage of the -// fact that getdents(2) returns the inode of each file in the directory being -// walked, which, when walking two trees in parallel to generate a list of -// changes, can be used to prune subtrees without ever having to lstat(2) them -// directly. Eliminating stat calls in this way can save up to seconds on large -// images. 
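// A compact sketch of the pruning idea described above, under simplified
// assumptions: both directory listings are already in hand, and a child
// whose (device, inode) pair matches on both sides is skipped without any
// lstat(2). The dirent type and prune helper are hypothetical; the real
// walker obtains inodes from getdents(2) via readdirnames below.

package main

import "fmt"

type dirent struct {
	name     string
	dev, ino uint64
}

// prune returns the names that still need a full recursive comparison.
func prune(dir1, dir2 []dirent) []string {
	other := make(map[string]dirent, len(dir2))
	for _, e := range dir2 {
		other[e.name] = e
	}
	var visit []string
	for _, e := range dir1 {
		if o, ok := other[e.name]; ok && o.dev == e.dev && o.ino == e.ino {
			continue // same inode on the same device: subtree unchanged
		}
		visit = append(visit, e.name)
	}
	// Names present only in dir2 would be appended here as well; omitted
	// for brevity (the real walker merges both sorted listings).
	return visit
}

func main() {
	dir1 := []dirent{{"bin", 1, 10}, {"etc", 1, 11}}
	dir2 := []dirent{{"bin", 1, 10}, {"etc", 1, 99}}
	fmt.Println(prune(dir1, dir2)) // [etc]
}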
-type walker struct { - dir1 string - dir2 string - root1 *FileInfo - root2 *FileInfo -} - -// collectFileInfoForChanges returns a complete representation of the trees -// rooted at dir1 and dir2, with one important exception: any subtree or -// leaf where the inode and device numbers are an exact match between dir1 -// and dir2 will be pruned from the results. This method is *only* to be used -// to generating a list of changes between the two directories, as it does not -// reflect the full contents. -func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { - w := &walker{ - dir1: dir1, - dir2: dir2, - root1: newRootFileInfo(), - root2: newRootFileInfo(), - } - - i1, err := os.Lstat(w.dir1) - if err != nil { - return nil, nil, err - } - i2, err := os.Lstat(w.dir2) - if err != nil { - return nil, nil, err - } - - if err := w.walk("/", i1, i2); err != nil { - return nil, nil, err - } - - return w.root1, w.root2, nil -} - -// Given a FileInfo, its path info, and a reference to the root of the tree -// being constructed, register this file with the tree. -func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { - if fi == nil { - return nil - } - parent := root.LookUp(filepath.Dir(path)) - if parent == nil { - return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) - } - info := &FileInfo{ - name: filepath.Base(path), - children: make(map[string]*FileInfo), - parent: parent, - } - cpath := filepath.Join(dir, path) - info.stat = fi - info.capability, _ = lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access - parent.children[info.name] = info - return nil -} - -// Walk a subtree rooted at the same path in both trees being iterated. For -// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d -func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { - // Register these nodes with the return trees, unless we're still at the - // (already-created) roots: - if path != "/" { - if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { - return err - } - if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { - return err - } - } - - is1Dir := i1 != nil && i1.IsDir() - is2Dir := i2 != nil && i2.IsDir() - - sameDevice := false - if i1 != nil && i2 != nil { - si1 := i1.Sys().(*syscall.Stat_t) - si2 := i2.Sys().(*syscall.Stat_t) - if si1.Dev == si2.Dev { - sameDevice = true - } - } - - // If these files are both non-existent, or leaves (non-dirs), we are done. - if !is1Dir && !is2Dir { - return nil - } - - // Fetch the names of all the files contained in both directories being walked: - var names1, names2 []nameIno - if is1Dir { - names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access - if err != nil { - return err - } - } - if is2Dir { - names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access - if err != nil { - return err - } - } - - // We have lists of the files contained in both parallel directories, sorted - // in the same order. Walk them in parallel, generating a unique merged list - // of all items present in either or both directories. 
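// (This is a textbook two-pointer merge over two name-sorted slices:
// advance whichever side sorts first, and on a name tie emit the entry
// only when the inodes differ or the trees live on different devices,
// since an identical inode on the same device means the subtree can be
// pruned.)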
- var names []string - ix1 := 0 - ix2 := 0 - - for { - if ix1 >= len(names1) { - break - } - if ix2 >= len(names2) { - break - } - - ni1 := names1[ix1] - ni2 := names2[ix2] - - switch strings.Compare(ni1.name, ni2.name) { - case -1: // ni1 < ni2 -- advance ni1 - // we will not encounter ni1 in names2 - names = append(names, ni1.name) - ix1++ - case 0: // ni1 == ni2 - if ni1.ino != ni2.ino || !sameDevice { - names = append(names, ni1.name) - } - ix1++ - ix2++ - case 1: // ni1 > ni2 -- advance ni2 - // we will not encounter ni2 in names1 - names = append(names, ni2.name) - ix2++ - } - } - for ix1 < len(names1) { - names = append(names, names1[ix1].name) - ix1++ - } - for ix2 < len(names2) { - names = append(names, names2[ix2].name) - ix2++ - } - - // For each of the names present in either or both of the directories being - // iterated, stat the name under each root, and recurse the pair of them: - for _, name := range names { - fname := filepath.Join(path, name) - var cInfo1, cInfo2 os.FileInfo - if is1Dir { - cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if is2Dir { - cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if err = w.walk(fname, cInfo1, cInfo2); err != nil { - return err - } - } - return nil -} - -// {name,inode} pairs used to support the early-pruning logic of the walker type -type nameIno struct { - name string - ino uint64 -} - -type nameInoSlice []nameIno - -func (s nameInoSlice) Len() int { return len(s) } -func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } - -// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode -// numbers further up the stack when reading directory contents. Unlike -// os.Readdirnames, which returns a list of filenames, this function returns a -// list of {filename,inode} pairs. -func readdirnames(dirname string) (names []nameIno, err error) { - var ( - size = 100 - buf = make([]byte, 4096) - nbuf int - bufp int - nb int - ) - - f, err := os.Open(dirname) - if err != nil { - return nil, err - } - defer f.Close() - - names = make([]nameIno, 0, size) // Empty with room to grow. - for { - // Refill the buffer if necessary - if bufp >= nbuf { - bufp = 0 - nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux - if nbuf < 0 { - nbuf = 0 - } - if err != nil { - return nil, os.NewSyscallError("readdirent", err) - } - if nbuf <= 0 { - break // EOF - } - } - - // Drain the buffer - nb, names = parseDirent(buf[bufp:nbuf], names) - bufp += nb - } - - sl := nameInoSlice(names) - sort.Sort(sl) - return sl, nil -} - -// parseDirent is a minor modification of unix.ParseDirent (linux version) -// which returns {name,inode} pairs instead of just names. -func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { - origlen := len(buf) - for len(buf) > 0 { - dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) // #nosec G103 -- Ignore "G103: Use of unsafe calls should be audited" - buf = buf[dirent.Reclen:] - if dirent.Ino == 0 { // File absent in directory. - continue - } - b := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) // #nosec G103 -- Ignore "G103: Use of unsafe calls should be audited" - name := string(b[0:clen(b[:])]) - if name == "." || name == ".." 
{ // Useless names - continue - } - names = append(names, nameIno{name, dirent.Ino}) - } - return origlen - len(buf), names -} - -func clen(n []byte) int { - for i := 0; i < len(n); i++ { - if n[i] == 0 { - return i - } - } - return len(n) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go deleted file mode 100644 index a8a3a5a..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_other.go +++ /dev/null @@ -1,95 +0,0 @@ -//go:build !linux - -package archive - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strings" -) - -func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { - var ( - oldRoot, newRoot *FileInfo - err1, err2 error - errs = make(chan error, 2) - ) - go func() { - oldRoot, err1 = collectFileInfo(oldDir) - errs <- err1 - }() - go func() { - newRoot, err2 = collectFileInfo(newDir) - errs <- err2 - }() - - // block until both routines have returned - for i := 0; i < 2; i++ { - if err := <-errs; err != nil { - return nil, nil, err - } - } - - return oldRoot, newRoot, nil -} - -func collectFileInfo(sourceDir string) (*FileInfo, error) { - root := newRootFileInfo() - - err := filepath.WalkDir(sourceDir, func(path string, _ os.DirEntry, err error) error { - if err != nil { - return err - } - - // Rebase path - relPath, err := filepath.Rel(sourceDir, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - relPath = filepath.Join(string(os.PathSeparator), relPath) - - // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. - // Temporary workaround. If the returned path starts with two backslashes, - // trim it down to a single backslash. Only relevant on Windows. - if runtime.GOOS == "windows" { - if strings.HasPrefix(relPath, `\\`) { - relPath = relPath[1:] - } - } - - if relPath == string(os.PathSeparator) { - return nil - } - - parent := root.LookUp(filepath.Dir(relPath)) - if parent == nil { - return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) - } - - s, err := os.Lstat(path) - if err != nil { - return err - } - - info := &FileInfo{ - name: filepath.Base(relPath), - children: make(map[string]*FileInfo), - parent: parent, - stat: s, - } - - info.capability, _ = lgetxattr(path, "security.capability") - - parent.children[info.name] = info - - return nil - }) - if err != nil { - return nil, err - } - return root, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go deleted file mode 100644 index 4dd98bd..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build !windows - -package archive - -import ( - "io/fs" - "os" - "syscall" -) - -func statDifferent(oldStat fs.FileInfo, newStat fs.FileInfo) bool { - oldSys := oldStat.Sys().(*syscall.Stat_t) - newSys := newStat.Sys().(*syscall.Stat_t) - // Don't look at size for dirs, its not a good measure of change - if oldStat.Mode() != newStat.Mode() || - oldSys.Uid != newSys.Uid || - oldSys.Gid != newSys.Gid || - oldSys.Rdev != newSys.Rdev || - // Don't look at size or modification time for dirs, its not a good - // measure of change. See https://github.com/moby/moby/issues/9874 - // for a description of the issue with modification time, and - // https://github.com/moby/moby/pull/11422 for the change. 
- // (Note that in the Windows implementation of this function, - // modification time IS taken as a change). See - // https://github.com/moby/moby/pull/37982 for more information. - (!oldStat.Mode().IsDir() && - (!sameFsTime(oldStat.ModTime(), newStat.ModTime()) || (oldStat.Size() != newStat.Size()))) { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode().IsDir() -} - -func getIno(fi os.FileInfo) uint64 { - return fi.Sys().(*syscall.Stat_t).Ino -} - -func hasHardlinks(fi os.FileInfo) bool { - return fi.Sys().(*syscall.Stat_t).Nlink > 1 -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go deleted file mode 100644 index c89605c..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go +++ /dev/null @@ -1,33 +0,0 @@ -package archive - -import ( - "io/fs" - "os" -) - -func statDifferent(oldStat fs.FileInfo, newStat fs.FileInfo) bool { - // Note there is slight difference between the Linux and Windows - // implementations here. Due to https://github.com/moby/moby/issues/9874, - // and the fix at https://github.com/moby/moby/pull/11422, Linux does not - // consider a change to the directory time as a change. Windows on NTFS - // does. See https://github.com/moby/moby/pull/37982 for more information. - - if !sameFsTime(oldStat.ModTime(), newStat.ModTime()) || - oldStat.Mode() != newStat.Mode() || - oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode().IsDir() -} - -func getIno(fi os.FileInfo) (inode uint64) { - return -} - -func hasHardlinks(fi os.FileInfo) bool { - return false -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go deleted file mode 100644 index cae0173..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ /dev/null @@ -1,497 +0,0 @@ -package archive - -import ( - "archive/tar" - "context" - "errors" - "io" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/containerd/log" -) - -// Errors used or returned by this file. -var ( - ErrNotDirectory = errors.New("not a directory") - ErrDirNotExists = errors.New("no such directory") - ErrCannotCopyDir = errors.New("cannot copy directory") - ErrInvalidCopySource = errors.New("invalid copy source content") -) - -var copyPool = sync.Pool{ - New: func() interface{} { s := make([]byte, 32*1024); return &s }, -} - -func copyWithBuffer(dst io.Writer, src io.Reader) error { - buf := copyPool.Get().(*[]byte) - _, err := io.CopyBuffer(dst, src, *buf) - copyPool.Put(buf) - return err -} - -// PreserveTrailingDotOrSeparator returns the given cleaned path (after -// processing using any utility functions from the path or filepath stdlib -// packages) and appends a trailing `/.` or `/` if its corresponding original -// path (from before being processed by utility functions from the path or -// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned -// path already ends in a `.` path segment, then another is not added. If the -// clean path already ends in a path separator, then another is not added. 
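The contract is easier to see with concrete inputs. A rough, Unix-only sketch using only the stdlib; preserveDot is an illustrative stand-in, not the real implementation (which also normalizes platform path separators):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// preserveDot re-appends a trailing "/." or "/" that filepath.Clean stripped
// from the original path, mirroring the documented behavior above.
func preserveDot(cleaned, original string) string {
	if filepath.Base(original) == "." && filepath.Base(cleaned) != "." {
		if !strings.HasSuffix(cleaned, "/") {
			cleaned += "/"
		}
		cleaned += "."
	}
	if strings.HasSuffix(original, "/") && !strings.HasSuffix(cleaned, "/") {
		cleaned += "/"
	}
	return cleaned
}

func main() {
	fmt.Println(preserveDot(filepath.Clean("/a/b/."), "/a/b/.")) // /a/b/.
	fmt.Println(preserveDot(filepath.Clean("/a/b/"), "/a/b/"))   // /a/b/
}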
-func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string) string { - // Ensure paths are in platform semantics - cleanedPath = normalizePath(cleanedPath) - originalPath = normalizePath(originalPath) - - if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { - if !hasTrailingPathSeparator(cleanedPath) { - // Add a separator if it doesn't already end with one (a cleaned - // path would only end in a separator if it is the root). - cleanedPath += string(filepath.Separator) - } - cleanedPath += "." - } - - if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { - cleanedPath += string(filepath.Separator) - } - - return cleanedPath -} - -// assertsDirectory returns whether the given path is -// asserted to be a directory, i.e., the path ends with -// a trailing '/' or `/.`, assuming a path separator of `/`. -func assertsDirectory(path string) bool { - return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) -} - -// hasTrailingPathSeparator returns whether the given -// path ends with the system's path separator character. -func hasTrailingPathSeparator(path string) bool { - return len(path) > 0 && path[len(path)-1] == filepath.Separator -} - -// specifiesCurrentDir returns whether the given path specifies -// a "current directory", i.e., the last path segment is `.`. -func specifiesCurrentDir(path string) bool { - return filepath.Base(path) == "." -} - -// SplitPathDirEntry splits the given path between its directory name and its -// basename by first cleaning the path but preserves a trailing "." if the -// original path specified the current directory. -func SplitPathDirEntry(path string) (dir, base string) { - cleanedPath := filepath.Clean(filepath.FromSlash(path)) - - if specifiesCurrentDir(path) { - cleanedPath += string(os.PathSeparator) + "." - } - - return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) -} - -// TarResource archives the resource described by the given CopyInfo to a Tar -// archive. A non-nil error is returned if sourcePath does not exist or is -// asserted to be a directory but exists as another type of file. -// -// This function acts as a convenient wrapper around TarWithOptions, which -// requires a directory as the source path. TarResource accepts either a -// directory or a file path and correctly sets the Tar options. -func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { - return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) -} - -// TarResourceRebase is like TarResource but renames the first path element of -// items in the resulting tar archive to match the given rebaseName if not "". -func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, _ error) { - sourcePath = normalizePath(sourcePath) - if _, err := os.Lstat(sourcePath); err != nil { - // Catches the case where the source does not exist or is not a - // directory if asserted to be a directory, as this also causes an - // error. - return nil, err - } - - // Separate the source path between its directory and - // the entry in that directory which we are archiving. 
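The split performed on the next line matters because plain filepath.Dir/Base would discard a trailing "/.", which is how callers request archiving a directory's contents rather than the directory itself. A small stdlib-only demonstration of the trick SplitPathDirEntry uses:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Plain stdlib splitting has no way to carry the "current directory" hint:
	fmt.Println(filepath.Dir("/data/src"), filepath.Base("/data/src")) // /data src

	// SplitPathDirEntry keeps it by re-appending "/." after cleaning,
	// so Base yields "." and Dir yields the directory being archived:
	p := filepath.Clean("/data/src/.") + string(filepath.Separator) + "."
	fmt.Println(filepath.Dir(p), filepath.Base(p)) // /data/src .
}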
- sourceDir, sourceBase := SplitPathDirEntry(sourcePath) - opts := TarResourceRebaseOpts(sourceBase, rebaseName) - - log.G(context.TODO()).Debugf("copying %q from %q", sourceBase, sourceDir) - return TarWithOptions(sourceDir, opts) -} - -// TarResourceRebaseOpts does not preform the Tar, but instead just creates the rebase -// parameters to be sent to TarWithOptions (the TarOptions struct) -func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions { - filter := []string{sourceBase} - return &TarOptions{ - Compression: Uncompressed, - IncludeFiles: filter, - IncludeSourceDir: true, - RebaseNames: map[string]string{ - sourceBase: rebaseName, - }, - } -} - -// CopyInfo holds basic info about the source -// or destination path of a copy operation. -type CopyInfo struct { - Path string - Exists bool - IsDir bool - RebaseName string -} - -// CopyInfoSourcePath stats the given path to create a CopyInfo -// struct representing that resource for the source of an archive copy -// operation. The given path should be an absolute local path. A source path -// has all symlinks evaluated that appear before the last path separator ("/" -// on Unix). As it is to be a copy source, the path must exist. -func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { - // normalize the file path and then evaluate the symbol link - // we will use the target file instead of the symbol link if - // followLink is set - path = normalizePath(path) - - resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) - if err != nil { - return CopyInfo{}, err - } - - stat, err := os.Lstat(resolvedPath) - if err != nil { - return CopyInfo{}, err - } - - return CopyInfo{ - Path: resolvedPath, - Exists: true, - IsDir: stat.IsDir(), - RebaseName: rebaseName, - }, nil -} - -// CopyInfoDestinationPath stats the given path to create a CopyInfo -// struct representing that resource for the destination of an archive copy -// operation. The given path should be an absolute local path. -func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { - maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. - path = normalizePath(path) - originalPath := path - - stat, err := os.Lstat(path) - - if err == nil && stat.Mode()&os.ModeSymlink == 0 { - // The path exists and is not a symlink. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil - } - - // While the path is a symlink. - for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { - if n > maxSymlinkIter { - // Don't follow symlinks more than this arbitrary number of times. - return CopyInfo{}, errors.New("too many symlinks in " + originalPath) - } - - // The path is a symbolic link. We need to evaluate it so that the - // destination of the copy operation is the link target and not the - // link itself. This is notably different than CopyInfoSourcePath which - // only evaluates symlinks before the last appearing path separator. - // Also note that it is okay if the last path element is a broken - // symlink as the copy operation should create the target. - var linkTarget string - - linkTarget, err = os.Readlink(path) - if err != nil { - return CopyInfo{}, err - } - - if !filepath.IsAbs(linkTarget) { - // Join with the parent directory. - dstParent, _ := SplitPathDirEntry(path) - linkTarget = filepath.Join(dstParent, linkTarget) - } - - path = linkTarget - stat, err = os.Lstat(path) - } - - if err != nil { - // It's okay if the destination path doesn't exist. 
We can still - // continue the copy operation if the parent directory exists. - if !os.IsNotExist(err) { - return CopyInfo{}, err - } - - // Ensure destination parent dir exists. - dstParent, _ := SplitPathDirEntry(path) - - parentDirStat, err := os.Stat(dstParent) - if err != nil { - return CopyInfo{}, err - } - if !parentDirStat.IsDir() { - return CopyInfo{}, ErrNotDirectory - } - - return CopyInfo{Path: path}, nil - } - - // The path exists after resolving symlinks. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil -} - -// PrepareArchiveCopy prepares the given srcContent archive, which should -// contain the archived resource described by srcInfo, to the destination -// described by dstInfo. Returns the possibly modified content archive along -// with the path to the destination directory which it should be extracted to. -func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { - // Ensure in platform semantics - srcInfo.Path = normalizePath(srcInfo.Path) - dstInfo.Path = normalizePath(dstInfo.Path) - - // Separate the destination path between its directory and base - // components in case the source archive contents need to be rebased. - dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) - _, srcBase := SplitPathDirEntry(srcInfo.Path) - - switch { - case dstInfo.Exists && dstInfo.IsDir: - // The destination exists as a directory. No alteration - // to srcContent is needed as its contents can be - // simply extracted to the destination directory. - return dstInfo.Path, io.NopCloser(srcContent), nil - case dstInfo.Exists && srcInfo.IsDir: - // The destination exists as some type of file and the source - // content is a directory. This is an error condition since - // you cannot copy a directory to an existing file location. - return "", nil, ErrCannotCopyDir - case dstInfo.Exists: - // The destination exists as some type of file and the source content - // is also a file. The source content entry will have to be renamed to - // have a basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case srcInfo.IsDir: - // The destination does not exist and the source content is an archive - // of a directory. The archive should be extracted to the parent of - // the destination path instead, and when it is, the directory that is - // created as a result should take the name of the destination path. - // The source content entries will have to be renamed to have a - // basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case assertsDirectory(dstInfo.Path): - // The destination does not exist and is asserted to be created as a - // directory, but the source content is not a directory. This is an - // error condition since you cannot create a directory from a file - // source. - return "", nil, ErrDirNotExists - default: - // The last remaining case is when the destination does not exist, is - // not asserted to be a directory, and the source content is not an - // archive of a directory. It this case, the destination file will need - // to be created when the archive is extracted and the source content - // entry will have to be renamed to have a basename which matches the - // destination path's basename. 
- if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - } -} - -// RebaseArchiveEntries rewrites the given srcContent archive replacing -// an occurrence of oldBase with newBase at the beginning of entry names. -func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { - if oldBase == string(os.PathSeparator) { - // If oldBase specifies the root directory, use an empty string as - // oldBase instead so that newBase doesn't replace the path separator - // that all paths will start with. - oldBase = "" - } - - rebased, w := io.Pipe() - - go func() { - srcTar := tar.NewReader(srcContent) - rebasedTar := tar.NewWriter(w) - - for { - hdr, err := srcTar.Next() - if err == io.EOF { - // Signals end of archive. - rebasedTar.Close() - w.Close() - return - } - if err != nil { - w.CloseWithError(err) - return - } - - // srcContent tar stream, as served by TarWithOptions(), is - // definitely in PAX format, but tar.Next() mistakenly guesses it - // as USTAR, which creates a problem: if the newBase is >100 - // characters long, WriteHeader() returns an error like - // "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...". - // - // To fix, set the format to PAX here. See docker/for-linux issue #484. - hdr.Format = tar.FormatPAX - hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) - if hdr.Typeflag == tar.TypeLink { - hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) - } - - if err = rebasedTar.WriteHeader(hdr); err != nil { - w.CloseWithError(err) - return - } - - // Ignoring GoSec G110. See https://github.com/securego/gosec/pull/433 - // and https://cure53.de/pentest-report_opa.pdf, which recommends to - // replace io.Copy with io.CopyN7. The latter allows to specify the - // maximum number of bytes that should be read. By properly defining - // the limit, it can be assured that a GZip compression bomb cannot - // easily cause a Denial-of-Service. - // After reviewing with @tonistiigi and @cpuguy83, this should not - // affect us, because here we do not read into memory, hence should - // not be vulnerable to this code consuming memory. - //nolint:gosec // G110: Potential DoS vulnerability via decompression bomb (gosec) - if _, err = io.Copy(rebasedTar, srcTar); err != nil { - w.CloseWithError(err) - return - } - } - }() - - return rebased -} - -// CopyResource performs an archive copy from the given source path to the -// given destination path. The source path MUST exist and the destination -// path's parent directory must exist. -func CopyResource(srcPath, dstPath string, followLink bool) error { - var ( - srcInfo CopyInfo - err error - ) - - // Ensure in platform semantics - srcPath = normalizePath(srcPath) - dstPath = normalizePath(dstPath) - - // Clean the source and destination paths. - srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) - dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) - - if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { - return err - } - - content, err := TarResource(srcInfo) - if err != nil { - return err - } - defer content.Close() - - return CopyTo(content, srcInfo, dstPath) -} - -// CopyTo handles extracting the given content whose -// entries should be sourced from srcInfo to dstPath. 
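Typical use of this API goes through CopyResource (defined above), which wires CopyInfoSourcePath, TarResource, and the CopyTo function that follows together. A minimal usage sketch; the paths are illustrative:

package main

import "github.com/docker/docker/pkg/archive"

func main() {
	// Copy /tmp/src into /tmp/dst, resolving symlinks in the source path.
	// The source must exist and the destination's parent directory must exist.
	if err := archive.CopyResource("/tmp/src", "/tmp/dst", true); err != nil {
		panic(err)
	}
}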
-func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { - // The destination path need not exist, but CopyInfoDestinationPath will - // ensure that at least the parent directory exists. - dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) - if err != nil { - return err - } - - dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) - if err != nil { - return err - } - defer copyArchive.Close() - - options := &TarOptions{ - NoLchown: true, - NoOverwriteDirNonDir: true, - } - - return Untar(copyArchive, dstDir, options) -} - -// ResolveHostSourcePath decides real path need to be copied with parameters such as -// whether to follow symbol link or not, if followLink is true, resolvedPath will return -// link target of any symbol link file, else it will only resolve symlink of directory -// but return symbol link file itself without resolving. -func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, _ error) { - if followLink { - var err error - resolvedPath, err = filepath.EvalSymlinks(path) - if err != nil { - return "", "", err - } - - resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) - } else { - dirPath, basePath := filepath.Split(path) - - // if not follow symbol link, then resolve symbol link of parent dir - resolvedDirPath, err := filepath.EvalSymlinks(dirPath) - if err != nil { - return "", "", err - } - // resolvedDirPath will have been cleaned (no trailing path separators) so - // we can manually join it with the base path element. - resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath - if hasTrailingPathSeparator(path) && - filepath.Base(path) != filepath.Base(resolvedPath) { - rebaseName = filepath.Base(path) - } - } - return resolvedPath, rebaseName, nil -} - -// GetRebaseName normalizes and compares path and resolvedPath, -// return completed resolved path and rebased file name -func GetRebaseName(path, resolvedPath string) (string, string) { - // linkTarget will have been cleaned (no trailing path separators and dot) so - // we can manually join it with them - var rebaseName string - if specifiesCurrentDir(path) && - !specifiesCurrentDir(resolvedPath) { - resolvedPath += string(filepath.Separator) + "." - } - - if hasTrailingPathSeparator(path) && - !hasTrailingPathSeparator(resolvedPath) { - resolvedPath += string(filepath.Separator) - } - - if filepath.Base(path) != filepath.Base(resolvedPath) { - // In the case where the path had a trailing separator and a symlink - // evaluation has changed the last path component, we will need to - // rebase the name in the archive that is being copied to match the - // originally requested name. - rebaseName = filepath.Base(path) - } - return resolvedPath, rebaseName -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/copy_deprecated.go new file mode 100644 index 0000000..1901e55 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_deprecated.go @@ -0,0 +1,130 @@ +package archive + +import ( + "io" + + "github.com/moby/go-archive" + "github.com/moby/go-archive/compression" +) + +var ( + ErrNotDirectory = archive.ErrNotDirectory // Deprecated: use [archive.ErrNotDirectory] instead. + ErrDirNotExists = archive.ErrDirNotExists // Deprecated: use [archive.ErrDirNotExists] instead. + ErrCannotCopyDir = archive.ErrCannotCopyDir // Deprecated: use [archive.ErrCannotCopyDir] instead. 
+ ErrInvalidCopySource = archive.ErrInvalidCopySource // Deprecated: use [archive.ErrInvalidCopySource] instead.
+)
+
+// PreserveTrailingDotOrSeparator returns the given cleaned path.
+//
+// Deprecated: use [archive.PreserveTrailingDotOrSeparator] instead.
+func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string) string {
+ return archive.PreserveTrailingDotOrSeparator(cleanedPath, originalPath)
+}
+
+// SplitPathDirEntry splits the given path between its directory name and its
+// basename.
+//
+// Deprecated: use [archive.SplitPathDirEntry] instead.
+func SplitPathDirEntry(path string) (dir, base string) {
+ return archive.SplitPathDirEntry(path)
+}
+
+// TarResource archives the resource described by the given CopyInfo to a Tar
+// archive.
+//
+// Deprecated: use [archive.TarResource] instead.
+func TarResource(sourceInfo archive.CopyInfo) (content io.ReadCloser, err error) {
+ return archive.TarResource(sourceInfo)
+}
+
+// TarResourceRebase is like TarResource but renames the first path element of
+// items in the resulting tar archive to match the given rebaseName if not "".
+//
+// Deprecated: use [archive.TarResourceRebase] instead.
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, _ error) {
+ return archive.TarResourceRebase(sourcePath, rebaseName)
+}
+
+// TarResourceRebaseOpts does not perform the tar operation, but instead just
+// creates the rebase parameters to be sent to TarWithOptions.
+//
+// Deprecated: use [archive.TarResourceRebaseOpts] instead.
+func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
+ filter := []string{sourceBase}
+ return &TarOptions{
+ Compression: compression.None,
+ IncludeFiles: filter,
+ IncludeSourceDir: true,
+ RebaseNames: map[string]string{
+ sourceBase: rebaseName,
+ },
+ }
+}
+
+// CopyInfo holds basic info about the source or destination path of a copy operation.
+//
+// Deprecated: use [archive.CopyInfo] instead.
+type CopyInfo = archive.CopyInfo
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation.
+//
+// Deprecated: use [archive.CopyInfoSourcePath] instead.
+func CopyInfoSourcePath(path string, followLink bool) (archive.CopyInfo, error) {
+ return archive.CopyInfoSourcePath(path, followLink)
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation.
+//
+// Deprecated: use [archive.CopyInfoDestinationPath] instead.
+func CopyInfoDestinationPath(path string) (info archive.CopyInfo, err error) {
+ return archive.CopyInfoDestinationPath(path)
+}
+
+// PrepareArchiveCopy prepares the given srcContent archive.
+//
+// Deprecated: use [archive.PrepareArchiveCopy] instead.
+func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo archive.CopyInfo) (dstDir string, content io.ReadCloser, err error) {
+ return archive.PrepareArchiveCopy(srcContent, srcInfo, dstInfo)
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+//
+// Deprecated: use [archive.RebaseArchiveEntries] instead.
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
+ return archive.RebaseArchiveEntries(srcContent, oldBase, newBase)
+}
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path.
+// +// Deprecated: use [archive.CopyResource] instead. +func CopyResource(srcPath, dstPath string, followLink bool) error { + return archive.CopyResource(srcPath, dstPath, followLink) +} + +// CopyTo handles extracting the given content whose +// entries should be sourced from srcInfo to dstPath. +// +// Deprecated: use [archive.CopyTo] instead. +func CopyTo(content io.Reader, srcInfo archive.CopyInfo, dstPath string) error { + return archive.CopyTo(content, srcInfo, dstPath) +} + +// ResolveHostSourcePath decides real path need to be copied. +// +// Deprecated: use [archive.ResolveHostSourcePath] instead. +func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, _ error) { + return archive.ResolveHostSourcePath(path, followLink) +} + +// GetRebaseName normalizes and compares path and resolvedPath. +// +// Deprecated: use [archive.GetRebaseName] instead. +func GetRebaseName(path, resolvedPath string) (string, string) { + return archive.GetRebaseName(path, resolvedPath) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go deleted file mode 100644 index f579282..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !windows - -package archive - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.ToSlash(path) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go deleted file mode 100644 index 2b775b4..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package archive - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.FromSlash(path) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/dev_freebsd.go b/vendor/github.com/docker/docker/pkg/archive/dev_freebsd.go deleted file mode 100644 index aa8e291..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/dev_freebsd.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build freebsd - -package archive - -import "golang.org/x/sys/unix" - -var mknod = unix.Mknod diff --git a/vendor/github.com/docker/docker/pkg/archive/dev_unix.go b/vendor/github.com/docker/docker/pkg/archive/dev_unix.go deleted file mode 100644 index dffc596..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/dev_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build !windows && !freebsd - -package archive - -import "golang.org/x/sys/unix" - -func mknod(path string, mode uint32, dev uint64) error { - return unix.Mknod(path, mode, int(dev)) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go deleted file mode 100644 index d5a394c..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ /dev/null @@ -1,258 +0,0 @@ -package archive - -import ( - "archive/tar" - "context" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/containerd/log" -) - -// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. 
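A minimal usage sketch for UnpackLayer; the paths are illustrative, and after this change the deprecated wrapper forwards to moby/go-archive. Passing nil options is accepted and treated as defaults:

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Apply a (possibly compressed) layer tar to a root filesystem directory.
	f, err := os.Open("/tmp/layer.tar")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	size, err := archive.UnpackLayer("/tmp/rootfs", f, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("unpacked bytes:", size)
}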
-func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { - tr := tar.NewReader(layer) - - var dirs []*tar.Header - unpackedPaths := make(map[string]struct{}) - - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - aufsTempdir := "" - aufsHardlinks := make(map[string]*tar.Header) - - // Iterate through the files in the archive. - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return 0, err - } - - size += hdr.Size - - // Normalize name, for safety and for a simple is-root check - hdr.Name = filepath.Clean(hdr.Name) - - // Windows does not support filenames with colons in them. Ignore - // these files. This is not a problem though (although it might - // appear that it is). Let's suppose a client is running docker pull. - // The daemon it points to is Windows. Would it make sense for the - // client to be doing a docker pull Ubuntu for example (which has files - // with colons in the name under /usr/share/man/man3)? No, absolutely - // not as it would really only make sense that they were pulling a - // Windows image. However, for development, it is necessary to be able - // to pull Linux images which are in the repository. - // - // TODO Windows. Once the registry is aware of what images are Windows- - // specific or Linux-specific, this warning should be changed to an error - // to cater for the situation where someone does manage to upload a Linux - // image but have it tagged as Windows inadvertently. - if runtime.GOOS == "windows" { - if strings.Contains(hdr.Name, ":") { - log.G(context.TODO()).Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) - continue - } - } - - // Ensure that the parent directory exists. - err = createImpliedDirectories(dest, hdr, options) - if err != nil { - return 0, err - } - - // Skip AUFS metadata dirs - if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { - // Regular files inside /.wh..wh.plnk can be used as hardlink targets - // We don't want this directory, but we need the files in them so that - // such hardlinks can be resolved. - if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { - basename := filepath.Base(hdr.Name) - aufsHardlinks[basename] = hdr - if aufsTempdir == "" { - if aufsTempdir, err = os.MkdirTemp(dest, "dockerplnk"); err != nil { - return 0, err - } - defer os.RemoveAll(aufsTempdir) - } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, options); err != nil { - return 0, err - } - } - - if hdr.Name != WhiteoutOpaqueDir { - continue - } - } - // #nosec G305 -- The joined path is guarded against path traversal. - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return 0, err - } - - // Note as these operations are platform specific, so must the slash be. 
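The check that follows is the layer's path-traversal guard: every entry name is re-rooted under dest and rejected if its relative path escapes. The same guard in isolation (the helper name is illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// insideDest reports whether joining name under dest stays inside dest,
// mirroring the filepath.Rel prefix check applied to each tar entry below.
func insideDest(dest, name string) bool {
	path := filepath.Join(dest, name)
	rel, err := filepath.Rel(dest, path)
	if err != nil {
		return false
	}
	return !strings.HasPrefix(rel, ".."+string(os.PathSeparator))
}

func main() {
	fmt.Println(insideDest("/rootfs", "etc/passwd"))       // true
	fmt.Println(insideDest("/rootfs", "../../etc/passwd")) // false: breakout
}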
- if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - base := filepath.Base(path) - - if strings.HasPrefix(base, WhiteoutPrefix) { - dir := filepath.Dir(path) - if base == WhiteoutOpaqueDir { - _, err := os.Lstat(dir) - if err != nil { - return 0, err - } - err = filepath.WalkDir(dir, func(path string, info os.DirEntry, err error) error { - if err != nil { - if os.IsNotExist(err) { - err = nil // parent was deleted - } - return err - } - if path == dir { - return nil - } - if _, exists := unpackedPaths[path]; !exists { - return os.RemoveAll(path) - } - return nil - }) - if err != nil { - return 0, err - } - } else { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - if err := os.RemoveAll(originalPath); err != nil { - return 0, err - } - } - } else { - // If path exits we almost always just want to remove and replace it. - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return 0, err - } - } - } - - srcData := io.Reader(tr) - srcHdr := hdr - - // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so - // we manually retarget these into the temporary files we extracted them into - if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { - linkBasename := filepath.Base(hdr.Linkname) - srcHdr = aufsHardlinks[linkBasename] - if srcHdr == nil { - return 0, fmt.Errorf("Invalid aufs hardlink") - } - tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) - if err != nil { - return 0, err - } - defer tmpFile.Close() - srcData = tmpFile - } - - if err := remapIDs(options.IDMap, srcHdr); err != nil { - return 0, err - } - - if err := createTarFile(path, dest, srcHdr, srcData, options); err != nil { - return 0, err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - unpackedPaths[path] = struct{}{} - } - } - - for _, hdr := range dirs { - // #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. - path := filepath.Join(dest, hdr.Name) - if err := chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return 0, err - } - } - - return size, nil -} - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer io.Reader) (int64, error) { - return applyLayerHandler(dest, layer, &TarOptions{}, true) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { - return applyLayerHandler(dest, layer, options, false) -} - -// IsEmpty checks if the tar archive is empty (doesn't contain any entries). 
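A quick usage sketch; assuming DecompressStream treats a zero-length stream as an uncompressed (and therefore empty) tar, this should report true:

package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	empty, err := archive.IsEmpty(bytes.NewReader(nil))
	fmt.Println(empty, err) // expected: true <nil>
}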
-func IsEmpty(rd io.Reader) (bool, error) { - decompRd, err := DecompressStream(rd) - if err != nil { - return true, fmt.Errorf("failed to decompress archive: %v", err) - } - defer decompRd.Close() - - tarReader := tar.NewReader(decompRd) - if _, err := tarReader.Next(); err != nil { - if err == io.EOF { - return true, nil - } - return false, fmt.Errorf("failed to read next archive header: %v", err) - } - - return false, nil -} - -// do the bulk load of ApplyLayer, but allow for not calling DecompressStream -func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { - dest = filepath.Clean(dest) - - // We need to be able to set any perms - restore := overrideUmask(0) - defer restore() - - if decompress { - decompLayer, err := DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompLayer.Close() - layer = decompLayer - } - return UnpackLayer(dest, layer, options) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/diff_deprecated.go new file mode 100644 index 0000000..dd5e0d5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff_deprecated.go @@ -0,0 +1,37 @@ +package archive + +import ( + "io" + + "github.com/moby/go-archive" +) + +// UnpackLayer unpack `layer` to a `dest`. +// +// Deprecated: use [archive.UnpackLayer] instead. +func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + return archive.UnpackLayer(dest, layer, toArchiveOpt(options)) +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. +// +// Deprecated: use [archive.ApplyLayer] instead. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return archive.ApplyLayer(dest, layer) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. +// +// Deprecated: use [archive.ApplyUncompressedLayer] instead. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return archive.ApplyUncompressedLayer(dest, layer, toArchiveOpt(options)) +} + +// IsEmpty checks if the tar archive is empty (doesn't contain any entries). +// +// Deprecated: use [archive.IsEmpty] instead. +func IsEmpty(rd io.Reader) (bool, error) { + return archive.IsEmpty(rd) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go deleted file mode 100644 index 7216f2f..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build !windows - -package archive - -import "golang.org/x/sys/unix" - -// overrideUmask sets current process's file mode creation mask to newmask -// and returns a function to restore it. -// -// WARNING for readers stumbling upon this code. Changing umask in a multi- -// threaded environment isn't safe. Don't use this without understanding the -// risks, and don't export this function for others to use (we shouldn't even -// be using this ourself). -// -// FIXME(thaJeztah): we should get rid of these hacks if possible. 
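The save-and-restore pattern used by overrideUmask, shown standalone. As the warning above says, changing the umask is not safe in multi-threaded programs; this sketch is for illustration only:

//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Set a zero umask so extracted file modes are applied verbatim,
	// and restore the previous mask when done.
	old := unix.Umask(0)
	defer unix.Umask(old)

	fmt.Printf("previous umask: %04o\n", old)
}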
-func overrideUmask(newMask int) func() { - oldMask := unix.Umask(newMask) - return func() { - unix.Umask(oldMask) - } -} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_windows.go b/vendor/github.com/docker/docker/pkg/archive/diff_windows.go deleted file mode 100644 index d28f5b2..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/diff_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package archive - -// overrideUmask is a no-op on windows. -func overrideUmask(newmask int) func() { - return func() {} -} diff --git a/vendor/github.com/docker/docker/pkg/archive/path.go b/vendor/github.com/docker/docker/pkg/archive/path.go deleted file mode 100644 index 888a697..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/path.go +++ /dev/null @@ -1,20 +0,0 @@ -package archive - -// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, -// is the system drive. -// On Linux: this is a no-op. -// On Windows: this does the following> -// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. -// This is used, for example, when validating a user provided path in docker cp. -// If a drive letter is supplied, it must be the system drive. The drive letter -// is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be concatenated with -// a Windows long-path which doesn't support drive-letters. Examples: -// C: --> Fail -// C:\ --> \ -// a --> a -// /a --> \a -// d:\ --> Fail -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - return checkSystemDriveAndRemoveDriveLetter(path) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/path_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/path_deprecated.go new file mode 100644 index 0000000..0fa74de --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/path_deprecated.go @@ -0,0 +1,10 @@ +package archive + +import "github.com/moby/go-archive" + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path is the system drive. +// +// Deprecated: use [archive.CheckSystemDriveAndRemoveDriveLetter] instead. 
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + return archive.CheckSystemDriveAndRemoveDriveLetter(path) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/path_unix.go b/vendor/github.com/docker/docker/pkg/archive/path_unix.go deleted file mode 100644 index 390264b..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/path_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build !windows - -package archive - -// checkSystemDriveAndRemoveDriveLetter is the non-Windows implementation -// of CheckSystemDriveAndRemoveDriveLetter -func checkSystemDriveAndRemoveDriveLetter(path string) (string, error) { - return path, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/path_windows.go b/vendor/github.com/docker/docker/pkg/archive/path_windows.go deleted file mode 100644 index 7e18c8e..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/path_windows.go +++ /dev/null @@ -1,22 +0,0 @@ -package archive - -import ( - "fmt" - "path/filepath" - "strings" -) - -// checkSystemDriveAndRemoveDriveLetter is the Windows implementation -// of CheckSystemDriveAndRemoveDriveLetter -func checkSystemDriveAndRemoveDriveLetter(path string) (string, error) { - if len(path) == 2 && string(path[1]) == ":" { - return "", fmt.Errorf("no relative path specified in %q", path) - } - if !filepath.IsAbs(path) || len(path) < 2 { - return filepath.FromSlash(path), nil - } - if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", fmt.Errorf("the specified path is not on the system drive (C:)") - } - return filepath.FromSlash(path[2:]), nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/time.go b/vendor/github.com/docker/docker/pkg/archive/time.go deleted file mode 100644 index 4e9ae95..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/time.go +++ /dev/null @@ -1,38 +0,0 @@ -package archive - -import ( - "syscall" - "time" - "unsafe" -) - -var ( - minTime = time.Unix(0, 0) - maxTime time.Time -) - -func init() { - if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { - // This is a 64 bit timespec - // os.Chtimes limits time to the following - maxTime = time.Unix(0, 1<<63-1) - } else { - // This is a 32 bit timespec - maxTime = time.Unix(1<<31-1, 0) - } -} - -func boundTime(t time.Time) time.Time { - if t.Before(minTime) || t.After(maxTime) { - return minTime - } - - return t -} - -func latestTime(t1, t2 time.Time) time.Time { - if t1.Before(t2) { - return t2 - } - return t1 -} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_nonwindows.go b/vendor/github.com/docker/docker/pkg/archive/time_nonwindows.go deleted file mode 100644 index 5bfdfa2..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/time_nonwindows.go +++ /dev/null @@ -1,41 +0,0 @@ -//go:build !windows - -package archive - -import ( - "os" - "time" - - "golang.org/x/sys/unix" -) - -// chtimes changes the access time and modified time of a file at the given path. -// If the modified time is prior to the Unix Epoch (unixMinTime), or after the -// end of Unix Time (unixEpochTime), os.Chtimes has undefined behavior. In this -// case, Chtimes defaults to Unix Epoch, just in case. 
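The clamping described here was implemented by the boundTime helper in the time.go file deleted above. A standalone sketch of the same bound, assuming a 64-bit timespec:

package main

import (
	"fmt"
	"time"
)

var (
	minTime = time.Unix(0, 0)
	maxTime = time.Unix(0, 1<<63-1) // limit of a 64-bit timespec
)

// clamp mirrors the deleted boundTime helper: timestamps outside the
// representable range fall back to the Unix epoch.
func clamp(t time.Time) time.Time {
	if t.Before(minTime) || t.After(maxTime) {
		return minTime
	}
	return t
}

func main() {
	fmt.Println(clamp(time.Unix(-1, 0)).Unix())  // 0 (clamped to epoch)
	fmt.Println(clamp(time.Unix(1e9, 0)).Unix()) // 1000000000 (in range)
}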
-func chtimes(name string, atime time.Time, mtime time.Time) error { - return os.Chtimes(name, atime, mtime) -} - -func timeToTimespec(time time.Time) unix.Timespec { - if time.IsZero() { - // Return UTIME_OMIT special value - return unix.Timespec{ - Sec: 0, - Nsec: (1 << 30) - 2, - } - } - return unix.NsecToTimespec(time.UnixNano()) -} - -func lchtimes(name string, atime time.Time, mtime time.Time) error { - utimes := [2]unix.Timespec{ - timeToTimespec(atime), - timeToTimespec(mtime), - } - err := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes[0:], unix.AT_SYMLINK_NOFOLLOW) - if err != nil && err != unix.ENOSYS { - return err - } - return err -} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_windows.go b/vendor/github.com/docker/docker/pkg/archive/time_windows.go deleted file mode 100644 index af1f7c8..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/time_windows.go +++ /dev/null @@ -1,32 +0,0 @@ -package archive - -import ( - "os" - "time" - - "golang.org/x/sys/windows" -) - -func chtimes(name string, atime time.Time, mtime time.Time) error { - if err := os.Chtimes(name, atime, mtime); err != nil { - return err - } - - pathp, err := windows.UTF16PtrFromString(name) - if err != nil { - return err - } - h, err := windows.CreateFile(pathp, - windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, - windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) - if err != nil { - return err - } - defer windows.Close(h) - c := windows.NsecToFiletime(mtime.UnixNano()) - return windows.SetFileTime(h, &c, nil, nil) -} - -func lchtimes(name string, atime time.Time, mtime time.Time) error { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/utils.go b/vendor/github.com/docker/docker/pkg/archive/utils.go new file mode 100644 index 0000000..692cf16 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/utils.go @@ -0,0 +1,42 @@ +package archive + +import ( + "github.com/docker/docker/pkg/idtools" + "github.com/moby/go-archive" +) + +// ToArchiveOpt converts an [TarOptions] to a [archive.TarOptions]. +// +// Deprecated: use [archive.TarOptions] instead, this utility is for internal use to transition to the [github.com/moby/go-archive] module. +func ToArchiveOpt(options *TarOptions) *archive.TarOptions { + return toArchiveOpt(options) +} + +func toArchiveOpt(options *TarOptions) *archive.TarOptions { + if options == nil { + return nil + } + + var chownOpts *archive.ChownOpts + if options.ChownOpts != nil { + chownOpts = &archive.ChownOpts{ + UID: options.ChownOpts.UID, + GID: options.ChownOpts.GID, + } + } + + return &archive.TarOptions{ + IncludeFiles: options.IncludeFiles, + ExcludePatterns: options.ExcludePatterns, + Compression: options.Compression, + NoLchown: options.NoLchown, + IDMap: idtools.ToUserIdentityMapping(options.IDMap), + ChownOpts: chownOpts, + IncludeSourceDir: options.IncludeSourceDir, + WhiteoutFormat: options.WhiteoutFormat, + NoOverwriteDirNonDir: options.NoOverwriteDirNonDir, + RebaseNames: options.RebaseNames, + InUserNS: options.InUserNS, + BestEffortXattrs: options.BestEffortXattrs, + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go deleted file mode 100644 index d20478a..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go +++ /dev/null @@ -1,23 +0,0 @@ -package archive - -// Whiteouts are files with a special meaning for the layered filesystem. -// Docker uses AUFS whiteout files inside exported archives. 
In other -// filesystems these files are generated/handled on tar creation/extraction. - -// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a -// filename this means that file has been removed from the base layer. -const WhiteoutPrefix = ".wh." - -// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not -// for removing an actual file. Normally these files are excluded from exported -// archives. -const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix - -// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other -// layers. Normally these should not go into exported archives and all changed -// hardlinks should be copied to the top layer. -const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" - -// WhiteoutOpaqueDir file means directory has been made opaque - meaning -// readdir calls to this directory do not follow to lower layers. -const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts_deprecated.go new file mode 100644 index 0000000..0ab8590 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts_deprecated.go @@ -0,0 +1,10 @@ +package archive + +import "github.com/moby/go-archive" + +const ( + WhiteoutPrefix = archive.WhiteoutPrefix // Deprecated: use [archive.WhiteoutPrefix] instead. + WhiteoutMetaPrefix = archive.WhiteoutMetaPrefix // Deprecated: use [archive.WhiteoutMetaPrefix] instead. + WhiteoutLinkDir = archive.WhiteoutLinkDir // Deprecated: use [archive.WhiteoutLinkDir] instead. + WhiteoutOpaqueDir = archive.WhiteoutOpaqueDir // Deprecated: use [archive.WhiteoutOpaqueDir] instead. +) diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go deleted file mode 100644 index f8a9725..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/wrap.go +++ /dev/null @@ -1,59 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "io" -) - -// Generate generates a new archive from the content provided -// as input. -// -// `files` is a sequence of path/content pairs. A new file is -// added to the archive for each pair. -// If the last pair is incomplete, the file is created with an -// empty content. For example: -// -// Generate("foo.txt", "hello world", "emptyfile") -// -// The above call will return an archive with 2 files: -// - ./foo.txt with content "hello world" -// - ./empty with empty content -// -// FIXME: stream content instead of buffering -// FIXME: specify permissions and other archive metadata -func Generate(input ...string) (io.Reader, error) { - files := parseStringPairs(input...) 
- buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - for _, file := range files { - name, content := file[0], file[1] - hdr := &tar.Header{ - Name: name, - Size: int64(len(content)), - } - if err := tw.WriteHeader(hdr); err != nil { - return nil, err - } - if _, err := tw.Write([]byte(content)); err != nil { - return nil, err - } - } - if err := tw.Close(); err != nil { - return nil, err - } - return buf, nil -} - -func parseStringPairs(input ...string) [][2]string { - output := make([][2]string, 0, len(input)/2+1) - for i := 0; i < len(input); i += 2 { - var pair [2]string - pair[0] = input[i] - if i+1 < len(input) { - pair[1] = input[i+1] - } - output = append(output, pair) - } - return output -} diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/wrap_deprecated.go new file mode 100644 index 0000000..e5d3fa9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/wrap_deprecated.go @@ -0,0 +1,14 @@ +package archive + +import ( + "io" + + "github.com/moby/go-archive" +) + +// Generate generates a new archive from the content provided as input. +// +// Deprecated: use [archive.Generate] instead. +func Generate(input ...string) (io.Reader, error) { + return archive.Generate(input...) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/xattr_supported.go b/vendor/github.com/docker/docker/pkg/archive/xattr_supported.go deleted file mode 100644 index 652a1f0..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/xattr_supported.go +++ /dev/null @@ -1,52 +0,0 @@ -//go:build linux || darwin || freebsd || netbsd - -package archive - -import ( - "errors" - "fmt" - "io/fs" - - "golang.org/x/sys/unix" -) - -// lgetxattr retrieves the value of the extended attribute identified by attr -// and associated with the given path in the file system. -// It returns a nil slice and nil error if the xattr is not set. -func lgetxattr(path string, attr string) ([]byte, error) { - // Start with a 128 length byte array - dest := make([]byte, 128) - sz, err := unix.Lgetxattr(path, attr, dest) - - for errors.Is(err, unix.ERANGE) { - // Buffer too small, use zero-sized buffer to get the actual size - sz, err = unix.Lgetxattr(path, attr, []byte{}) - if err != nil { - return nil, wrapPathError("lgetxattr", path, attr, err) - } - dest = make([]byte, sz) - sz, err = unix.Lgetxattr(path, attr, dest) - } - - if err != nil { - if errors.Is(err, noattr) { - return nil, nil - } - return nil, wrapPathError("lgetxattr", path, attr, err) - } - - return dest[:sz], nil -} - -// lsetxattr sets the value of the extended attribute identified by attr -// and associated with the given path in the file system. 
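A round-trip of the underlying syscalls these helpers wrap, using golang.org/x/sys/unix directly. This assumes a filesystem with user-xattr support (ext4/xfs; tmpfs may return EOPNOTSUPP), and the path is illustrative:

//go:build linux

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	const path, attr = "./xattr-demo", "user.demo"
	if err := os.WriteFile(path, nil, 0o644); err != nil {
		panic(err)
	}
	// Set, then read back, an extended attribute on the file.
	if err := unix.Lsetxattr(path, attr, []byte("value"), 0); err != nil {
		panic(err)
	}
	buf := make([]byte, 128)
	sz, err := unix.Lgetxattr(path, attr, buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf[:sz])) // value
}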
-func lsetxattr(path string, attr string, data []byte, flags int) error { - return wrapPathError("lsetxattr", path, attr, unix.Lsetxattr(path, attr, data, flags)) -} - -func wrapPathError(op, path, attr string, err error) error { - if err == nil { - return nil - } - return &fs.PathError{Op: op, Path: path, Err: fmt.Errorf("xattr %q: %w", attr, err)} -} diff --git a/vendor/github.com/docker/docker/pkg/archive/xattr_supported_linux.go b/vendor/github.com/docker/docker/pkg/archive/xattr_supported_linux.go deleted file mode 100644 index f2e7646..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/xattr_supported_linux.go +++ /dev/null @@ -1,5 +0,0 @@ -package archive - -import "golang.org/x/sys/unix" - -var noattr = unix.ENODATA diff --git a/vendor/github.com/docker/docker/pkg/archive/xattr_supported_unix.go b/vendor/github.com/docker/docker/pkg/archive/xattr_supported_unix.go deleted file mode 100644 index 4d88241..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/xattr_supported_unix.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build !linux && !windows - -package archive - -import "golang.org/x/sys/unix" - -var noattr = unix.ENOATTR diff --git a/vendor/github.com/docker/docker/pkg/archive/xattr_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/xattr_unsupported.go deleted file mode 100644 index b0d9165..0000000 --- a/vendor/github.com/docker/docker/pkg/archive/xattr_unsupported.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !linux && !darwin && !freebsd && !netbsd - -package archive - -func lgetxattr(path string, attr string) ([]byte, error) { - return nil, nil -} - -func lsetxattr(path string, attr string, data []byte, flags int) error { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go index d2fbd94..23e90c2 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -3,11 +3,15 @@ package idtools import ( "fmt" "os" + + "github.com/moby/sys/user" ) // IDMap contains a single entry for user namespace range remapping. An array // of IDMap entries represents the structure that will be provided to the Linux // kernel for creating a user namespace. +// +// Deprecated: use [user.IDMap] instead. type IDMap struct { ContainerID int `json:"container_id"` HostID int `json:"host_id"` @@ -17,28 +21,42 @@ type IDMap struct { // MkdirAllAndChown creates a directory (include any along the path) and then modifies // ownership to the requested uid/gid. If the directory already exists, this // function will still change ownership and permissions. +// +// Deprecated: use [user.MkdirAllAndChown] instead. func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error { - return mkdirAs(path, mode, owner, true, true) + return user.MkdirAllAndChown(path, mode, owner.UID, owner.GID) } // MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. // If the directory already exists, this function still changes ownership and permissions. // Note that unlike os.Mkdir(), this function does not return IsExist error // in case path already exists. +// +// Deprecated: use [user.MkdirAndChown] instead. 
func MkdirAndChown(path string, mode os.FileMode, owner Identity) error { - return mkdirAs(path, mode, owner, false, true) + return user.MkdirAndChown(path, mode, owner.UID, owner.GID) } // MkdirAllAndChownNew creates a directory (include any along the path) and then modifies // ownership ONLY of newly created directories to the requested uid/gid. If the // directories along the path exist, no change of ownership or permissions will be performed +// +// Deprecated: use [user.MkdirAllAndChown] with the [user.WithOnlyNew] option instead. func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error { - return mkdirAs(path, mode, owner, true, false) + return user.MkdirAllAndChown(path, mode, owner.UID, owner.GID, user.WithOnlyNew) } // GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. // If the maps are empty, then the root uid/gid will default to "real" 0/0 +// +// Deprecated: use [(user.IdentityMapping).RootPair] instead. func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + return getRootUIDGID(uidMap, gidMap) +} + +// getRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func getRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { uid, err := toHost(0, uidMap) if err != nil { return -1, -1, err @@ -101,11 +119,61 @@ type IdentityMapping struct { GIDMaps []IDMap `json:"GIDMaps"` } +// FromUserIdentityMapping converts a [user.IdentityMapping] to an [idtools.IdentityMapping]. +// +// Deprecated: use [user.IdentityMapping] directly, this is transitioning to user package. +func FromUserIdentityMapping(u user.IdentityMapping) IdentityMapping { + return IdentityMapping{ + UIDMaps: fromUserIDMap(u.UIDMaps), + GIDMaps: fromUserIDMap(u.GIDMaps), + } +} + +func fromUserIDMap(u []user.IDMap) []IDMap { + if u == nil { + return nil + } + m := make([]IDMap, len(u)) + for i := range u { + m[i] = IDMap{ + ContainerID: int(u[i].ID), + HostID: int(u[i].ParentID), + Size: int(u[i].Count), + } + } + return m +} + +// ToUserIdentityMapping converts an [idtools.IdentityMapping] to a [user.IdentityMapping]. +// +// Deprecated: use [user.IdentityMapping] directly, this is transitioning to user package. +func ToUserIdentityMapping(u IdentityMapping) user.IdentityMapping { + return user.IdentityMapping{ + UIDMaps: toUserIDMap(u.UIDMaps), + GIDMaps: toUserIDMap(u.GIDMaps), + } +} + +func toUserIDMap(u []IDMap) []user.IDMap { + if u == nil { + return nil + } + m := make([]user.IDMap, len(u)) + for i := range u { + m[i] = user.IDMap{ + ID: int64(u[i].ContainerID), + ParentID: int64(u[i].HostID), + Count: int64(u[i].Size), + } + } + return m +} + // RootPair returns a uid and gid pair for the root user. The error is ignored // because a root user always exists, and the defaults are correct when the uid // and gid maps are empty. func (i IdentityMapping) RootPair() Identity { - uid, gid, _ := GetRootUIDGID(i.UIDMaps, i.GIDMaps) + uid, gid, _ := getRootUIDGID(i.UIDMaps, i.GIDMaps) return Identity{UID: uid, GID: gid} } @@ -144,6 +212,8 @@ func (i IdentityMapping) Empty() bool { } // CurrentIdentity returns the identity of the current process +// +// Deprecated: use [os.Getuid] and [os.Getegid] instead. 
func CurrentIdentity() Identity { return Identity{UID: os.Getuid(), GID: os.Getegid()} } diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go deleted file mode 100644 index 1f11fe4..0000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ /dev/null @@ -1,166 +0,0 @@ -//go:build !windows - -package idtools - -import ( - "fmt" - "os" - "path/filepath" - "strconv" - "syscall" - - "github.com/moby/sys/user" -) - -func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { - path, err := filepath.Abs(path) - if err != nil { - return err - } - - stat, err := os.Stat(path) - if err == nil { - if !stat.IsDir() { - return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} - } - if !chownExisting { - return nil - } - - // short-circuit -- we were called with an existing directory and chown was requested - return setPermissions(path, mode, owner, stat) - } - - // make an array containing the original path asked for, plus (for mkAll == true) - // all path components leading up to the complete path that don't exist before we MkdirAll - // so that we can chown all of them properly at the end. If chownExisting is false, we won't - // chown the full directory path if it exists - var paths []string - if os.IsNotExist(err) { - paths = []string{path} - } - - if mkAll { - // walk back to "/" looking for directories which do not exist - // and add them to the paths array for chown after creation - dirPath := path - for { - dirPath = filepath.Dir(dirPath) - if dirPath == "/" { - break - } - if _, err = os.Stat(dirPath); err != nil && os.IsNotExist(err) { - paths = append(paths, dirPath) - } - } - if err = os.MkdirAll(path, mode); err != nil { - return err - } - } else if err = os.Mkdir(path, mode); err != nil { - return err - } - // even if it existed, we will chown the requested path + any subpaths that - // didn't exist when we called MkdirAll - for _, pathComponent := range paths { - if err = setPermissions(pathComponent, mode, owner, nil); err != nil { - return err - } - } - return nil -} - -// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username -// -// Deprecated: use [user.LookupUser] instead -func LookupUser(name string) (user.User, error) { - return user.LookupUser(name) -} - -// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid -// -// Deprecated: use [user.LookupUid] instead -func LookupUID(uid int) (user.User, error) { - return user.LookupUid(uid) -} - -// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, -// -// Deprecated: use [user.LookupGroup] instead -func LookupGroup(name string) (user.Group, error) { - return user.LookupGroup(name) -} - -// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested -// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the -// dir is on an NFS share, so don't call chown unless we absolutely must. -// Likewise for setting permissions. 
-func setPermissions(p string, mode os.FileMode, owner Identity, stat os.FileInfo) error { - if stat == nil { - var err error - stat, err = os.Stat(p) - if err != nil { - return err - } - } - if stat.Mode().Perm() != mode.Perm() { - if err := os.Chmod(p, mode.Perm()); err != nil { - return err - } - } - ssi := stat.Sys().(*syscall.Stat_t) - if ssi.Uid == uint32(owner.UID) && ssi.Gid == uint32(owner.GID) { - return nil - } - return os.Chown(p, owner.UID, owner.GID) -} - -// LoadIdentityMapping takes a requested username and -// using the data from /etc/sub{uid,gid} ranges, creates the -// proper uid and gid remapping ranges for that user/group pair -func LoadIdentityMapping(name string) (IdentityMapping, error) { - // TODO: Consider adding support for calling out to "getent" - usr, err := user.LookupUser(name) - if err != nil { - return IdentityMapping{}, fmt.Errorf("could not get user for username %s: %v", name, err) - } - - subuidRanges, err := lookupSubRangesFile("/etc/subuid", usr) - if err != nil { - return IdentityMapping{}, err - } - subgidRanges, err := lookupSubRangesFile("/etc/subgid", usr) - if err != nil { - return IdentityMapping{}, err - } - - return IdentityMapping{ - UIDMaps: subuidRanges, - GIDMaps: subgidRanges, - }, nil -} - -func lookupSubRangesFile(path string, usr user.User) ([]IDMap, error) { - uidstr := strconv.Itoa(usr.Uid) - rangeList, err := user.ParseSubIDFileFilter(path, func(sid user.SubID) bool { - return sid.Name == usr.Name || sid.Name == uidstr - }) - if err != nil { - return nil, err - } - if len(rangeList) == 0 { - return nil, fmt.Errorf("no subuid ranges found for user %q", usr.Name) - } - - idMap := []IDMap{} - - containerID := 0 - for _, idrange := range rangeList { - idMap = append(idMap, IDMap{ - ContainerID: containerID, - HostID: int(idrange.SubID), - Size: int(idrange.Count), - }) - containerID = containerID + int(idrange.Count) - } - return idMap, nil -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go index a12b140..f83f59f 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -1,9 +1,5 @@ package idtools -import ( - "os" -) - const ( SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" ) @@ -14,11 +10,3 @@ const ( ContainerUserSidString = "S-1-5-93-2-2" ) - -// This is currently a wrapper around [os.MkdirAll] since currently -// permissions aren't set through this path, the identity isn't utilized. -// Ownership is handled elsewhere, but in the future could be support here -// too. -func mkdirAs(path string, _ os.FileMode, _ Identity, _, _ bool) error { - return os.MkdirAll(path, 0) -} diff --git a/vendor/github.com/ebitengine/purego/dlfcn.go b/vendor/github.com/ebitengine/purego/dlfcn.go index f70a245..cd1bf29 100644 --- a/vendor/github.com/ebitengine/purego/dlfcn.go +++ b/vendor/github.com/ebitengine/purego/dlfcn.go @@ -83,17 +83,17 @@ func loadSymbol(handle uintptr, name string) (uintptr, error) { // appear to work if you link directly to the C function on darwin arm64. 
//go:linkname dlopen dlopen -var dlopen uintptr +var dlopen uint8 var dlopenABI0 = uintptr(unsafe.Pointer(&dlopen)) //go:linkname dlsym dlsym -var dlsym uintptr +var dlsym uint8 var dlsymABI0 = uintptr(unsafe.Pointer(&dlsym)) //go:linkname dlclose dlclose -var dlclose uintptr +var dlclose uint8 var dlcloseABI0 = uintptr(unsafe.Pointer(&dlclose)) //go:linkname dlerror dlerror -var dlerror uintptr +var dlerror uint8 var dlerrorABI0 = uintptr(unsafe.Pointer(&dlerror)) diff --git a/vendor/github.com/ebitengine/purego/dlfcn_darwin.go b/vendor/github.com/ebitengine/purego/dlfcn_darwin.go index 5f87627..27f5607 100644 --- a/vendor/github.com/ebitengine/purego/dlfcn_darwin.go +++ b/vendor/github.com/ebitengine/purego/dlfcn_darwin.go @@ -17,8 +17,3 @@ const ( //go:cgo_import_dynamic purego_dlsym dlsym "/usr/lib/libSystem.B.dylib" //go:cgo_import_dynamic purego_dlerror dlerror "/usr/lib/libSystem.B.dylib" //go:cgo_import_dynamic purego_dlclose dlclose "/usr/lib/libSystem.B.dylib" - -//go:cgo_import_dynamic purego_dlopen dlopen "/usr/lib/libSystem.B.dylib" -//go:cgo_import_dynamic purego_dlsym dlsym "/usr/lib/libSystem.B.dylib" -//go:cgo_import_dynamic purego_dlerror dlerror "/usr/lib/libSystem.B.dylib" -//go:cgo_import_dynamic purego_dlclose dlclose "/usr/lib/libSystem.B.dylib" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go index 7a3a1bb..d17942e 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go @@ -121,81 +121,81 @@ func pthread_setspecific(key pthread_key_t, value unsafe.Pointer) int32 { } //go:linkname _malloc _malloc -var _malloc uintptr +var _malloc uint8 var mallocABI0 = uintptr(unsafe.Pointer(&_malloc)) //go:linkname _free _free -var _free uintptr +var _free uint8 var freeABI0 = uintptr(unsafe.Pointer(&_free)) //go:linkname _setenv _setenv -var _setenv uintptr +var _setenv uint8 var setenvABI0 = uintptr(unsafe.Pointer(&_setenv)) //go:linkname _unsetenv _unsetenv -var _unsetenv uintptr +var _unsetenv uint8 var unsetenvABI0 = uintptr(unsafe.Pointer(&_unsetenv)) //go:linkname _sigfillset _sigfillset -var _sigfillset uintptr +var _sigfillset uint8 var sigfillsetABI0 = uintptr(unsafe.Pointer(&_sigfillset)) //go:linkname _nanosleep _nanosleep -var _nanosleep uintptr +var _nanosleep uint8 var nanosleepABI0 = uintptr(unsafe.Pointer(&_nanosleep)) //go:linkname _abort _abort -var _abort uintptr +var _abort uint8 var abortABI0 = uintptr(unsafe.Pointer(&_abort)) //go:linkname _pthread_attr_init _pthread_attr_init -var _pthread_attr_init uintptr +var _pthread_attr_init uint8 var pthread_attr_initABI0 = uintptr(unsafe.Pointer(&_pthread_attr_init)) //go:linkname _pthread_create _pthread_create -var _pthread_create uintptr +var _pthread_create uint8 var pthread_createABI0 = uintptr(unsafe.Pointer(&_pthread_create)) //go:linkname _pthread_detach _pthread_detach -var _pthread_detach uintptr +var _pthread_detach uint8 var pthread_detachABI0 = uintptr(unsafe.Pointer(&_pthread_detach)) //go:linkname _pthread_sigmask _pthread_sigmask -var _pthread_sigmask uintptr +var _pthread_sigmask uint8 var pthread_sigmaskABI0 = uintptr(unsafe.Pointer(&_pthread_sigmask)) //go:linkname _pthread_self _pthread_self -var _pthread_self uintptr +var _pthread_self uint8 var pthread_selfABI0 = uintptr(unsafe.Pointer(&_pthread_self)) //go:linkname _pthread_get_stacksize_np _pthread_get_stacksize_np -var _pthread_get_stacksize_np uintptr +var 
_pthread_get_stacksize_np uint8 var pthread_get_stacksize_npABI0 = uintptr(unsafe.Pointer(&_pthread_get_stacksize_np)) //go:linkname _pthread_attr_getstacksize _pthread_attr_getstacksize -var _pthread_attr_getstacksize uintptr +var _pthread_attr_getstacksize uint8 var pthread_attr_getstacksizeABI0 = uintptr(unsafe.Pointer(&_pthread_attr_getstacksize)) //go:linkname _pthread_attr_setstacksize _pthread_attr_setstacksize -var _pthread_attr_setstacksize uintptr +var _pthread_attr_setstacksize uint8 var pthread_attr_setstacksizeABI0 = uintptr(unsafe.Pointer(&_pthread_attr_setstacksize)) //go:linkname _pthread_attr_destroy _pthread_attr_destroy -var _pthread_attr_destroy uintptr +var _pthread_attr_destroy uint8 var pthread_attr_destroyABI0 = uintptr(unsafe.Pointer(&_pthread_attr_destroy)) //go:linkname _pthread_mutex_lock _pthread_mutex_lock -var _pthread_mutex_lock uintptr +var _pthread_mutex_lock uint8 var pthread_mutex_lockABI0 = uintptr(unsafe.Pointer(&_pthread_mutex_lock)) //go:linkname _pthread_mutex_unlock _pthread_mutex_unlock -var _pthread_mutex_unlock uintptr +var _pthread_mutex_unlock uint8 var pthread_mutex_unlockABI0 = uintptr(unsafe.Pointer(&_pthread_mutex_unlock)) //go:linkname _pthread_cond_broadcast _pthread_cond_broadcast -var _pthread_cond_broadcast uintptr +var _pthread_cond_broadcast uint8 var pthread_cond_broadcastABI0 = uintptr(unsafe.Pointer(&_pthread_cond_broadcast)) //go:linkname _pthread_setspecific _pthread_setspecific -var _pthread_setspecific uintptr +var _pthread_setspecific uint8 var pthread_setspecificABI0 = uintptr(unsafe.Pointer(&_pthread_setspecific)) diff --git a/vendor/github.com/go-ole/go-ole/SECURITY.md b/vendor/github.com/go-ole/go-ole/SECURITY.md new file mode 100644 index 0000000..dac2815 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy + +## Supported Versions + +Security updates are applied only to the latest release. + +## Reporting a Vulnerability + +If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. + +Please disclose it at [security advisory](https://github.com/go-ole/go-ole/security/advisories/new). + +This project is maintained by a team of volunteers on a reasonable-effort basis. As such, please give us at least 90 days to work on a fix before public exposure. 
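The purego hunks above swap each linkname'd variable from uintptr to uint8. Judging from the pattern visible in the diff, these variables are never read or written: each one exists only so its address can be captured into the matching *ABI0 uintptr that is handed to assembly trampolines, so a one-byte element type is presumably sufficient. A minimal, safe sketch of that address-anchor idea (the names below are illustrative, not part of purego's API):

```go
package main

import (
	"fmt"
	"unsafe"
)

// anchor stands in for purego's linkname'd symbols (_malloc, dlopen, ...).
// The variable is never read or written; only its address is used.
var anchor uint8

// anchorABI0 mirrors the mallocABI0/dlopenABI0 pattern in the diff: a uintptr
// holding the symbol's address for hand-off to assembly.
var anchorABI0 = uintptr(unsafe.Pointer(&anchor))

func main() {
	fmt.Printf("anchor address: %#x (element size: %d byte)\n",
		anchorABI0, unsafe.Sizeof(anchor))
}
```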
diff --git a/vendor/github.com/go-ole/go-ole/appveyor.yml b/vendor/github.com/go-ole/go-ole/appveyor.yml index 0d557ac..8df7fa2 100644 --- a/vendor/github.com/go-ole/go-ole/appveyor.yml +++ b/vendor/github.com/go-ole/go-ole/appveyor.yml @@ -6,14 +6,9 @@ version: "1.3.0.{build}-alpha-{branch}" -os: Windows Server 2012 R2 +os: Visual Studio 2019 -branches: - only: - - master - - v1.2 - - v1.1 - - v1.0 +build: off skip_tags: true @@ -21,20 +16,40 @@ clone_folder: c:\gopath\src\github.com\go-ole\go-ole environment: GOPATH: c:\gopath - matrix: - - GOARCH: amd64 - GOVERSION: 1.5 - GOROOT: c:\go - DOWNLOADPLATFORM: "x64" + GOROOT: c:\go + DOWNLOADPLATFORM: "x64" -install: - - choco install mingw - - SET PATH=c:\tools\mingw64\bin;%PATH% +before_test: # - Download COM Server - ps: Start-FileDownload "https://github.com/go-ole/test-com-server/releases/download/v1.0.2/test-com-server-${env:DOWNLOADPLATFORM}.zip" - 7z e test-com-server-%DOWNLOADPLATFORM%.zip -oc:\gopath\src\github.com\go-ole\go-ole > NUL - c:\gopath\src\github.com\go-ole\go-ole\build\register-assembly.bat - # - set + +test_script: + - go test -v -cover ./... + # go vet has false positives on unsafe.Pointer with windows/sys. Disabling since it is recommended to use go test instead. + # - go vet ./... + +branches: + only: + - master + - v1.2 + - v1.1 + - v1.0 + +matrix: + allow_failures: + - environment: + GOROOT: C:\go-x86 + DOWNLOADPLATFORM: "x86" + - environment: + GOROOT: C:\go118 + DOWNLOADPLATFORM: "x64" + - environment: + GOROOT: C:\go118-x86 + DOWNLOADPLATFORM: "x86" + +install: - go version - go env - go get -u golang.org/x/tools/cmd/cover @@ -45,10 +60,9 @@ build_script: - cd c:\gopath\src\github.com\go-ole\go-ole - go get -v -t ./... - go build - - go test -v -cover ./... # disable automatic tests -test: off +test: on # disable deployment deploy: off diff --git a/vendor/github.com/go-ole/go-ole/com.go b/vendor/github.com/go-ole/go-ole/com.go index a9bef15..cabbac0 100644 --- a/vendor/github.com/go-ole/go-ole/com.go +++ b/vendor/github.com/go-ole/go-ole/com.go @@ -11,6 +11,7 @@ import ( var ( procCoInitialize = modole32.NewProc("CoInitialize") procCoInitializeEx = modole32.NewProc("CoInitializeEx") + procCoInitializeSecurity = modole32.NewProc("CoInitializeSecurity") procCoUninitialize = modole32.NewProc("CoUninitialize") procCoCreateInstance = modole32.NewProc("CoCreateInstance") procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") @@ -37,6 +38,9 @@ var ( procDispatchMessageW = moduser32.NewProc("DispatchMessageW") ) +// This is to enable calling COM Security initialization multiple times +var bSecurityInit bool = false + // coInitialize initializes COM library on current thread. // // MSDN documentation suggests that this function should not be called. Call @@ -68,6 +72,35 @@ func coInitializeEx(coinit uint32) (err error) { return } +// coInitializeSecurity: Registers security and sets the default security values +// for the process. +func coInitializeSecurity(cAuthSvc int32, + dwAuthnLevel uint32, + dwImpLevel uint32, + dwCapabilities uint32) (err error) { + // Check COM Security initialization has done previously + if !bSecurityInit { + // https://learn.microsoft.com/en-us/windows/win32/api/combaseapi/nf-combaseapi-coinitializesecurity + hr, _, _ := procCoInitializeSecurity.Call( + uintptr(0), // Allow *all* VSS writers to communicate back! 
+			uintptr(cAuthSvc),       // Default COM authentication service
+			uintptr(0),              // Default COM authorization service
+			uintptr(0),              // Reserved parameter
+			uintptr(dwAuthnLevel),   // Strongest COM authentication level
+			uintptr(dwImpLevel),     // Minimal impersonation abilities
+			uintptr(0),              // Default COM authentication settings
+			uintptr(dwCapabilities), // Cloaking
+			uintptr(0))              // Reserved parameter
+		if hr != 0 {
+			err = NewError(hr)
+		} else {
+			// COM security initialization succeeded; set the global flag.
+			bSecurityInit = true
+		}
+	}
+	return
+}
+
 // CoInitialize initializes COM library on current thread.
 //
 // MSDN documentation suggests that this function should not be called. Call
@@ -96,6 +129,15 @@ func CoUninitialize() {
 	procCoUninitialize.Call()
 }
 
+// CoInitializeSecurity: Registers security and sets the default security values
+// for the process.
+func CoInitializeSecurity(cAuthSvc int32,
+	dwAuthnLevel uint32,
+	dwImpLevel uint32,
+	dwCapabilities uint32) (err error) {
+	return coInitializeSecurity(cAuthSvc, dwAuthnLevel, dwImpLevel, dwCapabilities)
+}
+
 // CoTaskMemFree frees memory pointer.
 func CoTaskMemFree(memptr uintptr) {
 	procCoTaskMemFree.Call(memptr)
diff --git a/vendor/github.com/go-ole/go-ole/idispatch_windows.go b/vendor/github.com/go-ole/go-ole/idispatch_windows.go
index b399f04..649c073 100644
--- a/vendor/github.com/go-ole/go-ole/idispatch_windows.go
+++ b/vendor/github.com/go-ole/go-ole/idispatch_windows.go
@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows
 
 package ole
@@ -92,7 +93,7 @@ func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}
 	case int8:
 		vargs[n] = NewVariant(VT_I1, int64(v.(int8)))
 	case *int8:
-		vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8)))))
+		vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int8)))))
 	case int16:
 		vargs[n] = NewVariant(VT_I2, int64(v.(int16)))
 	case *int16:
diff --git a/vendor/github.com/go-ole/go-ole/variant.go b/vendor/github.com/go-ole/go-ole/variant.go
index 967a23f..a2c8402 100644
--- a/vendor/github.com/go-ole/go-ole/variant.go
+++ b/vendor/github.com/go-ole/go-ole/variant.go
@@ -99,7 +99,7 @@ func (v *VARIANT) Value() interface{} {
 	case VT_DISPATCH:
 		return v.ToIDispatch()
 	case VT_BOOL:
-		return v.Val != 0
+		return (v.Val & 0xffff) != 0
 	}
 	return nil
 }
diff --git a/vendor/github.com/go-stack/stack/LICENSE.md b/vendor/github.com/go-stack/stack/LICENSE.md
new file mode 100644
index 0000000..2abf98e
--- /dev/null
+++ b/vendor/github.com/go-stack/stack/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Chris Hines
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-stack/stack/README.md b/vendor/github.com/go-stack/stack/README.md new file mode 100644 index 0000000..f11cccc --- /dev/null +++ b/vendor/github.com/go-stack/stack/README.md @@ -0,0 +1,38 @@ +[![GoDoc](https://godoc.org/github.com/go-stack/stack?status.svg)](https://godoc.org/github.com/go-stack/stack) +[![Go Report Card](https://goreportcard.com/badge/go-stack/stack)](https://goreportcard.com/report/go-stack/stack) +[![TravisCI](https://travis-ci.org/go-stack/stack.svg?branch=master)](https://travis-ci.org/go-stack/stack) +[![Coverage Status](https://coveralls.io/repos/github/go-stack/stack/badge.svg?branch=master)](https://coveralls.io/github/go-stack/stack?branch=master) + +# stack + +Package stack implements utilities to capture, manipulate, and format call +stacks. It provides a simpler API than package runtime. + +The implementation takes care of the minutia and special cases of interpreting +the program counter (pc) values returned by runtime.Callers. + +## Versioning + +Package stack publishes releases via [semver](http://semver.org/) compatible Git +tags prefixed with a single 'v'. The master branch always contains the latest +release. The develop branch contains unreleased commits. + +## Formatting + +Package stack's types implement fmt.Formatter, which provides a simple and +flexible way to declaratively configure formatting when used with logging or +error tracking packages. + +```go +func DoTheThing() { + c := stack.Caller(0) + log.Print(c) // "source.go:10" + log.Printf("%+v", c) // "pkg/path/source.go:10" + log.Printf("%n", c) // "DoTheThing" + + s := stack.Trace().TrimRuntime() + log.Print(s) // "[source.go:15 caller.go:42 main.go:14]" +} +``` + +See the docs for all of the supported formatting options. diff --git a/vendor/github.com/go-stack/stack/stack.go b/vendor/github.com/go-stack/stack/stack.go new file mode 100644 index 0000000..ac3b93b --- /dev/null +++ b/vendor/github.com/go-stack/stack/stack.go @@ -0,0 +1,400 @@ +// +build go1.7 + +// Package stack implements utilities to capture, manipulate, and format call +// stacks. It provides a simpler API than package runtime. +// +// The implementation takes care of the minutia and special cases of +// interpreting the program counter (pc) values returned by runtime.Callers. +// +// Package stack's types implement fmt.Formatter, which provides a simple and +// flexible way to declaratively configure formatting when used with logging +// or error tracking packages. +package stack + +import ( + "bytes" + "errors" + "fmt" + "io" + "runtime" + "strconv" + "strings" +) + +// Call records a single function invocation from a goroutine stack. +type Call struct { + frame runtime.Frame +} + +// Caller returns a Call from the stack of the current goroutine. The argument +// skip is the number of stack frames to ascend, with 0 identifying the +// calling function. +func Caller(skip int) Call { + // As of Go 1.9 we need room for up to three PC entries. + // + // 0. An entry for the stack frame prior to the target to check for + // special handling needed if that prior entry is runtime.sigpanic. + // 1. A possible second entry to hold metadata about skipped inlined + // functions. 
If inline functions were not skipped the target frame
+	//    PC will be here.
+	// 2. A third entry for the target frame PC when the second entry
+	//    is used for skipped inline functions.
+	var pcs [3]uintptr
+	n := runtime.Callers(skip+1, pcs[:])
+	frames := runtime.CallersFrames(pcs[:n])
+	frame, _ := frames.Next()
+	frame, _ = frames.Next()
+
+	return Call{
+		frame: frame,
+	}
+}
+
+// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", c).
+func (c Call) String() string {
+	return fmt.Sprint(c)
+}
+
+// MarshalText implements encoding.TextMarshaler. It formats the Call the same
+// as fmt.Sprintf("%v", c).
+func (c Call) MarshalText() ([]byte, error) {
+	if c.frame == (runtime.Frame{}) {
+		return nil, ErrNoFunc
+	}
+
+	buf := bytes.Buffer{}
+	fmt.Fprint(&buf, c)
+	return buf.Bytes(), nil
+}
+
+// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely
+// cause is a Call with the zero value.
+var ErrNoFunc = errors.New("no call stack information")
+
+// Format implements fmt.Formatter with support for the following verbs.
+//
+//    %s    source file
+//    %d    line number
+//    %n    function name
+//    %k    last segment of the package path
+//    %v    equivalent to %s:%d
+//
+// It accepts the '+' and '#' flags for most of the verbs as follows.
+//
+//    %+s   path of source file relative to the compile time GOPATH,
+//          or the module path joined to the path of source file relative
+//          to module root
+//    %#s   full path of source file
+//    %+n   import path qualified function name
+//    %+k   full package path
+//    %+v   equivalent to %+s:%d
+//    %#v   equivalent to %#s:%d
+func (c Call) Format(s fmt.State, verb rune) {
+	if c.frame == (runtime.Frame{}) {
+		fmt.Fprintf(s, "%%!%c(NOFUNC)", verb)
+		return
+	}
+
+	switch verb {
+	case 's', 'v':
+		file := c.frame.File
+		switch {
+		case s.Flag('#'):
+			// done
+		case s.Flag('+'):
+			file = pkgFilePath(&c.frame)
+		default:
+			const sep = "/"
+			if i := strings.LastIndex(file, sep); i != -1 {
+				file = file[i+len(sep):]
+			}
+		}
+		io.WriteString(s, file)
+		if verb == 'v' {
+			buf := [7]byte{':'}
+			s.Write(strconv.AppendInt(buf[:1], int64(c.frame.Line), 10))
+		}
+
+	case 'd':
+		buf := [6]byte{}
+		s.Write(strconv.AppendInt(buf[:0], int64(c.frame.Line), 10))
+
+	case 'k':
+		name := c.frame.Function
+		const pathSep = "/"
+		start, end := 0, len(name)
+		if i := strings.LastIndex(name, pathSep); i != -1 {
+			start = i + len(pathSep)
+		}
+		const pkgSep = "."
+		if i := strings.Index(name[start:], pkgSep); i != -1 {
+			end = start + i
+		}
+		if s.Flag('+') {
+			start = 0
+		}
+		io.WriteString(s, name[start:end])
+
+	case 'n':
+		name := c.frame.Function
+		if !s.Flag('+') {
+			const pathSep = "/"
+			if i := strings.LastIndex(name, pathSep); i != -1 {
+				name = name[i+len(pathSep):]
+			}
+			const pkgSep = "."
+			if i := strings.Index(name, pkgSep); i != -1 {
+				name = name[i+len(pkgSep):]
+			}
+		}
+		io.WriteString(s, name)
+	}
+}
+
+// Frame returns the call frame information for the Call.
+func (c Call) Frame() runtime.Frame {
+	return c.frame
+}
+
+// PC returns the program counter for this call frame; multiple frames may
+// have the same PC value.
+//
+// Deprecated: Use Call.Frame instead.
+func (c Call) PC() uintptr {
+	return c.frame.PC
+}
+
+// CallStack records a sequence of function invocations from a goroutine
+// stack.
+type CallStack []Call
+
+// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", cs).
+func (cs CallStack) String() string { + return fmt.Sprint(cs) +} + +var ( + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + spaceBytes = []byte(" ") +) + +// MarshalText implements encoding.TextMarshaler. It formats the CallStack the +// same as fmt.Sprintf("%v", cs). +func (cs CallStack) MarshalText() ([]byte, error) { + buf := bytes.Buffer{} + buf.Write(openBracketBytes) + for i, pc := range cs { + if i > 0 { + buf.Write(spaceBytes) + } + fmt.Fprint(&buf, pc) + } + buf.Write(closeBracketBytes) + return buf.Bytes(), nil +} + +// Format implements fmt.Formatter by printing the CallStack as square brackets +// ([, ]) surrounding a space separated list of Calls each formatted with the +// supplied verb and options. +func (cs CallStack) Format(s fmt.State, verb rune) { + s.Write(openBracketBytes) + for i, pc := range cs { + if i > 0 { + s.Write(spaceBytes) + } + pc.Format(s, verb) + } + s.Write(closeBracketBytes) +} + +// Trace returns a CallStack for the current goroutine with element 0 +// identifying the calling function. +func Trace() CallStack { + var pcs [512]uintptr + n := runtime.Callers(1, pcs[:]) + + frames := runtime.CallersFrames(pcs[:n]) + cs := make(CallStack, 0, n) + + // Skip extra frame retrieved just to make sure the runtime.sigpanic + // special case is handled. + frame, more := frames.Next() + + for more { + frame, more = frames.Next() + cs = append(cs, Call{frame: frame}) + } + + return cs +} + +// TrimBelow returns a slice of the CallStack with all entries below c +// removed. +func (cs CallStack) TrimBelow(c Call) CallStack { + for len(cs) > 0 && cs[0] != c { + cs = cs[1:] + } + return cs +} + +// TrimAbove returns a slice of the CallStack with all entries above c +// removed. +func (cs CallStack) TrimAbove(c Call) CallStack { + for len(cs) > 0 && cs[len(cs)-1] != c { + cs = cs[:len(cs)-1] + } + return cs +} + +// pkgIndex returns the index that results in file[index:] being the path of +// file relative to the compile time GOPATH, and file[:index] being the +// $GOPATH/src/ portion of file. funcName must be the name of a function in +// file as returned by runtime.Func.Name. +func pkgIndex(file, funcName string) int { + // As of Go 1.6.2 there is no direct way to know the compile time GOPATH + // at runtime, but we can infer the number of path segments in the GOPATH. + // We note that runtime.Func.Name() returns the function name qualified by + // the import path, which does not include the GOPATH. Thus we can trim + // segments from the beginning of the file path until the number of path + // separators remaining is one more than the number of path separators in + // the function name. For example, given: + // + // GOPATH /home/user + // file /home/user/src/pkg/sub/file.go + // fn.Name() pkg/sub.Type.Method + // + // We want to produce: + // + // file[:idx] == /home/user/src/ + // file[idx:] == pkg/sub/file.go + // + // From this we can easily see that fn.Name() has one less path separator + // than our desired result for file[idx:]. We count separators from the + // end of the file path until it finds two more than in the function name + // and then move one character forward to preserve the initial path + // segment without a leading separator. 
+ const sep = "/" + i := len(file) + for n := strings.Count(funcName, sep) + 2; n > 0; n-- { + i = strings.LastIndex(file[:i], sep) + if i == -1 { + i = -len(sep) + break + } + } + // get back to 0 or trim the leading separator + return i + len(sep) +} + +// pkgFilePath returns the frame's filepath relative to the compile-time GOPATH, +// or its module path joined to its path relative to the module root. +// +// As of Go 1.11 there is no direct way to know the compile time GOPATH or +// module paths at runtime, but we can piece together the desired information +// from available information. We note that runtime.Frame.Function contains the +// function name qualified by the package path, which includes the module path +// but not the GOPATH. We can extract the package path from that and append the +// last segments of the file path to arrive at the desired package qualified +// file path. For example, given: +// +// GOPATH /home/user +// import path pkg/sub +// frame.File /home/user/src/pkg/sub/file.go +// frame.Function pkg/sub.Type.Method +// Desired return pkg/sub/file.go +// +// It appears that we simply need to trim ".Type.Method" from frame.Function and +// append "/" + path.Base(file). +// +// But there are other wrinkles. Although it is idiomatic to do so, the internal +// name of a package is not required to match the last segment of its import +// path. In addition, the introduction of modules in Go 1.11 allows working +// without a GOPATH. So we also must make these work right: +// +// GOPATH /home/user +// import path pkg/go-sub +// package name sub +// frame.File /home/user/src/pkg/go-sub/file.go +// frame.Function pkg/sub.Type.Method +// Desired return pkg/go-sub/file.go +// +// Module path pkg/v2 +// import path pkg/v2/go-sub +// package name sub +// frame.File /home/user/cloned-pkg/go-sub/file.go +// frame.Function pkg/v2/sub.Type.Method +// Desired return pkg/v2/go-sub/file.go +// +// We can handle all of these situations by using the package path extracted +// from frame.Function up to, but not including, the last segment as the prefix +// and the last two segments of frame.File as the suffix of the returned path. +// This preserves the existing behavior when working in a GOPATH without modules +// and a semantically equivalent behavior when used in module aware project. +func pkgFilePath(frame *runtime.Frame) string { + pre := pkgPrefix(frame.Function) + post := pathSuffix(frame.File) + if pre == "" { + return post + } + return pre + "/" + post +} + +// pkgPrefix returns the import path of the function's package with the final +// segment removed. +func pkgPrefix(funcName string) string { + const pathSep = "/" + end := strings.LastIndex(funcName, pathSep) + if end == -1 { + return "" + } + return funcName[:end] +} + +// pathSuffix returns the last two segments of path. +func pathSuffix(path string) string { + const pathSep = "/" + lastSep := strings.LastIndex(path, pathSep) + if lastSep == -1 { + return path + } + return path[strings.LastIndex(path[:lastSep], pathSep)+1:] +} + +var runtimePath string + +func init() { + var pcs [3]uintptr + runtime.Callers(0, pcs[:]) + frames := runtime.CallersFrames(pcs[:]) + frame, _ := frames.Next() + file := frame.File + + idx := pkgIndex(frame.File, frame.Function) + + runtimePath = file[:idx] + if runtime.GOOS == "windows" { + runtimePath = strings.ToLower(runtimePath) + } +} + +func inGoroot(c Call) bool { + file := c.frame.File + if len(file) == 0 || file[0] == '?' 
{ + return true + } + if runtime.GOOS == "windows" { + file = strings.ToLower(file) + } + return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go") +} + +// TrimRuntime returns a slice of the CallStack with the topmost entries from +// the go runtime removed. It considers any calls originating from unknown +// files, files under GOROOT, or _testmain.go as part of the runtime. +func (cs CallStack) TrimRuntime() CallStack { + for len(cs) > 0 && inGoroot(cs[len(cs)-1]) { + cs = cs[:len(cs)-1] + } + return cs +} diff --git a/vendor/github.com/lufia/plan9stats/README.md b/vendor/github.com/lufia/plan9stats/README.md index a21700c..70e5386 100644 --- a/vendor/github.com/lufia/plan9stats/README.md +++ b/vendor/github.com/lufia/plan9stats/README.md @@ -1,2 +1,10 @@ # plan9stats A module for retrieving statistics of Plan 9 + +[![GoDev][godev-image]][godev-url] +[![Actions Status][actions-image]][actions-url] + +[godev-image]: https://pkg.go.dev/badge/github.com/lufia/plan9stats +[godev-url]: https://pkg.go.dev/github.com/lufia/plan9stats +[actions-image]: https://github.com/lufia/plan9stats/workflows/Test/badge.svg?branch=main +[actions-url]: https://github.com/lufia/plan9stats/actions?workflow=Test diff --git a/vendor/github.com/lufia/plan9stats/cpu.go b/vendor/github.com/lufia/plan9stats/cpu.go index a101b91..eaff362 100644 --- a/vendor/github.com/lufia/plan9stats/cpu.go +++ b/vendor/github.com/lufia/plan9stats/cpu.go @@ -178,9 +178,12 @@ func ReadCPUStats(ctx context.Context, opts ...Option) (*CPUStats, error) { var up uint32parser pids := make([]uint32, len(names)) for i, s := range names { + if s == "trace" { + continue + } pids[i] = up.Parse(s) } - if up.err != nil { + if err := up.err; err != nil { return nil, err } sort.Slice(pids, func(i, j int) bool { diff --git a/vendor/github.com/lufia/plan9stats/disk.go b/vendor/github.com/lufia/plan9stats/disk.go new file mode 100644 index 0000000..4a4fa0c --- /dev/null +++ b/vendor/github.com/lufia/plan9stats/disk.go @@ -0,0 +1,116 @@ +package stats + +import ( + "bufio" + "bytes" + "context" + "os" + "path/filepath" + "strings" +) + +// Storage represents /dev/sdXX/ctl. +type Storage struct { + Name string + Model string + Capacity int64 + Partitions []*Partition +} + +// Partition represents a part of /dev/sdXX/ctl. +type Partition struct { + Name string + Start uint64 + End uint64 +} + +func ReadStorages(ctx context.Context, opts ...Option) ([]*Storage, error) { + cfg := newConfig(opts...) 
+ sdctl := filepath.Join(cfg.rootdir, "/dev/sdctl") + f, err := os.Open(sdctl) + if err != nil { + return nil, err + } + defer f.Close() + + var a []*Storage + scanner := bufio.NewScanner(f) + for scanner.Scan() { + fields := bytes.Split(scanner.Bytes(), delim) + if len(fields) == 0 { + continue + } + exp := string(fields[0]) + "*" + if !strings.HasPrefix(exp, "sd") { + continue + } + dir := filepath.Join(cfg.rootdir, "/dev", exp) + m, err := filepath.Glob(dir) + if err != nil { + return nil, err + } + for _, dir := range m { + s, err := readStorage(dir) + if err != nil { + return nil, err + } + a = append(a, s) + } + } + if err := scanner.Err(); err != nil { + return nil, err + } + return a, nil +} + +func readStorage(dir string) (*Storage, error) { + ctl := filepath.Join(dir, "ctl") + f, err := os.Open(ctl) + if err != nil { + return nil, err + } + defer f.Close() + + var s Storage + s.Name = filepath.Base(dir) + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Bytes() + switch { + case bytes.HasPrefix(line, []byte("inquiry ")): + s.Model = string(bytes.TrimSpace(line[7:])) + case bytes.HasPrefix(line, []byte("geometry ")): + fields := bytes.Split(line, delim) + if len(fields) < 3 { + continue + } + var p intParser + sec := p.ParseInt64(string(fields[1]), 10) + size := p.ParseInt64(string(fields[2]), 10) + if err := p.Err(); err != nil { + return nil, err + } + s.Capacity = sec * size + case bytes.HasPrefix(line, []byte("part ")): + fields := bytes.Split(line, delim) + if len(fields) < 4 { + continue + } + var p intParser + start := p.ParseUint64(string(fields[2]), 10) + end := p.ParseUint64(string(fields[3]), 10) + if err := p.Err(); err != nil { + return nil, err + } + s.Partitions = append(s.Partitions, &Partition{ + Name: string(fields[1]), + Start: start, + End: end, + }) + } + } + if err := scanner.Err(); err != nil { + return nil, err + } + return &s, nil +} diff --git a/vendor/github.com/lufia/plan9stats/host.go b/vendor/github.com/lufia/plan9stats/host.go index 957e903..a3921c0 100644 --- a/vendor/github.com/lufia/plan9stats/host.go +++ b/vendor/github.com/lufia/plan9stats/host.go @@ -109,12 +109,6 @@ func parseGauge(s string, r *Gauge) error { return nil } -type Storage struct { - Name string - Model string - Capacity int64 -} - type Interface struct { Name string Addr string @@ -177,7 +171,7 @@ func ReadHost(ctx context.Context, opts ...Option) (*Host, error) { } h.Sysname = name - a, err := readStorages(cfg.rootdir) + a, err := ReadStorages(ctx, opts...) 
if err != nil { return nil, err } @@ -203,80 +197,6 @@ func readSysname(rootdir string) (string, error) { return string(bytes.TrimSpace(b)), nil } -func readStorages(rootdir string) ([]*Storage, error) { - sdctl := filepath.Join(rootdir, "/dev/sdctl") - f, err := os.Open(sdctl) - if err != nil { - return nil, err - } - defer f.Close() - - var a []*Storage - scanner := bufio.NewScanner(f) - for scanner.Scan() { - fields := bytes.Split(scanner.Bytes(), delim) - if len(fields) == 0 { - continue - } - exp := string(fields[0]) + "*" - if !strings.HasPrefix(exp, "sd") { - continue - } - dir := filepath.Join(rootdir, "/dev", exp) - m, err := filepath.Glob(dir) - if err != nil { - return nil, err - } - for _, dir := range m { - s, err := readStorage(dir) - if err != nil { - return nil, err - } - a = append(a, s) - } - } - if err := scanner.Err(); err != nil { - return nil, err - } - return a, nil -} - -func readStorage(dir string) (*Storage, error) { - ctl := filepath.Join(dir, "ctl") - f, err := os.Open(ctl) - if err != nil { - return nil, err - } - defer f.Close() - - var s Storage - s.Name = filepath.Base(dir) - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Bytes() - switch { - case bytes.HasPrefix(line, []byte("inquiry")): - s.Model = string(bytes.TrimSpace(line[7:])) - case bytes.HasPrefix(line, []byte("geometry")): - fields := bytes.Split(line, delim) - if len(fields) < 3 { - continue - } - var p intParser - sec := p.ParseInt64(string(fields[1]), 10) - size := p.ParseInt64(string(fields[2]), 10) - if err := p.Err(); err != nil { - return nil, err - } - s.Capacity = sec * size - } - } - if err := scanner.Err(); err != nil { - return nil, err - } - return &s, nil -} - type IPStats struct { ID int // number of interface in ipifc dir Device string // associated physical device diff --git a/vendor/github.com/lufia/plan9stats/int.go b/vendor/github.com/lufia/plan9stats/int.go index db133c4..e3c9dc8 100644 --- a/vendor/github.com/lufia/plan9stats/int.go +++ b/vendor/github.com/lufia/plan9stats/int.go @@ -26,6 +26,15 @@ func (p *intParser) ParseInt64(s string, base int) int64 { return n } +func (p *intParser) ParseUint64(s string, base int) uint64 { + if p.err != nil { + return 0 + } + var n uint64 + n, p.err = strconv.ParseUint(s, base, 64) + return n +} + func (p *intParser) Err() error { return p.err } diff --git a/vendor/github.com/moby/go-archive/.gitattributes b/vendor/github.com/moby/go-archive/.gitattributes new file mode 100644 index 0000000..4bb139d --- /dev/null +++ b/vendor/github.com/moby/go-archive/.gitattributes @@ -0,0 +1,2 @@ +*.go -text diff=golang +*.go text eol=lf diff --git a/vendor/github.com/moby/go-archive/.gitignore b/vendor/github.com/moby/go-archive/.gitignore new file mode 100644 index 0000000..3f2bc47 --- /dev/null +++ b/vendor/github.com/moby/go-archive/.gitignore @@ -0,0 +1 @@ +/coverage.txt diff --git a/vendor/github.com/moby/go-archive/.golangci.yml b/vendor/github.com/moby/go-archive/.golangci.yml new file mode 100644 index 0000000..21439e5 --- /dev/null +++ b/vendor/github.com/moby/go-archive/.golangci.yml @@ -0,0 +1,33 @@ +version: "2" + +issues: + # Disable maximum issues count per one linter. + max-issues-per-linter: 0 + # Disable maximum count of issues with the same text. + max-same-issues: 0 + +linters: + enable: + - errorlint + - unconvert + - unparam + exclusions: + generated: disable + presets: + - comments + - std-error-handling + settings: + staticcheck: + # Enable all options, with some exceptions. 
+ # For defaults, see https://golangci-lint.run/usage/linters/#staticcheck + checks: + - all + - -QF1008 # Omit embedded fields from selector expression; https://staticcheck.dev/docs/checks/#QF1008 + - -ST1003 # Poorly chosen identifier; https://staticcheck.dev/docs/checks/#ST1003 + +formatters: + enable: + - gofumpt + - goimports + exclusions: + generated: disable diff --git a/vendor/github.com/moby/go-archive/LICENSE b/vendor/github.com/moby/go-archive/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/github.com/moby/go-archive/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/go-archive/archive.go b/vendor/github.com/moby/go-archive/archive.go new file mode 100644 index 0000000..7a105ae --- /dev/null +++ b/vendor/github.com/moby/go-archive/archive.go @@ -0,0 +1,1169 @@ +// Package archive provides helper functions for dealing with archive files. +package archive + +import ( + "archive/tar" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + "syscall" + "time" + + "github.com/containerd/log" + "github.com/moby/patternmatcher" + "github.com/moby/sys/sequential" + "github.com/moby/sys/user" + + "github.com/moby/go-archive/compression" + "github.com/moby/go-archive/tarheader" +) + +// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a +// tar, but that do not have their own header entry. +// +// The permissions mask is stored in a constant instead of locally to ensure that magic numbers do not +// proliferate in the codebase. The default value 0755 has been selected based on the default umask of 0022, and +// a convention of mkdir(1) calling mkdir(2) with permissions of 0777, resulting in a final value of 0755. +// +// This value is currently implementation-defined, and not captured in any cross-runtime specification. Thus, it is +// subject to change in Moby at any time -- image authors who require consistent or known directory permissions +// should explicitly control them by ensuring that header entries exist for any applicable path. +const ImpliedDirectoryMode = 0o755 + +type ( + // Compression is the state represents if compressed or not. + // + // Deprecated: use [compression.Compression]. + Compression = compression.Compression + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + + ChownOpts struct { + UID int + GID int + } + + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression compression.Compression + NoLchown bool + IDMap user.IdentityMapping + ChownOpts *ChownOpts + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. 
+ WhiteoutFormat WhiteoutFormat + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool + // Allow unpacking to succeed in spite of failures to set extended + // attributes on the unpacked files due to the destination filesystem + // not supporting them or a lack of permissions. Extended attributes + // were probably in the archive for a reason, so set this option at + // your own peril. + BestEffortXattrs bool + } +) + +// Archiver implements the Archiver interface and allows the reuse of most utility functions of +// this package with a pluggable Untar function. Also, to facilitate the passing of specific id +// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. +type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + IDMapping user.IdentityMapping +} + +// NewDefaultArchiver returns a new Archiver without any IdentityMapping +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar} +} + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. +type breakoutError error + +const ( + Uncompressed = compression.None // Deprecated: use [compression.None]. + Bzip2 = compression.Bzip2 // Deprecated: use [compression.Bzip2]. + Gzip = compression.Gzip // Deprecated: use [compression.Gzip]. + Xz = compression.Xz // Deprecated: use [compression.Xz]. + Zstd = compression.Zstd // Deprecated: use [compression.Zstd]. +) + +const ( + AUFSWhiteoutFormat WhiteoutFormat = 0 // AUFSWhiteoutFormat is the default format for whiteouts + OverlayWhiteoutFormat WhiteoutFormat = 1 // OverlayWhiteoutFormat formats whiteout according to the overlay standard. +) + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := compression.DecompressStream(file) + if err != nil { + return false + } + defer rdr.Close() + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. +// +// Deprecated: use [compression.Detect]. +func DetectCompression(source []byte) compression.Compression { + return compression.Detect(source) +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +// +// Deprecated: use [compression.DecompressStream]. +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + return compression.DecompressStream(archive) +} + +// CompressStream compresses the dest with specified compression algorithm. +// +// Deprecated: use [compression.CompressStream]. +func CompressStream(dest io.Writer, comp compression.Compression) (io.WriteCloser, error) { + return compression.CompressStream(dest, comp) +} + +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. 
If the file does not exist both +// header and content will be nil. +type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. +func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + if header.Name == "" { + header.Name = name + } + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if err := copyWithBuffer(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + }() + return pipeReader +} + +// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi. +// +// Deprecated: use [tarheader.FileInfoHeaderNoLookups]. +func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { + return tarheader.FileInfoHeaderNoLookups(fi, link) +} + +// FileInfoHeader creates a populated Header from fi. +// +// Compared to the archive/tar package, this function fills in less information +// but is safe to call from a chrooted process. The AccessTime and ChangeTime +// fields are not set in the returned header, ModTime is truncated to one-second +// precision, and the Uname and Gname fields are only set when fi is a FileInfo +// value returned from tar.Header.FileInfo(). +func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tarheader.FileInfoHeaderNoLookups(fi, link) + if err != nil { + return nil, err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + hdr.Name = canonicalTarName(name, fi.IsDir()) + return hdr, nil +} + +const paxSchilyXattr = "SCHILY.xattr." 
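Editor's note: the following is a minimal, self-contained sketch (not part of the vendored file) of how TarModifierFunc and ReplaceFileTarWrapper fit together; the archive name "app.tar", the entry "etc/config.yaml", and the payload are illustrative only.

	package main

	import (
		"archive/tar"
		"io"
		"os"

		archive "github.com/moby/go-archive"
	)

	func main() {
		src, err := os.Open("app.tar")
		if err != nil {
			panic(err)
		}
		defer src.Close()

		mods := map[string]archive.TarModifierFunc{
			"etc/config.yaml": func(path string, hdr *tar.Header, _ io.Reader) (*tar.Header, []byte, error) {
				if hdr == nil {
					// Entry was absent from the stream; returning a fresh
					// header appends it at the end.
					hdr = &tar.Header{Name: path, Mode: 0o644, Typeflag: tar.TypeReg}
				}
				// hdr.Size is filled in by ReplaceFileTarWrapper.
				return hdr, []byte("debug: true\n"), nil
			},
		}

		out := archive.ReplaceFileTarWrapper(src, mods)
		defer out.Close()
		if _, err := io.Copy(os.Stdout, out); err != nil {
			panic(err)
		}
	}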
+ +// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem +// to a tar header +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + const ( + // Values based on linux/include/uapi/linux/capability.h + xattrCapsSz2 = 20 + versionOffset = 3 + vfsCapRevision2 = 2 + vfsCapRevision3 = 3 + ) + capability, _ := lgetxattr(path, "security.capability") + if capability != nil { + if capability[versionOffset] == vfsCapRevision3 { + // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no + // sense outside the user namespace the archive is built in. + capability[versionOffset] = vfsCapRevision2 + capability = capability[:xattrCapsSz2] + } + if hdr.PAXRecords == nil { + hdr.PAXRecords = make(map[string]string) + } + hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability) + } + return nil +} + +type tarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) +} + +type tarAppender struct { + TarWriter *tar.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + IdentityMapping user.IdentityMapping + ChownOpts *ChownOpts + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. + WhiteoutConverter tarWhiteoutConverter +} + +func newTarAppender(idMapping user.IdentityMapping, writer io.Writer, chownOpts *ChownOpts) *tarAppender { + return &tarAppender{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + IdentityMapping: idMapping, + ChownOpts: chownOpts, + } +} + +// canonicalTarName provides a platform-independent and consistent POSIX-style +// path for files and directories to be archived regardless of the platform. +func canonicalTarName(name string, isDir bool) string { + name = filepath.ToSlash(name) + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name +} + +// addTarFile adds to the tar archive a file from `path` as `name` +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return err + } + } + + hdr, err := FileInfoHeader(name, fi, link) + if err != nil { + return err + } + if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return err + } + + // if it's not a directory and has more than 1 link, + // it's hard linked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + inode, err := getInodeFromStat(fi.Sys()) + if err != nil { + return err + } + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + // check whether the file is overlayfs whiteout + // if yes, skip re-mapping container ID mappings. + isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 + + // handle re-mapping container ID mappings back to host ID mappings before + // writing tar headers/files. 
We skip whiteout files because they were written
+	// by the kernel and already have proper ownership relative to the host.
+	if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() {
+		uid, gid, err := getFileUIDGID(fi.Sys())
+		if err != nil {
+			return err
+		}
+		hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(uid, gid)
+		if err != nil {
+			return err
+		}
+	}
+
+	// explicitly override with ChownOpts
+	if ta.ChownOpts != nil {
+		hdr.Uid = ta.ChownOpts.UID
+		hdr.Gid = ta.ChownOpts.GID
+	}
+
+	if ta.WhiteoutConverter != nil {
+		wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
+		if err != nil {
+			return err
+		}
+
+		// If a new whiteout file exists, write the original hdr, then
+		// replace hdr with wo to be written after. Whiteouts should
+		// always be written after the original. Note that the original
+		// hdr may itself have been converted to a whiteout when a
+		// whiteout header is returned.
+		if wo != nil {
+			if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+				return err
+			}
+			if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+				return fmt.Errorf("tar: cannot use whiteout for non-empty file")
+			}
+			hdr = wo
+		}
+	}
+
+	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+		return err
+	}
+
+	if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+		// We use sequential file access to avoid depleting the standby list on
+		// Windows. On Linux, this equates to a regular os.Open.
+		file, err := sequential.Open(path)
+		if err != nil {
+			return err
+		}
+
+		err = copyWithBuffer(ta.TarWriter, file)
+		file.Close()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, opts *TarOptions) error {
+	var (
+		Lchown                     = true
+		inUserns, bestEffortXattrs bool
+		chownOpts                  *ChownOpts
+	)
+
+	// TODO(thaJeztah): make opts a required argument.
+	if opts != nil {
+		Lchown = !opts.NoLchown
+		inUserns = opts.InUserNS // TODO(thaJeztah): consider deprecating opts.InUserNS and detect locally.
+		chownOpts = opts.ChownOpts
+		bestEffortXattrs = opts.BestEffortXattrs
+	}
+
+	// hdr.Mode is in linux format, which we can use for syscalls,
+	// but for os.Foo() calls we need the mode converted to os.FileMode,
+	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+	hdrInfo := hdr.FileInfo()
+
+	switch hdr.Typeflag {
+	case tar.TypeDir:
+		// Create directory unless it exists as a directory already.
+		// In that case we just want to merge the two.
+		if fi, err := os.Lstat(path); err != nil || !fi.IsDir() {
+			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
+				return err
+			}
+		}
+
+	case tar.TypeReg:
+		// Source is a regular file. We use sequential file access to avoid depleting
+		// the standby list on Windows. On Linux, this equates to a regular os.OpenFile.
+		file, err := sequential.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+		if err != nil {
+			return err
+		}
+		if err := copyWithBuffer(file, reader); err != nil {
+			_ = file.Close()
+			return err
+		}
+		_ = file.Close()
+
+	case tar.TypeBlock, tar.TypeChar:
+		if inUserns { // cannot create devices in a userns
+			log.G(context.TODO()).WithFields(log.Fields{"path": path, "type": hdr.Typeflag}).Debug("skipping device nodes in a userns")
+			return nil
+		}
+		// Handle this in an OS-specific way.
+		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+			return err
+		}
+
+	case tar.TypeFifo:
+		// Handle this in an OS-specific way.
+		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+			if inUserns && errors.Is(err, syscall.EPERM) {
+				// In most cases, a fifo cannot be created when running in a user namespace.
+				log.G(context.TODO()).WithFields(log.Fields{"error": err, "path": path, "type": hdr.Typeflag}).Debug("creating fifo node in a userns")
+				return nil
+			}
+			return err
+		}
+
+	case tar.TypeLink:
+		// #nosec G305 -- The target path is checked for path traversal.
+		targetPath := filepath.Join(extractDir, hdr.Linkname)
+		// check for hardlink breakout
+		if !strings.HasPrefix(targetPath, extractDir) {
+			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+		}
+		if err := os.Link(targetPath, path); err != nil {
+			return err
+		}
+
+	case tar.TypeSymlink:
+		// path -> hdr.Linkname = targetPath
+		// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
+		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // #nosec G305 -- The target path is checked for path traversal.
+
+		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
+		// that symlink would first have to be created, which would be caught earlier, at this very check:
+		if !strings.HasPrefix(targetPath, extractDir) {
+			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
+		}
+		if err := os.Symlink(hdr.Linkname, path); err != nil {
+			return err
+		}
+
+	case tar.TypeXGlobalHeader:
+		log.G(context.TODO()).Debug("PAX Global Extended Headers found and ignored")
+		return nil
+
+	default:
+		return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
+	}
+
+	// Lchown is not supported on Windows.
+	if Lchown && runtime.GOOS != "windows" {
+		if chownOpts == nil {
+			chownOpts = &ChownOpts{UID: hdr.Uid, GID: hdr.Gid}
+		}
+		if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
+			var msg string
+			if inUserns && errors.Is(err, syscall.EINVAL) {
+				msg = " (try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)"
+			}
+			return fmt.Errorf("failed to Lchown %q for UID %d, GID %d%s: %w", path, hdr.Uid, hdr.Gid, msg, err)
+		}
+	}
+
+	var xattrErrs []string
+	for key, value := range hdr.PAXRecords {
+		xattr, ok := strings.CutPrefix(key, paxSchilyXattr)
+		if !ok {
+			continue
+		}
+		if err := lsetxattr(path, xattr, []byte(value), 0); err != nil {
+			if bestEffortXattrs && errors.Is(err, syscall.ENOTSUP) || errors.Is(err, syscall.EPERM) {
+				// EPERM occurs if modifying xattrs is not allowed. This can
+				// happen when running in userns with restrictions (ChromeOS).
+				xattrErrs = append(xattrErrs, err.Error())
+				continue
+			}
+			return err
+		}
+	}
+
+	if len(xattrErrs) > 0 {
+		log.G(context.TODO()).WithFields(log.Fields{
+			"errors": xattrErrs,
+		}).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
+	}
+
+	// There is no LChmod, so ignore mode for symlink.
Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := boundTime(latestTime(hdr.AccessTime, hdr.ModTime)) + mTime := boundTime(hdr.ModTime) + + // chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := chtimes(path, aTime, mTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := chtimes(path, aTime, mTime); err != nil { + return err + } + } else { + if err := lchtimes(path, aTime, mTime); err != nil { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, comp compression.Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: comp}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + tb, err := NewTarballer(srcPath, options) + if err != nil { + return nil, err + } + go tb.Do() + return tb.Reader(), nil +} + +// Tarballer is a lower-level interface to TarWithOptions which gives the caller +// control over which goroutine the archiving operation executes on. +type Tarballer struct { + srcPath string + options *TarOptions + pm *patternmatcher.PatternMatcher + pipeReader *io.PipeReader + pipeWriter *io.PipeWriter + compressWriter io.WriteCloser + whiteoutConverter tarWhiteoutConverter +} + +// NewTarballer constructs a new tarballer. The arguments are the same as for +// TarWithOptions. +func NewTarballer(srcPath string, options *TarOptions) (*Tarballer, error) { + pm, err := patternmatcher.New(options.ExcludePatterns) + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := compression.CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + return &Tarballer{ + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath: addLongPathPrefix(srcPath), + options: options, + pm: pm, + pipeReader: pipeReader, + pipeWriter: pipeWriter, + compressWriter: compressWriter, + whiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat), + }, nil +} + +// Reader returns the reader for the created archive. +func (t *Tarballer) Reader() io.ReadCloser { + return t.pipeReader +} + +// Do performs the archiving operation in the background. The resulting archive +// can be read from t.Reader(). Do should only be called once on each Tarballer +// instance. +func (t *Tarballer) Do() { + ta := newTarAppender( + t.options.IDMap, + t.compressWriter, + t.options.ChownOpts, + ) + ta.WhiteoutConverter = t.whiteoutConverter + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + log.G(context.TODO()).Errorf("Can't close tar writer: %s", err) + } + if err := t.compressWriter.Close(); err != nil { + log.G(context.TODO()).Errorf("Can't close compress writer: %s", err) + } + if err := t.pipeWriter.Close(); err != nil { + log.G(context.TODO()).Errorf("Can't close pipe writer: %s", err) + } + }() + + // In general we log errors here but ignore them because + // during e.g. 
a diff operation the container can continue
+	// mutating the filesystem and we can see transient errors
+	// from this.
+
+	stat, err := os.Lstat(t.srcPath)
+	if err != nil {
+		return
+	}
+
+	if !stat.IsDir() {
+		// We can't later join a non-dir with any includes because the
+		// 'walk' will error if "file/." is stat-ed and "file" is not a
+		// directory. So, we must split the source path and use the
+		// basename as the include.
+		if len(t.options.IncludeFiles) > 0 {
+			log.G(context.TODO()).Warn("Tar: Can't archive a file with includes")
+		}
+
+		dir, base := SplitPathDirEntry(t.srcPath)
+		t.srcPath = dir
+		t.options.IncludeFiles = []string{base}
+	}
+
+	if len(t.options.IncludeFiles) == 0 {
+		t.options.IncludeFiles = []string{"."}
+	}
+
+	seen := make(map[string]bool)
+
+	for _, include := range t.options.IncludeFiles {
+		rebaseName := t.options.RebaseNames[include]
+
+		var (
+			parentMatchInfo []patternmatcher.MatchInfo
+			parentDirs      []string
+		)
+
+		walkRoot := getWalkRoot(t.srcPath, include)
+		// TODO(thaJeztah): should this error be handled?
+		_ = filepath.WalkDir(walkRoot, func(filePath string, f os.DirEntry, err error) error {
+			if err != nil {
+				log.G(context.TODO()).Errorf("Tar: Can't stat file %s to tar: %s", t.srcPath, err)
+				return nil
+			}
+
+			relFilePath, err := filepath.Rel(t.srcPath, filePath)
+			if err != nil || (!t.options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
+				// Error getting relative path OR we are looking
+				// at the source directory path. Skip in both situations.
+				return nil
+			}
+
+			if t.options.IncludeSourceDir && include == "." && relFilePath != "." {
+				relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
+			}
+
+			skip := false
+
+			// If "include" is an exact match for the current file
+			// then even if there's an "excludePatterns" pattern that
+			// matches it, don't skip it. In other words, assume an explicit
+			// 'include' is asking for that file no matter what - which is true
+			// for some files, like .dockerignore and Dockerfile (sometimes).
+			if include != relFilePath {
+				for len(parentDirs) != 0 {
+					lastParentDir := parentDirs[len(parentDirs)-1]
+					if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) {
+						break
+					}
+					parentDirs = parentDirs[:len(parentDirs)-1]
+					parentMatchInfo = parentMatchInfo[:len(parentMatchInfo)-1]
+				}
+
+				var matchInfo patternmatcher.MatchInfo
+				if len(parentMatchInfo) != 0 {
+					skip, matchInfo, err = t.pm.MatchesUsingParentResults(relFilePath, parentMatchInfo[len(parentMatchInfo)-1])
+				} else {
+					skip, matchInfo, err = t.pm.MatchesUsingParentResults(relFilePath, patternmatcher.MatchInfo{})
+				}
+				if err != nil {
+					log.G(context.TODO()).Errorf("Error matching %s: %v", relFilePath, err)
+					return err
+				}
+
+				if f.IsDir() {
+					parentDirs = append(parentDirs, relFilePath)
+					parentMatchInfo = append(parentMatchInfo, matchInfo)
+				}
+			}
+
+			if skip {
+				// If we want to skip this file and it's a directory
+				// then we should first check to see if there's an
+				// excludes pattern (e.g. !dir/file) that starts with this
+				// dir. If so then we can't skip this dir.
+
+				// It's not a dir, so we can just return/skip.
+				if !f.IsDir() {
+					return nil
+				}
+
+				// No exceptions (!...) in patterns, so just skip the dir.
+				if !t.pm.Exclusions() {
+					return filepath.SkipDir
+				}
+
+				dirSlash := relFilePath + string(filepath.Separator)
+
+				for _, pat := range t.pm.Patterns() {
+					if !pat.Exclusion() {
+						continue
+					}
+					if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
+						// found a match - so can't skip this dir
+						return nil
+					}
+				}
+
+				// No matching exclusion dir, so just skip the dir.
+				return filepath.SkipDir
+			}
+
+			if seen[relFilePath] {
+				return nil
+			}
+			seen[relFilePath] = true
+
+			// Rename the base resource.
+			if rebaseName != "" {
+				var replacement string
+				if rebaseName != string(filepath.Separator) {
+					// Special case the root directory to replace with an
+					// empty string instead so that we don't end up with
+					// double slashes in the paths.
+					replacement = rebaseName
+				}
+
+				relFilePath = strings.Replace(relFilePath, include, replacement, 1)
+			}
+
+			if err := ta.addTarFile(filePath, relFilePath); err != nil {
+				log.G(context.TODO()).Errorf("Can't add file %s to tar: %s", filePath, err)
+				// if the pipe is broken, stop writing the tar stream to it
+				if errors.Is(err, io.ErrClosedPipe) {
+					return err
+				}
+			}
+			return nil
+		})
+	}
+}
+
+// Unpack unpacks the decompressedArchive to dest with options.
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+	tr := tar.NewReader(decompressedArchive)
+
+	var dirs []*tar.Header
+	whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
+
+	// Iterate through the files in the archive.
+loop:
+	for {
+		hdr, err := tr.Next()
+		if errors.Is(err, io.EOF) {
+			// end of tar archive
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		// ignore XGlobalHeader early to avoid creating parent directories for them
+		if hdr.Typeflag == tar.TypeXGlobalHeader {
+			log.G(context.TODO()).Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name)
+			continue
+		}
+
+		// Normalize name, for safety and for a simple is-root check.
+		// This keeps "../" as-is, but normalizes "/../" to "/". On Windows:
+		// this keeps "..\" as-is, but normalizes "\..\" to "\".
+		hdr.Name = filepath.Clean(hdr.Name)
+
+		for _, exclude := range options.ExcludePatterns {
+			if strings.HasPrefix(hdr.Name, exclude) {
+				continue loop
+			}
+		}
+
+		// Ensure that the parent directory exists.
+		err = createImpliedDirectories(dest, hdr, options)
+		if err != nil {
+			return err
+		}
+
+		// #nosec G305 -- The joined path is checked for path traversal.
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return err
+		}
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+
+		// If the path exists we almost always just want to remove and replace it.
+		// The only exception is when it is a directory *and* the file from
+		// the layer is also a directory. Then we want to merge them (i.e.
+		// just apply the metadata from the layer).
+		if fi, err := os.Lstat(path); err == nil {
+			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+				// If NoOverwriteDirNonDir is true then we cannot replace
+				// an existing directory with a non-directory from the archive.
+				return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
+			}
+
+			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+				// If NoOverwriteDirNonDir is true then we cannot replace
+				// an existing non-directory with a directory from the archive.
+				return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
+			}
+
+			if fi.IsDir() && hdr.Name == "." {
+				continue
+			}
+
+			if !fi.IsDir() || hdr.Typeflag != tar.TypeDir {
+				if err := os.RemoveAll(path); err != nil {
+					return err
+				}
+			}
+		}
+
+		if err := remapIDs(options.IDMap, hdr); err != nil {
+			return err
+		}
+
+		if whiteoutConverter != nil {
+			writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
+			if err != nil {
+				return err
+			}
+			if !writeFile {
+				continue
+			}
+		}
+
+		if err := createTarFile(path, dest, hdr, tr, options); err != nil {
+			return err
+		}
+
+		// Directory mtimes must be handled at the end to avoid further
+		// file creation in them modifying the directory mtime.
+		if hdr.Typeflag == tar.TypeDir {
+			dirs = append(dirs, hdr)
+		}
+	}
+
+	for _, hdr := range dirs {
+		// #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice.
+		path := filepath.Join(dest, hdr.Name)
+
+		if err := chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// createImpliedDirectories will create all parent directories of the current path with default permissions, if they do
+// not already exist. This is possible as the tar format supports 'implicit' directories, where their existence is
+// defined by the paths of files in the tar, but there are no header entries for the directories themselves, and thus
+// we must both create them and choose metadata like permissions.
+//
+// The caller should have performed filepath.Clean(hdr.Name), so hdr.Name will now be in the filepath format for the OS
+// on which the daemon is running. This precondition is required because this function assumes an OS-specific path
+// separator when checking that a path is not the root.
+func createImpliedDirectories(dest string, hdr *tar.Header, options *TarOptions) error {
+	// Not the root directory, ensure that the parent directory exists.
+	if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+		parent := filepath.Dir(hdr.Name)
+		parentPath := filepath.Join(dest, parent)
+		if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+			// RootPair() is confined inside this conditional as most cases will not require a call, so we can spend some
+			// unneeded function calls in the uncommon case to encapsulate logic -- implied directories are a niche
+			// usage that reduces the portability of an image.
+			uid, gid := options.IDMap.RootPair()
+
+			err = user.MkdirAllAndChown(parentPath, ImpliedDirectoryMode, uid, gid, user.WithOnlyNew)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+//
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
+	return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return errors.New("empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := compression.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + archive, err := Tar(src, compression.None) + if err != nil { + return err + } + defer archive.Close() + return archiver.Untar(archive, dst, &TarOptions{ + IDMap: archiver.IDMapping, + }) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + return archiver.Untar(archive, dst, &TarOptions{ + IDMap: archiver.IDMapping, + }) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this Archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + uid, gid := archiver.IDMapping.RootPair() + // Create dst, copy src's content into it + if err := user.MkdirAllAndChown(dst, 0o755, uid, gid, user.WithOnlyNew); err != nil { + return err + } + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return errors.New("can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. 
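+	// For example (editorial illustration; paths are hypothetical):
+	// CopyFileWithTar("/tmp/app.conf", "/opt/dir/") resolves dst to
+	// "/opt/dir/app.conf", so the copy keeps the source's base name.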
+ if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := os.MkdirAll(filepath.Dir(dst), 0o700); err != nil { + return err + } + + r, w := io.Pipe() + errC := make(chan error, 1) + + go func() { + defer close(errC) + + errC <- func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tarheader.FileInfoHeaderNoLookups(srcSt, "") + if err != nil { + return err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + if err := remapIDs(archiver.IDMapping, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if err := copyWithBuffer(tw, srcF); err != nil { + return err + } + return nil + }() + }() + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + err = archiver.Untar(r, filepath.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +// IdentityMapping returns the IdentityMapping of the archiver. +func (archiver *Archiver) IdentityMapping() user.IdentityMapping { + return archiver.IDMapping +} + +func remapIDs(idMapping user.IdentityMapping, hdr *tar.Header) error { + uid, gid, err := idMapping.ToHost(hdr.Uid, hdr.Gid) + hdr.Uid, hdr.Gid = uid, gid + return err +} diff --git a/vendor/github.com/moby/go-archive/archive_linux.go b/vendor/github.com/moby/go-archive/archive_linux.go new file mode 100644 index 0000000..7b6c3e0 --- /dev/null +++ b/vendor/github.com/moby/go-archive/archive_linux.go @@ -0,0 +1,107 @@ +package archive + +import ( + "archive/tar" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/moby/sys/userns" + "golang.org/x/sys/unix" +) + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + return overlayWhiteoutConverter{} + } + return nil +} + +type overlayWhiteoutConverter struct{} + +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, _ error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0o600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir == 0 { + // FIXME(thaJeztah): return a sentinel error instead of nil, nil + return nil, nil + } + + opaqueXattrName := "trusted.overlay.opaque" + if userns.RunningInUserNS() { + opaqueXattrName = "user.overlay.opaque" + } + + // convert opaque dirs to AUFS format by writing an empty file with the prefix + opaque, err := lgetxattr(path, opaqueXattrName) + if err != nil { + return nil, err + } + if len(opaque) != 1 || opaque[0] != 'y' { + // FIXME(thaJeztah): return a sentinel error instead of nil, nil + return nil, nil + } + delete(hdr.PAXRecords, paxSchilyXattr+opaqueXattrName) + + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + return &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), // #nosec G305 -- An 
archive is being created, not extracted. + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + }, nil +} + +func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + opaqueXattrName := "trusted.overlay.opaque" + if userns.RunningInUserNS() { + opaqueXattrName = "user.overlay.opaque" + } + + err := unix.Setxattr(dir, opaqueXattrName, []byte{'y'}, 0) + if err != nil { + return false, fmt.Errorf("setxattr('%s', %s=y): %w", dir, opaqueXattrName, err) + } + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + return false, fmt.Errorf("failed to mknod('%s', S_IFCHR, 0): %w", originalPath, err) + } + if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} diff --git a/vendor/github.com/moby/go-archive/archive_other.go b/vendor/github.com/moby/go-archive/archive_other.go new file mode 100644 index 0000000..6495549 --- /dev/null +++ b/vendor/github.com/moby/go-archive/archive_other.go @@ -0,0 +1,7 @@ +//go:build !linux + +package archive + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + return nil +} diff --git a/vendor/github.com/moby/go-archive/archive_unix.go b/vendor/github.com/moby/go-archive/archive_unix.go new file mode 100644 index 0000000..3a9f5b0 --- /dev/null +++ b/vendor/github.com/moby/go-archive/archive_unix.go @@ -0,0 +1,86 @@ +//go:build !windows + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "strings" + "syscall" + + "golang.org/x/sys/unix" +) + +// addLongPathPrefix adds the Windows long path prefix to the path provided if +// it does not already have it. It is a no-op on platforms other than Windows. +func addLongPathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
+func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func getInodeFromStat(stat interface{}) (uint64, error) { + s, ok := stat.(*syscall.Stat_t) + if !ok { + // FIXME(thaJeztah): this should likely return an error; see https://github.com/moby/moby/pull/49493#discussion_r1979152897 + return 0, nil + } + return s.Ino, nil +} + +func getFileUIDGID(stat interface{}) (int, int, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return 0, 0, errors.New("cannot convert stat value to syscall.Stat_t") + } + return int(s.Uid), int(s.Gid), nil +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo. +// +// Creating device nodes is not supported when running in a user namespace, +// produces a [syscall.EPERM] in most cases. +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + mode := uint32(hdr.Mode & 0o7777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + return mknod(path, mode, unix.Mkdev(uint32(hdr.Devmajor), uint32(hdr.Devminor))) +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/moby/go-archive/archive_windows.go b/vendor/github.com/moby/go-archive/archive_windows.go new file mode 100644 index 0000000..0e3e316 --- /dev/null +++ b/vendor/github.com/moby/go-archive/archive_windows.go @@ -0,0 +1,62 @@ +package archive + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" +) + +// longPathPrefix is the longpath prefix for Windows file paths. +const longPathPrefix = `\\?\` + +// addLongPathPrefix adds the Windows long path prefix to the path provided if +// it does not already have it. It is a no-op on platforms other than Windows. +// +// addLongPathPrefix is a copy of [github.com/docker/docker/pkg/longpath.AddPrefix]. +func addLongPathPrefix(srcPath string) string { + if strings.HasPrefix(srcPath, longPathPrefix) { + return srcPath + } + if strings.HasPrefix(srcPath, `\\`) { + // This is a UNC path, so we need to add 'UNC' to the path as well. + return longPathPrefix + `UNC` + srcPath[1:] + } + return longPathPrefix + srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + // Remove group- and world-writable bits. + perm &= 0o755 + + // Add the x bit: make everything +x on Windows + return perm | 0o111 +} + +func getInodeFromStat(stat interface{}) (uint64, error) { + // do nothing. 
no notion of Inode in stat on Windows + return 0, nil +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} + +func getFileUIDGID(stat interface{}) (int, int, error) { + // no notion of file ownership mapping yet on Windows + return 0, 0, nil +} diff --git a/vendor/github.com/moby/go-archive/changes.go b/vendor/github.com/moby/go-archive/changes.go new file mode 100644 index 0000000..02a0372 --- /dev/null +++ b/vendor/github.com/moby/go-archive/changes.go @@ -0,0 +1,430 @@ +package archive + +import ( + "archive/tar" + "bytes" + "context" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/containerd/log" + "github.com/moby/sys/user" +) + +// ChangeType represents the change type. +type ChangeType int + +const ( + ChangeModify = 0 // ChangeModify represents the modify operation. + ChangeAdd = 1 // ChangeAdd represents the add operation. + ChangeDelete = 2 // ChangeDelete represents the delete operation. +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar doesn't have sub-second mtime precision. The go tar +// writer (1.10+) does when using PAX format, but we round times to seconds +// to ensure archives have the same hashes for backwards compatibility. +// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4. +// +// Non-sub-second is problematic when we apply changes via tar +// files. 
We handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a.Equal(b) || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return collectChanges(layers, rw, aufsDeletedFile, aufsMetadataSkip) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return skip, err +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +type ( + skipChange func(string) (bool, error) + deleteChange func(string, string, os.FileInfo) (string, error) +) + +func collectChanges(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. 
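+		// Editorial illustration (hypothetical paths): if /foo/bar/file.txt is
+		// the only addition and /foo/bar's own stat data is unchanged, the
+		// result still contains both entries:
+		//
+		//	C /foo/bar
+		//	A /foo/bar/file.txt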
+ if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + name string + stat fs.FileInfo + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + !bytes.Equal(oldChild.capability, newChild.capability) { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. 
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var oldRoot, newRoot *FileInfo + if oldDir == "" { + emptyDir, err := os.MkdirTemp("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + log.G(context.TODO()).Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change, idMap user.IdentityMapping) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := newTarAppender(idMap, writer, nil) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + log.G(context.TODO()).Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + log.G(context.TODO()).Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. 
+		if err := ta.TarWriter.Close(); err != nil {
+			log.G(context.TODO()).Debugf("Can't close layer: %s", err)
+		}
+		if err := writer.Close(); err != nil {
+			log.G(context.TODO()).Debugf("failed to close Changes writer: %s", err)
+		}
+	}()
+	return reader, nil
+}
diff --git a/vendor/github.com/moby/go-archive/changes_linux.go b/vendor/github.com/moby/go-archive/changes_linux.go
new file mode 100644
index 0000000..8289fe1
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/changes_linux.go
@@ -0,0 +1,274 @@
+package archive
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save up to seconds on large
+// images.
+type walker struct {
+	dir1  string
+	dir2  string
+	root1 *FileInfo
+	root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// to generate a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+	w := &walker{
+		dir1:  dir1,
+		dir2:  dir2,
+		root1: newRootFileInfo(),
+		root2: newRootFileInfo(),
+	}
+
+	i1, err := os.Lstat(w.dir1)
+	if err != nil {
+		return nil, nil, err
+	}
+	i2, err := os.Lstat(w.dir2)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if err := w.walk("/", i1, i2); err != nil {
+		return nil, nil, err
+	}
+
+	return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
+	if fi == nil {
+		return nil
+	}
+	parent := root.LookUp(filepath.Dir(path))
+	if parent == nil {
+		return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
+	}
+	info := &FileInfo{
+		name:     filepath.Base(path),
+		children: make(map[string]*FileInfo),
+		parent:   parent,
+	}
+	cpath := filepath.Join(dir, path)
+	info.stat = fi
+	info.capability, _ = lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
+	parent.children[info.name] = info
+	return nil
+}
+
+// Walk a subtree rooted at the same path in both trees being iterated.
For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. + var names []string + ix1 := 0 + ix2 := 0 + + for ix1 < len(names1) && ix2 < len(names2) { + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch strings.Compare(ni1.name, ni2.name) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. 
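+// For example, a directory containing "a" (inode 7) and "b" (inode 9) yields
+// [{a 7} {b 9}], sorted by name so both trees can be merge-walked in order.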
+func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of unix.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) // #nosec G103 -- Ignore "G103: Use of unsafe calls should be audited" + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + b := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) // #nosec G103 -- Ignore "G103: Use of unsafe calls should be audited" + name := string(b[0:clen(b[:])]) + if name == "." || name == ".." { // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} diff --git a/vendor/github.com/moby/go-archive/changes_other.go b/vendor/github.com/moby/go-archive/changes_other.go new file mode 100644 index 0000000..a8a3a5a --- /dev/null +++ b/vendor/github.com/moby/go-archive/changes_other.go @@ -0,0 +1,95 @@ +//go:build !linux + +package archive + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.WalkDir(sourceDir, func(path string, _ os.DirEntry, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. 
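+		// e.g. a relPath of \\foo becomes \foo.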
+		if runtime.GOOS == "windows" {
+			if strings.HasPrefix(relPath, `\\`) {
+				relPath = relPath[1:]
+			}
+		}
+
+		if relPath == string(os.PathSeparator) {
+			return nil
+		}
+
+		parent := root.LookUp(filepath.Dir(relPath))
+		if parent == nil {
+			return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+		}
+
+		s, err := os.Lstat(path)
+		if err != nil {
+			return err
+		}
+
+		info := &FileInfo{
+			name:     filepath.Base(relPath),
+			children: make(map[string]*FileInfo),
+			parent:   parent,
+			stat:     s,
+		}
+
+		info.capability, _ = lgetxattr(path, "security.capability")
+
+		parent.children[info.name] = info
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return root, nil
+}
diff --git a/vendor/github.com/moby/go-archive/changes_unix.go b/vendor/github.com/moby/go-archive/changes_unix.go
new file mode 100644
index 0000000..4dd98bd
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/changes_unix.go
@@ -0,0 +1,43 @@
+//go:build !windows
+
+package archive
+
+import (
+	"io/fs"
+	"os"
+	"syscall"
+)
+
+func statDifferent(oldStat fs.FileInfo, newStat fs.FileInfo) bool {
+	oldSys := oldStat.Sys().(*syscall.Stat_t)
+	newSys := newStat.Sys().(*syscall.Stat_t)
+	// Don't look at size for dirs; it's not a good measure of change.
+	if oldStat.Mode() != newStat.Mode() ||
+		oldSys.Uid != newSys.Uid ||
+		oldSys.Gid != newSys.Gid ||
+		oldSys.Rdev != newSys.Rdev ||
+		// Don't look at size or modification time for dirs; it's not a good
+		// measure of change. See https://github.com/moby/moby/issues/9874
+		// for a description of the issue with modification time, and
+		// https://github.com/moby/moby/pull/11422 for the change.
+		// (Note that in the Windows implementation of this function,
+		// modification time IS taken as a change). See
+		// https://github.com/moby/moby/pull/37982 for more information.
+		(!oldStat.Mode().IsDir() &&
+			(!sameFsTime(oldStat.ModTime(), newStat.ModTime()) || (oldStat.Size() != newStat.Size()))) {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.Mode().IsDir()
+}
+
+func getIno(fi os.FileInfo) uint64 {
+	return fi.Sys().(*syscall.Stat_t).Ino
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+	return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/vendor/github.com/moby/go-archive/changes_windows.go b/vendor/github.com/moby/go-archive/changes_windows.go
new file mode 100644
index 0000000..c89605c
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/changes_windows.go
@@ -0,0 +1,33 @@
+package archive
+
+import (
+	"io/fs"
+	"os"
+)
+
+func statDifferent(oldStat fs.FileInfo, newStat fs.FileInfo) bool {
+	// Note there is a slight difference between the Linux and Windows
+	// implementations here. Due to https://github.com/moby/moby/issues/9874,
+	// and the fix at https://github.com/moby/moby/pull/11422, Linux does not
+	// consider a change to the directory time as a change. Windows on NTFS
+	// does. See https://github.com/moby/moby/pull/37982 for more information.
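+	// Hence, unlike the Unix implementation, a directory mtime change counts
+	// as a modification here; size is still ignored for directories.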
+
+	if !sameFsTime(oldStat.ModTime(), newStat.ModTime()) ||
+		oldStat.Mode() != newStat.Mode() ||
+		oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.Mode().IsDir()
+}
+
+func getIno(fi os.FileInfo) (inode uint64) {
+	return
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+	return false
+}
diff --git a/vendor/github.com/moby/go-archive/compression/compression.go b/vendor/github.com/moby/go-archive/compression/compression.go
new file mode 100644
index 0000000..e298cef
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/compression/compression.go
@@ -0,0 +1,263 @@
+package compression
+
+import (
+	"bufio"
+	"bytes"
+	"compress/bzip2"
+	"compress/gzip"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"strconv"
+	"sync"
+
+	"github.com/containerd/log"
+	"github.com/klauspost/compress/zstd"
+)
+
+// Compression represents whether, and with which algorithm, a stream is compressed.
+type Compression int
+
+const (
+	None  Compression = 0 // None represents uncompressed data.
+	Bzip2 Compression = 1 // Bzip2 is the bzip2 compression algorithm.
+	Gzip  Compression = 2 // Gzip is the gzip compression algorithm.
+	Xz    Compression = 3 // Xz is the xz compression algorithm.
+	Zstd  Compression = 4 // Zstd is the zstd compression algorithm.
+)
+
+// Extension returns the extension of a file that uses the specified compression algorithm.
+func (c *Compression) Extension() string {
+	switch *c {
+	case None:
+		return "tar"
+	case Bzip2:
+		return "tar.bz2"
+	case Gzip:
+		return "tar.gz"
+	case Xz:
+		return "tar.xz"
+	case Zstd:
+		return "tar.zst"
+	default:
+		return ""
+	}
+}
+
+type readCloserWrapper struct {
+	io.Reader
+	closer func() error
+}
+
+func (r *readCloserWrapper) Close() error {
+	if r.closer != nil {
+		return r.closer()
+	}
+	return nil
+}
+
+type nopWriteCloser struct {
+	io.Writer
+}
+
+func (nopWriteCloser) Close() error { return nil }
+
+var bufioReader32KPool = &sync.Pool{
+	New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) },
+}
+
+type bufferedReader struct {
+	buf *bufio.Reader
+}
+
+func newBufferedReader(r io.Reader) *bufferedReader {
+	buf := bufioReader32KPool.Get().(*bufio.Reader)
+	buf.Reset(r)
+	return &bufferedReader{buf}
+}
+
+func (r *bufferedReader) Read(p []byte) (int, error) {
+	if r.buf == nil {
+		return 0, io.EOF
+	}
+	n, err := r.buf.Read(p)
+	if errors.Is(err, io.EOF) {
+		r.buf.Reset(nil)
+		bufioReader32KPool.Put(r.buf)
+		r.buf = nil
+	}
+	return n, err
+}
+
+func (r *bufferedReader) Peek(n int) ([]byte, error) {
+	if r.buf == nil {
+		return nil, io.EOF
+	}
+	return r.buf.Peek(n)
+}
+
+// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+	buf := newBufferedReader(archive)
+	bs, err := buf.Peek(10)
+	if err != nil && !errors.Is(err, io.EOF) {
+		// Note: we'll ignore any io.EOF error because there are some odd
+		// cases where the layer.tar file will be empty (zero bytes) and
+		// that results in an io.EOF from the Peek() call. So, in those
+		// cases we'll just treat it as a non-compressed stream and
+		// that means just create an empty layer.
+		// See Issue 18170
+		return nil, err
+	}
+
+	switch compression := Detect(bs); compression {
+	case None:
+		return &readCloserWrapper{
+			Reader: buf,
+		}, nil
+	case Gzip:
+		ctx, cancel := context.WithCancel(context.Background())
+		gzReader, err := gzipDecompress(ctx, buf)
+		if err != nil {
+			cancel()
+			return nil, err
+		}
+
+		return &readCloserWrapper{
+			Reader: gzReader,
+			closer: func() error {
+				cancel()
+				return gzReader.Close()
+			},
+		}, nil
+	case Bzip2:
+		bz2Reader := bzip2.NewReader(buf)
+		return &readCloserWrapper{
+			Reader: bz2Reader,
+		}, nil
+	case Xz:
+		ctx, cancel := context.WithCancel(context.Background())
+
+		xzReader, err := xzDecompress(ctx, buf)
+		if err != nil {
+			cancel()
+			return nil, err
+		}
+
+		return &readCloserWrapper{
+			Reader: xzReader,
+			closer: func() error {
+				cancel()
+				return xzReader.Close()
+			},
+		}, nil
+	case Zstd:
+		zstdReader, err := zstd.NewReader(buf)
+		if err != nil {
+			return nil, err
+		}
+		return &readCloserWrapper{
+			Reader: zstdReader,
+			closer: func() error {
+				zstdReader.Close()
+				return nil
+			},
+		}, nil
+
+	default:
+		return nil, fmt.Errorf("unsupported compression format (%d)", compression)
+	}
+}
+
+// CompressStream compresses data written to dest with the specified compression algorithm.
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
+	switch compression {
+	case None:
+		return nopWriteCloser{dest}, nil
+	case Gzip:
+		return gzip.NewWriter(dest), nil
+	case Bzip2:
+		// archive/bzip2 does not support writing.
+		return nil, errors.New("unsupported compression format: tar.bz2")
+	case Xz:
+		// There is no xz write support at all; however, this is not a problem,
+		// as docker currently only generates gzipped tars.
+		return nil, errors.New("unsupported compression format: tar.xz")
+	default:
+		return nil, fmt.Errorf("unsupported compression format (%d)", compression)
+	}
+}
+
+func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) {
+	args := []string{"xz", "-d", "-c", "-q"}
+
+	return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive)
+}
+
+func gzipDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
+	if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" {
+		noPigz, err := strconv.ParseBool(noPigzEnv)
+		if err != nil {
+			log.G(ctx).WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var")
+		}
+		if noPigz {
+			log.G(ctx).Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv)
+			return gzip.NewReader(buf)
+		}
+	}
+
+	unpigzPath, err := exec.LookPath("unpigz")
+	if err != nil {
+		log.G(ctx).Debugf("unpigz binary not found, falling back to go gzip library")
+		return gzip.NewReader(buf)
+	}
+
+	log.G(ctx).Debugf("Using %s to decompress", unpigzPath)
+
+	return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
+}
+
+// cmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
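+// For example, xzDecompress above uses it as
+// cmdStream(exec.CommandContext(ctx, "xz", "-d", "-c", "-q"), archive).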
+func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) {
+	reader, writer := io.Pipe()
+
+	cmd.Stdin = in
+	cmd.Stdout = writer
+
+	var errBuf bytes.Buffer
+	cmd.Stderr = &errBuf
+
+	// Run the command and return the pipe
+	if err := cmd.Start(); err != nil {
+		return nil, err
+	}
+
+	// Ensure the command has exited before we clean anything up
+	done := make(chan struct{})
+
+	// Propagate the command's exit status to the pipe writer
+	go func() {
+		if err := cmd.Wait(); err != nil {
+			_ = writer.CloseWithError(fmt.Errorf("%w: %s", err, errBuf.String()))
+		} else {
+			_ = writer.Close()
+		}
+		close(done)
+	}()
+
+	return &readCloserWrapper{
+		Reader: reader,
+		closer: func() error {
+			// Close reader, and then wait for the command to complete before returning. We have to close reader first, as
+			// cmd.Wait waits for any non-file stdout/stderr/stdin to close.
+			err := reader.Close()
+			<-done
+			return err
+		},
+	}, nil
+}
diff --git a/vendor/github.com/moby/go-archive/compression/compression_detect.go b/vendor/github.com/moby/go-archive/compression/compression_detect.go
new file mode 100644
index 0000000..85eda92
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/compression/compression_detect.go
@@ -0,0 +1,65 @@
+package compression
+
+import (
+	"bytes"
+	"encoding/binary"
+)
+
+const (
+	zstdMagicSkippableStart = 0x184D2A50
+	zstdMagicSkippableMask  = 0xFFFFFFF0
+)
+
+var (
+	bzip2Magic = []byte{0x42, 0x5A, 0x68}
+	gzipMagic  = []byte{0x1F, 0x8B, 0x08}
+	xzMagic    = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}
+	zstdMagic  = []byte{0x28, 0xb5, 0x2f, 0xfd}
+)
+
+type matcher = func([]byte) bool
+
+// Detect detects the compression algorithm of the source.
+func Detect(source []byte) Compression {
+	compressionMap := map[Compression]matcher{
+		Bzip2: magicNumberMatcher(bzip2Magic),
+		Gzip:  magicNumberMatcher(gzipMagic),
+		Xz:    magicNumberMatcher(xzMagic),
+		Zstd:  zstdMatcher(),
+	}
+	for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} {
+		fn := compressionMap[compression]
+		if fn(source) {
+			return compression
+		}
+	}
+	return None
+}
+
+func magicNumberMatcher(m []byte) matcher {
+	return func(source []byte) bool {
+		return bytes.HasPrefix(source, m)
+	}
+}
+
+// zstdMatcher detects the zstd compression algorithm.
+// Zstandard compressed data is made of one or more frames.
+// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames.
+// See https://datatracker.ietf.org/doc/html/rfc8878#section-3 for more details.
+func zstdMatcher() matcher {
+	return func(source []byte) bool {
+		if bytes.HasPrefix(source, zstdMagic) {
+			// Zstandard frame
+			return true
+		}
+		// skippable frame
+		if len(source) < 8 {
+			return false
+		}
+		// magic number from 0x184D2A50 to 0x184D2A5F.
+		if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart {
+			return true
+		}
+		return false
+	}
+}
diff --git a/vendor/github.com/moby/go-archive/copy.go b/vendor/github.com/moby/go-archive/copy.go
new file mode 100644
index 0000000..77d038c
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/copy.go
@@ -0,0 +1,496 @@
+package archive
+
+import (
+	"archive/tar"
+	"context"
+	"errors"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"github.com/containerd/log"
+)
+
+// Errors used or returned by this file.
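+// They are sentinel errors, so callers can match them with errors.Is.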
+var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +var copyPool = sync.Pool{ + New: func() interface{} { s := make([]byte, 32*1024); return &s }, +} + +func copyWithBuffer(dst io.Writer, src io.Reader) error { + buf := copyPool.Get().(*[]byte) + _, err := io.CopyBuffer(dst, src, *buf) + copyPool.Put(buf) + return err +} + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in a path separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string) string { + // Ensure paths are in platform semantics + cleanedPath = normalizePath(cleanedPath) + originalPath = normalizePath(originalPath) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(filepath.Separator) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { + cleanedPath += string(filepath.Separator) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string) bool { + return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string) bool { + return len(path) > 0 && path[len(path)-1] == filepath.Separator +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(filepath.FromSlash(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(os.PathSeparator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. 
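+//
+// For example, TarResource(CopyInfo{Path: "/var/log/app.log"}) would archive
+// just that one file ("/var/log/app.log" is only an illustrative path).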
+func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
+	return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
+}
+
+// TarResourceRebase is like TarResource but renames the first path element of
+// items in the resulting tar archive to match the given rebaseName if not "".
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, _ error) {
+	sourcePath = normalizePath(sourcePath)
+	if _, err := os.Lstat(sourcePath); err != nil {
+		// Catches the case where the source does not exist or is not a
+		// directory if asserted to be a directory, as this also causes an
+		// error.
+		return nil, err
+	}
+
+	// Separate the source path between its directory and
+	// the entry in that directory which we are archiving.
+	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+	opts := TarResourceRebaseOpts(sourceBase, rebaseName)
+
+	log.G(context.TODO()).Debugf("copying %q from %q", sourceBase, sourceDir)
+	return TarWithOptions(sourceDir, opts)
+}
+
+// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
+// parameters to be sent to TarWithOptions (the TarOptions struct).
+func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
+	filter := []string{sourceBase}
+	return &TarOptions{
+		IncludeFiles:     filter,
+		IncludeSourceDir: true,
+		RebaseNames: map[string]string{
+			sourceBase: rebaseName,
+		},
+	}
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+	Path       string
+	Exists     bool
+	IsDir      bool
+	RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+	// Normalize the file path and then evaluate the symlink; if
+	// followLink is set, we will use the symlink's target file
+	// instead of the symlink itself.
+	path = normalizePath(path)
+
+	resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	stat, err := os.Lstat(resolvedPath)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	return CopyInfo{
+		Path:       resolvedPath,
+		Exists:     true,
+		IsDir:      stat.IsDir(),
+		RebaseName: rebaseName,
+	}, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+	path = normalizePath(path)
+	originalPath := path
+
+	stat, err := os.Lstat(path)
+
+	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+		// The path exists and is not a symlink.
+		return CopyInfo{
+			Path:   path,
+			Exists: true,
+			IsDir:  stat.IsDir(),
+		}, nil
+	}
+
+	// While the path is a symlink.
+	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+		if n > maxSymlinkIter {
+			// Don't follow symlinks more than this arbitrary number of times.
+			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+		}
+
+		// The path is a symbolic link.
We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. + // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !filepath.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Stat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, io.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. 
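+		// e.g. copying directory /a/b to a non-existent /c/d extracts into /c,
+		// with entries rebased from "b" to "d".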
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	case assertsDirectory(dstInfo.Path):
+		// The destination does not exist and is asserted to be created as a
+		// directory, but the source content is not a directory. This is an
+		// error condition since you cannot create a directory from a file
+		// source.
+		return "", nil, ErrDirNotExists
+	default:
+		// The last remaining case is when the destination does not exist, is
+		// not asserted to be a directory, and the source content is not an
+		// archive of a directory. In this case, the destination file will need
+		// to be created when the archive is extracted and the source content
+		// entry will have to be renamed to have a basename which matches the
+		// destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	}
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
+	if oldBase == string(os.PathSeparator) {
+		// If oldBase specifies the root directory, use an empty string as
+		// oldBase instead so that newBase doesn't replace the path separator
+		// that all paths will start with.
+		oldBase = ""
+	}
+
+	rebased, w := io.Pipe()
+
+	go func() {
+		srcTar := tar.NewReader(srcContent)
+		rebasedTar := tar.NewWriter(w)
+
+		for {
+			hdr, err := srcTar.Next()
+			if errors.Is(err, io.EOF) {
+				// Signals end of archive.
+				rebasedTar.Close()
+				w.Close()
+				return
+			}
+			if err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			// srcContent tar stream, as served by TarWithOptions(), is
+			// definitely in PAX format, but tar.Next() mistakenly guesses it
+			// as USTAR, which creates a problem: if the newBase is >100
+			// characters long, WriteHeader() returns an error like
+			// "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...".
+			//
+			// To fix, set the format to PAX here. See docker/for-linux issue #484.
+			hdr.Format = tar.FormatPAX
+			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+			if hdr.Typeflag == tar.TypeLink {
+				hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
+			}
+
+			if err = rebasedTar.WriteHeader(hdr); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			// Ignoring GoSec G110. See https://github.com/securego/gosec/pull/433
+			// and https://cure53.de/pentest-report_opa.pdf, which recommends replacing
+			// io.Copy with io.CopyN. The latter allows specifying the
+			// maximum number of bytes that should be read. By properly defining
+			// the limit, it can be assured that a GZip compression bomb cannot
+			// easily cause a Denial-of-Service.
+			// After reviewing with @tonistiigi and @cpuguy83, this should not
+			// affect us, because here we do not read into memory, hence should
+			// not be vulnerable to this code consuming memory.
+			//nolint:gosec // G110: Potential DoS vulnerability via decompression bomb (gosec)
+			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+		}
+	}()
+
+	return rebased
+}
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
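+// Trailing "/." or "/" markers on either path survive cleaning (see
+// PreserveTrailingDotOrSeparator) so later directory assertions still apply.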
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+	var (
+		srcInfo CopyInfo
+		err     error
+	)
+
+	// Ensure in platform semantics
+	srcPath = normalizePath(srcPath)
+	dstPath = normalizePath(dstPath)
+
+	// Clean the source and destination paths.
+	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
+	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
+
+	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+		return err
+	}
+
+	content, err := TarResource(srcInfo)
+	if err != nil {
+		return err
+	}
+	defer content.Close()
+
+	return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
+	// The destination path need not exist, but CopyInfoDestinationPath will
+	// ensure that at least the parent directory exists.
+	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+	if err != nil {
+		return err
+	}
+
+	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+	if err != nil {
+		return err
+	}
+	defer copyArchive.Close()
+
+	options := &TarOptions{
+		NoLchown:             true,
+		NoOverwriteDirNonDir: true,
+	}
+
+	return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides which real path to copy, based on whether to
+// follow symlinks. If followLink is true, resolvedPath is the link target of
+// any symlink file; otherwise only symlinks in the parent directory are
+// resolved, and a symlink file itself is returned without resolving.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, _ error) {
+	if followLink {
+		var err error
+		resolvedPath, err = filepath.EvalSymlinks(path)
+		if err != nil {
+			return "", "", err
+		}
+
+		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+	} else {
+		dirPath, basePath := filepath.Split(path)
+
+		// If not following the symlink, then resolve symlinks in the parent dir only.
+		resolvedDirPath, err := filepath.EvalSymlinks(dirPath)
+		if err != nil {
+			return "", "", err
+		}
+		// resolvedDirPath will have been cleaned (no trailing path separators) so
+		// we can manually join it with the base path element.
+		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+		if hasTrailingPathSeparator(path) &&
+			filepath.Base(path) != filepath.Base(resolvedPath) {
+			rebaseName = filepath.Base(path)
+		}
+	}
+	return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath, returning the
+// completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+	// linkTarget will have been cleaned (no trailing path separators and dot) so
+	// we can manually join it with them
+	var rebaseName string
+	if specifiesCurrentDir(path) &&
+		!specifiesCurrentDir(resolvedPath) {
+		resolvedPath += string(filepath.Separator) + "."
+	}
+
+	if hasTrailingPathSeparator(path) &&
+		!hasTrailingPathSeparator(resolvedPath) {
+		resolvedPath += string(filepath.Separator)
+	}
+
+	if filepath.Base(path) != filepath.Base(resolvedPath) {
+		// In the case where the path had a trailing separator and a symlink
+		// evaluation has changed the last path component, we will need to
+		// rebase the name in the archive that is being copied to match the
+		// originally requested name.
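+		// e.g. a path of "b/" resolving to symlink target "a" yields rebaseName "b".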
+		rebaseName = filepath.Base(path)
+	}
+	return resolvedPath, rebaseName
+}
diff --git a/vendor/github.com/moby/go-archive/copy_unix.go b/vendor/github.com/moby/go-archive/copy_unix.go
new file mode 100644
index 0000000..f579282
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/copy_unix.go
@@ -0,0 +1,11 @@
+//go:build !windows
+
+package archive
+
+import (
+	"path/filepath"
+)
+
+func normalizePath(path string) string {
+	return filepath.ToSlash(path)
+}
diff --git a/vendor/github.com/moby/go-archive/copy_windows.go b/vendor/github.com/moby/go-archive/copy_windows.go
new file mode 100644
index 0000000..2b775b4
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/copy_windows.go
@@ -0,0 +1,9 @@
+package archive
+
+import (
+	"path/filepath"
+)
+
+func normalizePath(path string) string {
+	return filepath.FromSlash(path)
+}
diff --git a/vendor/github.com/moby/go-archive/dev_freebsd.go b/vendor/github.com/moby/go-archive/dev_freebsd.go
new file mode 100644
index 0000000..b3068fc
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/dev_freebsd.go
@@ -0,0 +1,9 @@
+//go:build freebsd
+
+package archive
+
+import "golang.org/x/sys/unix"
+
+func mknod(path string, mode uint32, dev uint64) error {
+	return unix.Mknod(path, mode, dev)
+}
diff --git a/vendor/github.com/moby/go-archive/dev_unix.go b/vendor/github.com/moby/go-archive/dev_unix.go
new file mode 100644
index 0000000..dffc596
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/dev_unix.go
@@ -0,0 +1,9 @@
+//go:build !windows && !freebsd
+
+package archive
+
+import "golang.org/x/sys/unix"
+
+func mknod(path string, mode uint32, dev uint64) error {
+	return unix.Mknod(path, mode, int(dev))
+}
diff --git a/vendor/github.com/moby/go-archive/diff.go b/vendor/github.com/moby/go-archive/diff.go
new file mode 100644
index 0000000..96db972
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/diff.go
@@ -0,0 +1,261 @@
+package archive
+
+import (
+	"archive/tar"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/containerd/log"
+
+	"github.com/moby/go-archive/compression"
+)
+
+// UnpackLayer unpacks `layer` into `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
+	tr := tar.NewReader(layer)
+
+	var dirs []*tar.Header
+	unpackedPaths := make(map[string]struct{})
+
+	if options == nil {
+		options = &TarOptions{}
+	}
+	if options.ExcludePatterns == nil {
+		options.ExcludePatterns = []string{}
+	}
+
+	aufsTempdir := ""
+	aufsHardlinks := make(map[string]*tar.Header)
+
+	// Iterate through the files in the archive.
+	for {
+		hdr, err := tr.Next()
+		if errors.Is(err, io.EOF) {
+			// end of tar archive
+			break
+		}
+		if err != nil {
+			return 0, err
+		}
+
+		size += hdr.Size
+
+		// Normalize name, for safety and for a simple is-root check
+		hdr.Name = filepath.Clean(hdr.Name)
+
+		// Windows does not support filenames with colons in them. Ignore
+		// these files. This is not a problem though (although it might
+		// appear that it is). Let's suppose a client is running docker pull.
+		// The daemon it points to is Windows. Would it make sense for the
+		// client to be doing a docker pull Ubuntu for example (which has files
+		// with colons in the name under /usr/share/man/man3)? No, absolutely
+		// not as it would really only make sense that they were pulling a
+		// Windows image. However, for development, it is necessary to be able
+		// to pull Linux images which are in the repository.
+		//
+		// TODO Windows. Once the registry is aware of what images are Windows-
+		// specific or Linux-specific, this warning should be changed to an error
+		// to cater for the situation where someone does manage to upload a Linux
+		// image but have it tagged as Windows inadvertently.
+		if runtime.GOOS == "windows" {
+			if strings.Contains(hdr.Name, ":") {
+				log.G(context.TODO()).Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+				continue
+			}
+		}
+
+		// Ensure that the parent directory exists.
+		err = createImpliedDirectories(dest, hdr, options)
+		if err != nil {
+			return 0, err
+		}
+
+		// Skip AUFS metadata dirs
+		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
+			// We don't want this directory, but we need the files in them so that
+			// such hardlinks can be resolved.
+			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+				basename := filepath.Base(hdr.Name)
+				aufsHardlinks[basename] = hdr
+				if aufsTempdir == "" {
+					if aufsTempdir, err = os.MkdirTemp(dest, "dockerplnk"); err != nil {
+						return 0, err
+					}
+					defer os.RemoveAll(aufsTempdir)
+				}
+				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, options); err != nil {
+					return 0, err
+				}
+			}
+
+			if hdr.Name != WhiteoutOpaqueDir {
+				continue
+			}
+		}
+		// #nosec G305 -- The joined path is guarded against path traversal.
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return 0, err
+		}
+
+		// Note: as these operations are platform specific, so must the slash be.
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+		base := filepath.Base(path)
+
+		if strings.HasPrefix(base, WhiteoutPrefix) {
+			dir := filepath.Dir(path)
+			if base == WhiteoutOpaqueDir {
+				_, err := os.Lstat(dir)
+				if err != nil {
+					return 0, err
+				}
+				err = filepath.WalkDir(dir, func(path string, info os.DirEntry, err error) error {
+					if err != nil {
+						if os.IsNotExist(err) {
+							err = nil // parent was deleted
+						}
+						return err
+					}
+					if path == dir {
+						return nil
+					}
+					if _, exists := unpackedPaths[path]; !exists {
+						return os.RemoveAll(path)
+					}
+					return nil
+				})
+				if err != nil {
+					return 0, err
+				}
+			} else {
+				originalBase := base[len(WhiteoutPrefix):]
+				originalPath := filepath.Join(dir, originalBase)
+				if err := os.RemoveAll(originalPath); err != nil {
+					return 0, err
+				}
+			}
+		} else {
+			// If path exists we almost always just want to remove and replace it.
+			// The only exception is when it is a directory *and* the file from
+			// the layer is also a directory. Then we want to merge them (i.e.
+			// just apply the metadata from the layer).
+			if fi, err := os.Lstat(path); err == nil {
+				if !fi.IsDir() || hdr.Typeflag != tar.TypeDir {
+					if err := os.RemoveAll(path); err != nil {
+						return 0, err
+					}
+				}
+			}
+
+			srcData := io.Reader(tr)
+			srcHdr := hdr
+
+			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+			// we manually retarget these into the temporary files we extracted them into
+			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
+				linkBasename := filepath.Base(hdr.Linkname)
+				srcHdr = aufsHardlinks[linkBasename]
+				if srcHdr == nil {
+					return 0, errors.New("invalid aufs hardlink")
+				}
+				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+				if err != nil {
+					return 0, err
+				}
+				defer tmpFile.Close()
+				srcData = tmpFile
+			}
+
+			if err := remapIDs(options.IDMap, srcHdr); err != nil {
+				return 0, err
+			}
+
+			if err := createTarFile(path, dest, srcHdr, srcData, options); err != nil {
+				return 0, err
+			}
+
+			// Directory mtimes must be handled at the end to prevent further
+			// file creation in them from modifying the directory mtime
+			if hdr.Typeflag == tar.TypeDir {
+				dirs = append(dirs, hdr)
+			}
+			unpackedPaths[path] = struct{}{}
+		}
+	}
+
+	for _, hdr := range dirs {
+		// #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice.
+		path := filepath.Join(dest, hdr.Name)
+		if err := chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+			return 0, err
+		}
+	}
+
+	return size, nil
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer io.Reader) (int64, error) {
+	return applyLayerHandler(dest, layer, &TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
+	return applyLayerHandler(dest, layer, options, false)
+}
+
+// IsEmpty checks if the tar archive is empty (doesn't contain any entries).
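+// It decompresses the stream first, so a compressed empty tar also reports true.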
+func IsEmpty(rd io.Reader) (bool, error) {
+	decompRd, err := compression.DecompressStream(rd)
+	if err != nil {
+		return true, fmt.Errorf("failed to decompress archive: %w", err)
+	}
+	defer decompRd.Close()
+
+	tarReader := tar.NewReader(decompRd)
+	if _, err := tarReader.Next(); err != nil {
+		if errors.Is(err, io.EOF) {
+			return true, nil
+		}
+		return false, fmt.Errorf("failed to read next archive header: %w", err)
+	}
+
+	return false, nil
+}
+
+// applyLayerHandler does the bulk of the work for ApplyLayer, but allows for not calling DecompressStream
+func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
+	dest = filepath.Clean(dest)
+
+	// We need to be able to set any perms
+	restore := overrideUmask(0)
+	defer restore()
+
+	if decompress {
+		decompLayer, err := compression.DecompressStream(layer)
+		if err != nil {
+			return 0, err
+		}
+		defer decompLayer.Close()
+		layer = decompLayer
+	}
+	return UnpackLayer(dest, layer, options)
+}
diff --git a/vendor/github.com/moby/go-archive/diff_unix.go b/vendor/github.com/moby/go-archive/diff_unix.go
new file mode 100644
index 0000000..7216f2f
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/diff_unix.go
@@ -0,0 +1,21 @@
+//go:build !windows
+
+package archive
+
+import "golang.org/x/sys/unix"
+
+// overrideUmask sets current process's file mode creation mask to newmask
+// and returns a function to restore it.
+//
+// WARNING for readers stumbling upon this code. Changing umask in a multi-
+// threaded environment isn't safe. Don't use this without understanding the
+// risks, and don't export this function for others to use (we shouldn't even
+// be using this ourself).
+//
+// FIXME(thaJeztah): we should get rid of these hacks if possible.
+func overrideUmask(newMask int) func() {
+	oldMask := unix.Umask(newMask)
+	return func() {
+		unix.Umask(oldMask)
+	}
+}
diff --git a/vendor/github.com/moby/go-archive/diff_windows.go b/vendor/github.com/moby/go-archive/diff_windows.go
new file mode 100644
index 0000000..d28f5b2
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/diff_windows.go
@@ -0,0 +1,6 @@
+package archive
+
+// overrideUmask is a no-op on windows.
+func overrideUmask(newmask int) func() {
+	return func() {}
+}
diff --git a/vendor/github.com/moby/go-archive/path.go b/vendor/github.com/moby/go-archive/path.go
new file mode 100644
index 0000000..888a697
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/path.go
@@ -0,0 +1,20 @@
+package archive
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
+// is the system drive.
+// On Linux: this is a no-op.
+// On Windows: this verifies and manipulates the path. It is used, for example,
+// when validating a user-provided path in docker cp. If a drive letter is
+// supplied, it must be the system drive. The drive letter is always removed.
+// Also, it translates the path to OS semantics (IOW / to \). We need the path
+// in this syntax so that it can ultimately be concatenated with a Windows
+// long-path which doesn't support drive-letters.
+// Examples:
+// C:			--> Fail
+// C:\			--> \
+// a			--> a
+// /a			--> \a
+// d:\			--> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+	return checkSystemDriveAndRemoveDriveLetter(path)
+}
diff --git a/vendor/github.com/moby/go-archive/path_unix.go b/vendor/github.com/moby/go-archive/path_unix.go
new file mode 100644
index 0000000..390264b
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/path_unix.go
@@ -0,0 +1,9 @@
+//go:build !windows
+
+package archive
+
+// checkSystemDriveAndRemoveDriveLetter is the non-Windows implementation
+// of CheckSystemDriveAndRemoveDriveLetter
+func checkSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+	return path, nil
+}
diff --git a/vendor/github.com/moby/go-archive/path_windows.go b/vendor/github.com/moby/go-archive/path_windows.go
new file mode 100644
index 0000000..7e18c8e
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/path_windows.go
@@ -0,0 +1,22 @@
+package archive
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+)
+
+// checkSystemDriveAndRemoveDriveLetter is the Windows implementation
+// of CheckSystemDriveAndRemoveDriveLetter
+func checkSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+	if len(path) == 2 && string(path[1]) == ":" {
+		return "", fmt.Errorf("no relative path specified in %q", path)
+	}
+	if !filepath.IsAbs(path) || len(path) < 2 {
+		return filepath.FromSlash(path), nil
+	}
+	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+		return "", fmt.Errorf("the specified path is not on the system drive (C:)")
+	}
+	return filepath.FromSlash(path[2:]), nil
+}
diff --git a/vendor/github.com/moby/go-archive/tarheader/tarheader.go b/vendor/github.com/moby/go-archive/tarheader/tarheader.go
new file mode 100644
index 0000000..03732a4
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/tarheader/tarheader.go
@@ -0,0 +1,67 @@
+package tarheader
+
+import (
+	"archive/tar"
+	"os"
+)
+
+// assert that we implement [tar.FileInfoNames].
+var _ tar.FileInfoNames = (*nosysFileInfo)(nil)
+
+// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to
+// prevent tar.FileInfoHeader from introspecting it and potentially calling into
+// glibc.
+//
+// It implements [tar.FileInfoNames] to further prevent [tar.FileInfoHeader]
+// from performing any lookups on go1.23 and up. see https://go.dev/issue/50102
+type nosysFileInfo struct {
+	os.FileInfo
+}
+
+// Uname stubs out looking up the username. It implements [tar.FileInfoNames]
+// to prevent [tar.FileInfoHeader] from loading libraries to perform
+// username lookups.
+func (fi nosysFileInfo) Uname() (string, error) {
+	return "", nil
+}
+
+// Gname stubs out looking up the group name. It implements [tar.FileInfoNames]
+// to prevent [tar.FileInfoHeader] from loading libraries to perform
+// group name lookups.
+func (fi nosysFileInfo) Gname() (string, error) {
+	return "", nil
+}
+
+func (fi nosysFileInfo) Sys() interface{} {
+	// A Sys value of type *tar.Header is safe as it is system-independent.
+	// The tar.FileInfoHeader function copies the fields into the returned
+	// header without performing any OS lookups.
+	if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok {
+		return sys
+	}
+	return nil
+}
+
+// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi.
+//
+// Compared to the archive/tar.FileInfoHeader function, this function is safe to
+// call from a chrooted process as it does not populate fields which would
+// require operating system lookups.
It behaves identically to +// tar.FileInfoHeader when fi is a FileInfo value returned from +// tar.Header.FileInfo(). +// +// When fi is a FileInfo for a native file, such as returned from os.Stat() and +// os.Lstat(), the returned Header value differs from one returned from +// tar.FileInfoHeader in the following ways. The Uname and Gname fields are not +// set as OS lookups would be required to populate them. The AccessTime and +// ChangeTime fields are not currently set (not yet implemented) although that +// is subject to change. Callers which require the AccessTime or ChangeTime +// fields to be zeroed should explicitly zero them out in the returned Header +// value to avoid any compatibility issues in the future. +func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link) + if err != nil { + return nil, err + } + return hdr, sysStat(fi, hdr) +} diff --git a/vendor/github.com/moby/go-archive/tarheader/tarheader_unix.go b/vendor/github.com/moby/go-archive/tarheader/tarheader_unix.go new file mode 100644 index 0000000..9c3311c --- /dev/null +++ b/vendor/github.com/moby/go-archive/tarheader/tarheader_unix.go @@ -0,0 +1,46 @@ +//go:build !windows + +package tarheader + +import ( + "archive/tar" + "os" + "runtime" + "syscall" + + "golang.org/x/sys/unix" +) + +// sysStat populates hdr from system-dependent fields of fi without performing +// any OS lookups. +func sysStat(fi os.FileInfo, hdr *tar.Header) error { + // Devmajor and Devminor are only needed for special devices. + + // In FreeBSD, RDev for regular files is -1 (unless overridden by FS): + // https://cgit.freebsd.org/src/tree/sys/kern/vfs_default.c?h=stable/13#n1531 + // (NODEV is -1: https://cgit.freebsd.org/src/tree/sys/sys/param.h?h=stable/13#n241). + + // ZFS in particular does not override the default: + // https://cgit.freebsd.org/src/tree/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c?h=stable/13#n2027 + + // Since `Stat_t.Rdev` is uint64, the cast turns -1 into (2^64 - 1). + // Such large values cannot be encoded in a tar header. + if runtime.GOOS == "freebsd" && hdr.Typeflag != tar.TypeBlock && hdr.Typeflag != tar.TypeChar { + return nil + } + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + + hdr.Uid = int(s.Uid) + hdr.Gid = int(s.Gid) + + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert + } + + return nil +} diff --git a/vendor/github.com/moby/go-archive/tarheader/tarheader_windows.go b/vendor/github.com/moby/go-archive/tarheader/tarheader_windows.go new file mode 100644 index 0000000..5d4483c --- /dev/null +++ b/vendor/github.com/moby/go-archive/tarheader/tarheader_windows.go @@ -0,0 +1,12 @@ +package tarheader + +import ( + "archive/tar" + "os" +) + +// sysStat populates hdr from system-dependent fields of fi without performing +// any OS lookups. It is a no-op on Windows. 
+func sysStat(os.FileInfo, *tar.Header) error {
+	return nil
+}
diff --git a/vendor/github.com/moby/go-archive/time.go b/vendor/github.com/moby/go-archive/time.go
new file mode 100644
index 0000000..4e9ae95
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/time.go
@@ -0,0 +1,38 @@
+package archive
+
+import (
+	"syscall"
+	"time"
+	"unsafe"
+)
+
+var (
+	minTime = time.Unix(0, 0)
+	maxTime time.Time
+)
+
+func init() {
+	if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
+		// This is a 64 bit timespec;
+		// os.Chtimes limits times to the following range.
+		maxTime = time.Unix(0, 1<<63-1)
+	} else {
+		// This is a 32 bit timespec.
+		maxTime = time.Unix(1<<31-1, 0)
+	}
+}
+
+func boundTime(t time.Time) time.Time {
+	if t.Before(minTime) || t.After(maxTime) {
+		return minTime
+	}
+
+	return t
+}
+
+func latestTime(t1, t2 time.Time) time.Time {
+	if t1.Before(t2) {
+		return t2
+	}
+	return t1
+}
diff --git a/vendor/github.com/moby/go-archive/time_nonwindows.go b/vendor/github.com/moby/go-archive/time_nonwindows.go
new file mode 100644
index 0000000..5bfdfa2
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/time_nonwindows.go
@@ -0,0 +1,41 @@
+//go:build !windows
+
+package archive
+
+import (
+	"os"
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+// chtimes changes the access time and modified time of a file at the given path.
+// os.Chtimes has undefined behavior for times before the Unix epoch (minTime)
+// or after the maximum representable time (maxTime); callers are expected to
+// clamp such values with boundTime first.
+func chtimes(name string, atime time.Time, mtime time.Time) error {
+	return os.Chtimes(name, atime, mtime)
+}
+
+func timeToTimespec(time time.Time) unix.Timespec {
+	if time.IsZero() {
+		// Return UTIME_OMIT special value
+		return unix.Timespec{
+			Sec:  0,
+			Nsec: (1 << 30) - 2,
+		}
+	}
+	return unix.NsecToTimespec(time.UnixNano())
+}
+
+func lchtimes(name string, atime time.Time, mtime time.Time) error {
+	utimes := [2]unix.Timespec{
+		timeToTimespec(atime),
+		timeToTimespec(mtime),
+	}
+	err := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes[0:], unix.AT_SYMLINK_NOFOLLOW)
+	if err != nil && err != unix.ENOSYS {
+		return err
+	}
+	// ENOSYS (the platform does not support UtimesNanoAt) is deliberately ignored.
+	return nil
+}
diff --git a/vendor/github.com/moby/go-archive/time_windows.go b/vendor/github.com/moby/go-archive/time_windows.go
new file mode 100644
index 0000000..af1f7c8
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/time_windows.go
@@ -0,0 +1,32 @@
+package archive
+
+import (
+	"os"
+	"time"
+
+	"golang.org/x/sys/windows"
+)
+
+func chtimes(name string, atime time.Time, mtime time.Time) error {
+	if err := os.Chtimes(name, atime, mtime); err != nil {
+		return err
+	}
+
+	pathp, err := windows.UTF16PtrFromString(name)
+	if err != nil {
+		return err
+	}
+	h, err := windows.CreateFile(pathp,
+		windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil,
+		windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
+	if err != nil {
+		return err
+	}
+	defer windows.Close(h)
+	// os.Chtimes only sets the access and write times; additionally set the
+	// file's creation time to mtime.
+	c := windows.NsecToFiletime(mtime.UnixNano())
+	return windows.SetFileTime(h, &c, nil, nil)
+}
+
+func lchtimes(name string, atime time.Time, mtime time.Time) error {
+	return nil
+}
diff --git a/vendor/github.com/moby/go-archive/whiteouts.go b/vendor/github.com/moby/go-archive/whiteouts.go
new file mode 100644
index 0000000..d20478a
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/whiteouts.go
@@ -0,0 +1,23 @@
+package archive
+
+// Whiteouts are files with a special meaning for the layered filesystem.
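+// For example, when a file is deleted in an upper layer, that layer's tar
+// stream contains an entry named ".wh.<filename>" in the file's directory
+// instead of the file itself.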
+// Docker uses AUFS whiteout files inside exported archives. In other
+// filesystems these files are generated/handled on tar creation/extraction.
+
+// WhiteoutPrefix means the file is a whiteout. If this is followed by a
+// filename it means that the file has been removed from the base layer.
+const WhiteoutPrefix = ".wh."
+
+// WhiteoutMetaPrefix means the whiteout has a special meaning and is not
+// for removing an actual file. Normally these files are excluded from exported
+// archives.
+const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
+
+// WhiteoutLinkDir is a directory AUFS uses for storing hardlinks to other
+// layers. Normally these should not go into exported archives and all changed
+// hardlinks should be copied to the top layer.
+const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
+
+// WhiteoutOpaqueDir means the directory has been made opaque: readdir calls
+// on this directory do not fall through to lower layers.
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
diff --git a/vendor/github.com/moby/go-archive/wrap.go b/vendor/github.com/moby/go-archive/wrap.go
new file mode 100644
index 0000000..f8a9725
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/wrap.go
@@ -0,0 +1,59 @@
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"io"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with
+// empty content. For example:
+//
+//	Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+//   - ./foo.txt with content "hello world"
+//   - ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (io.Reader, error) {
+	files := parseStringPairs(input...)
+	buf := new(bytes.Buffer)
+	tw := tar.NewWriter(buf)
+	for _, file := range files {
+		name, content := file[0], file[1]
+		hdr := &tar.Header{
+			Name: name,
+			Size: int64(len(content)),
+		}
+		if err := tw.WriteHeader(hdr); err != nil {
+			return nil, err
+		}
+		if _, err := tw.Write([]byte(content)); err != nil {
+			return nil, err
+		}
+	}
+	if err := tw.Close(); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
+
+func parseStringPairs(input ...string) [][2]string {
+	output := make([][2]string, 0, len(input)/2+1)
+	for i := 0; i < len(input); i += 2 {
+		var pair [2]string
+		pair[0] = input[i]
+		if i+1 < len(input) {
+			pair[1] = input[i+1]
+		}
+		output = append(output, pair)
+	}
+	return output
+}
diff --git a/vendor/github.com/moby/go-archive/xattr_supported.go b/vendor/github.com/moby/go-archive/xattr_supported.go
new file mode 100644
index 0000000..652a1f0
--- /dev/null
+++ b/vendor/github.com/moby/go-archive/xattr_supported.go
@@ -0,0 +1,52 @@
+//go:build linux || darwin || freebsd || netbsd
+
+package archive
+
+import (
+	"errors"
+	"fmt"
+	"io/fs"
+
+	"golang.org/x/sys/unix"
+)
+
+// lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It returns a nil slice and nil error if the xattr is not set.
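+//
+// A hedged usage sketch (path and attribute name are illustrative):
+//
+//	value, err := lgetxattr("/some/file", "user.example")
+//	switch {
+//	case err != nil: // the lgetxattr syscall failed
+//	case value == nil: // the attribute is not set
+//	default: // value holds the attribute bytes
+//	}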
+func lgetxattr(path string, attr string) ([]byte, error) { + // Start with a 128 length byte array + dest := make([]byte, 128) + sz, err := unix.Lgetxattr(path, attr, dest) + + for errors.Is(err, unix.ERANGE) { + // Buffer too small, use zero-sized buffer to get the actual size + sz, err = unix.Lgetxattr(path, attr, []byte{}) + if err != nil { + return nil, wrapPathError("lgetxattr", path, attr, err) + } + dest = make([]byte, sz) + sz, err = unix.Lgetxattr(path, attr, dest) + } + + if err != nil { + if errors.Is(err, noattr) { + return nil, nil + } + return nil, wrapPathError("lgetxattr", path, attr, err) + } + + return dest[:sz], nil +} + +// lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. +func lsetxattr(path string, attr string, data []byte, flags int) error { + return wrapPathError("lsetxattr", path, attr, unix.Lsetxattr(path, attr, data, flags)) +} + +func wrapPathError(op, path, attr string, err error) error { + if err == nil { + return nil + } + return &fs.PathError{Op: op, Path: path, Err: fmt.Errorf("xattr %q: %w", attr, err)} +} diff --git a/vendor/github.com/moby/go-archive/xattr_supported_linux.go b/vendor/github.com/moby/go-archive/xattr_supported_linux.go new file mode 100644 index 0000000..f2e7646 --- /dev/null +++ b/vendor/github.com/moby/go-archive/xattr_supported_linux.go @@ -0,0 +1,5 @@ +package archive + +import "golang.org/x/sys/unix" + +var noattr = unix.ENODATA diff --git a/vendor/github.com/moby/go-archive/xattr_supported_unix.go b/vendor/github.com/moby/go-archive/xattr_supported_unix.go new file mode 100644 index 0000000..4d88241 --- /dev/null +++ b/vendor/github.com/moby/go-archive/xattr_supported_unix.go @@ -0,0 +1,7 @@ +//go:build !linux && !windows + +package archive + +import "golang.org/x/sys/unix" + +var noattr = unix.ENOATTR diff --git a/vendor/github.com/moby/go-archive/xattr_unsupported.go b/vendor/github.com/moby/go-archive/xattr_unsupported.go new file mode 100644 index 0000000..b0d9165 --- /dev/null +++ b/vendor/github.com/moby/go-archive/xattr_unsupported.go @@ -0,0 +1,11 @@ +//go:build !linux && !darwin && !freebsd && !netbsd + +package archive + +func lgetxattr(path string, attr string) ([]byte, error) { + return nil, nil +} + +func lsetxattr(path string, attr string, data []byte, flags int) error { + return nil +} diff --git a/vendor/github.com/moby/sys/sequential/sequential_unix.go b/vendor/github.com/moby/sys/sequential/sequential_unix.go index a3c7340..278cdfb 100644 --- a/vendor/github.com/moby/sys/sequential/sequential_unix.go +++ b/vendor/github.com/moby/sys/sequential/sequential_unix.go @@ -5,41 +5,22 @@ package sequential import "os" -// Create creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. +// Create is an alias for [os.Create] on non-Windows platforms. func Create(name string) (*os.File, error) { return os.Create(name) } -// Open opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. +// Open is an alias for [os.Open] on non-Windows platforms. 
func Open(name string) (*os.File, error) { return os.Open(name) } -// OpenFile is the generalized open call; most users will use Open -// or Create instead. It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. -// If there is an error, it will be of type *PathError. +// OpenFile is an alias for [os.OpenFile] on non-Windows platforms. func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { return os.OpenFile(name, flag, perm) } -// CreateTemp creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. +// CreateTemp is an alias for [os.CreateTemp] on non-Windows platforms. func CreateTemp(dir, prefix string) (f *os.File, err error) { return os.CreateTemp(dir, prefix) } diff --git a/vendor/github.com/moby/sys/sequential/sequential_windows.go b/vendor/github.com/moby/sys/sequential/sequential_windows.go index 3f7f0d8..3500ecc 100644 --- a/vendor/github.com/moby/sys/sequential/sequential_windows.go +++ b/vendor/github.com/moby/sys/sequential/sequential_windows.go @@ -5,48 +5,52 @@ import ( "path/filepath" "strconv" "sync" - "syscall" "time" "unsafe" "golang.org/x/sys/windows" ) -// Create creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. +// Create is a copy of [os.Create], modified to use sequential file access. +// +// It uses [windows.FILE_FLAG_SEQUENTIAL_SCAN] rather than [windows.FILE_ATTRIBUTE_NORMAL] +// as implemented in golang. Refer to the [Win32 API documentation] for details +// on sequential file access. +// +// [Win32 API documentation]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN func Create(name string) (*os.File, error) { - return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) + return openFileSequential(name, windows.O_RDWR|windows.O_CREAT|windows.O_TRUNC) } -// Open opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. +// Open is a copy of [os.Open], modified to use sequential file access. +// +// It uses [windows.FILE_FLAG_SEQUENTIAL_SCAN] rather than [windows.FILE_ATTRIBUTE_NORMAL] +// as implemented in golang. Refer to the [Win32 API documentation] for details +// on sequential file access. +// +// [Win32 API documentation]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN func Open(name string) (*os.File, error) { - return OpenFile(name, os.O_RDONLY, 0) + return openFileSequential(name, windows.O_RDONLY) } -// OpenFile is the generalized open call; most users will use Open -// or Create instead. -// If there is an error, it will be of type *PathError. 
+// OpenFile is a copy of [os.OpenFile], modified to use sequential file access. +// +// It uses [windows.FILE_FLAG_SEQUENTIAL_SCAN] rather than [windows.FILE_ATTRIBUTE_NORMAL] +// as implemented in golang. Refer to the [Win32 API documentation] for details +// on sequential file access. +// +// [Win32 API documentation]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN func OpenFile(name string, flag int, _ os.FileMode) (*os.File, error) { - if name == "" { - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} - } - r, err := openFileSequential(name, flag, 0) - if err == nil { - return r, nil - } - return nil, &os.PathError{Op: "open", Path: name, Err: err} + return openFileSequential(name, flag) } -func openFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := openSequential(name, flag|windows.O_CLOEXEC, 0) +func openFileSequential(name string, flag int) (file *os.File, err error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: windows.ERROR_FILE_NOT_FOUND} + } + r, e := openSequential(name, flag|windows.O_CLOEXEC) if e != nil { - return nil, e + return nil, &os.PathError{Op: "open", Path: name, Err: e} } return os.NewFile(uintptr(r), name), nil } @@ -58,7 +62,7 @@ func makeInheritSa() *windows.SecurityAttributes { return &sa } -func openSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { +func openSequential(path string, mode int) (fd windows.Handle, err error) { if len(path) == 0 { return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND } @@ -101,15 +105,16 @@ func openSequential(path string, mode int, _ uint32) (fd windows.Handle, err err createmode = windows.OPEN_EXISTING } // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. - // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, windows.FILE_FLAG_SEQUENTIAL_SCAN, 0) return h, e } // Helpers for CreateTemp -var rand uint32 -var randmu sync.Mutex +var ( + rand uint32 + randmu sync.Mutex +) func reseed() uint32 { return uint32(time.Now().UnixNano() + int64(os.Getpid())) @@ -127,17 +132,13 @@ func nextSuffix() string { return strconv.Itoa(int(1e9 + r%1e9))[1:] } -// CreateTemp is a copy of os.CreateTemp, modified to use sequential -// file access. Below is the original comment from golang: -// TempFile creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. +// CreateTemp is a copy of [os.CreateTemp], modified to use sequential file access. +// +// It uses [windows.FILE_FLAG_SEQUENTIAL_SCAN] rather than [windows.FILE_ATTRIBUTE_NORMAL] +// as implemented in golang. 
Refer to the [Win32 API documentation] for details +// on sequential file access. +// +// [Win32 API documentation]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN func CreateTemp(dir, prefix string) (f *os.File, err error) { if dir == "" { dir = os.TempDir() @@ -146,7 +147,7 @@ func CreateTemp(dir, prefix string) (f *os.File, err error) { nconflict := 0 for i := 0; i < 10000; i++ { name := filepath.Join(dir, prefix+nextSuffix()) - f, err = OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) + f, err = openFileSequential(name, windows.O_RDWR|windows.O_CREAT|windows.O_EXCL) if os.IsExist(err) { if nconflict++; nconflict > 10 { randmu.Lock() diff --git a/vendor/github.com/moby/sys/user/idtools.go b/vendor/github.com/moby/sys/user/idtools.go new file mode 100644 index 0000000..595b7a9 --- /dev/null +++ b/vendor/github.com/moby/sys/user/idtools.go @@ -0,0 +1,141 @@ +package user + +import ( + "fmt" + "os" +) + +// MkdirOpt is a type for options to pass to Mkdir calls +type MkdirOpt func(*mkdirOptions) + +type mkdirOptions struct { + onlyNew bool +} + +// WithOnlyNew is an option for MkdirAllAndChown that will only change ownership and permissions +// on newly created directories. If the directory already exists, it will not be modified +func WithOnlyNew(o *mkdirOptions) { + o.onlyNew = true +} + +// MkdirAllAndChown creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. By default, if the directory already exists, this +// function will still change ownership and permissions. If WithOnlyNew is passed as an +// option, then only the newly created directories will have ownership and permissions changed. +func MkdirAllAndChown(path string, mode os.FileMode, uid, gid int, opts ...MkdirOpt) error { + var options mkdirOptions + for _, opt := range opts { + opt(&options) + } + + return mkdirAs(path, mode, uid, gid, true, options.onlyNew) +} + +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. +// By default, if the directory already exists, this function still changes ownership and permissions. +// If WithOnlyNew is passed as an option, then only the newly created directory will have ownership +// and permissions changed. +// Note that unlike os.Mkdir(), this function does not return IsExist error +// in case path already exists. +func MkdirAndChown(path string, mode os.FileMode, uid, gid int, opts ...MkdirOpt) error { + var options mkdirOptions + for _, opt := range opts { + opt(&options) + } + return mkdirAs(path, mode, uid, gid, false, options.onlyNew) +} + +// getRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func getRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + uid, err := toHost(0, uidMap) + if err != nil { + return -1, -1, err + } + gid, err := toHost(0, gidMap) + if err != nil { + return -1, -1, err + } + return uid, gid, nil +} + +// toContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. 
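+// For example, given the single mapping {ID: 0, ParentID: 100000, Count: 65536},
+// host ID 100001 falls inside the range and maps to container ID 1
+// (0 + (100001 - 100000)).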
+// If no map is provided, then the translation assumes a 1-to-1 mapping and
+// returns the passed in id.
+func toContainer(hostID int, idMap []IDMap) (int, error) {
+	if idMap == nil {
+		return hostID, nil
+	}
+	for _, m := range idMap {
+		if (int64(hostID) >= m.ParentID) && (int64(hostID) <= (m.ParentID + m.Count - 1)) {
+			contID := int(m.ID + (int64(hostID) - m.ParentID))
+			return contID, nil
+		}
+	}
+	return -1, fmt.Errorf("host ID %d cannot be mapped to a container ID", hostID)
+}
+
+// toHost takes an id mapping and a remapped ID, and translates the
+// ID to the mapped host ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id.
+func toHost(contID int, idMap []IDMap) (int, error) {
+	if idMap == nil {
+		return contID, nil
+	}
+	for _, m := range idMap {
+		if (int64(contID) >= m.ID) && (int64(contID) <= (m.ID + m.Count - 1)) {
+			hostID := int(m.ParentID + (int64(contID) - m.ID))
+			return hostID, nil
+		}
+	}
+	return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID)
+}
+
+// IdentityMapping contains mappings of UIDs and GIDs.
+// The zero value represents an empty mapping.
+type IdentityMapping struct {
+	UIDMaps []IDMap `json:"UIDMaps"`
+	GIDMaps []IDMap `json:"GIDMaps"`
+}
+
+// RootPair returns a uid and gid pair for the root user. The error is ignored
+// because a root user always exists, and the defaults are correct when the uid
+// and gid maps are empty.
+func (i IdentityMapping) RootPair() (int, int) {
+	uid, gid, _ := getRootUIDGID(i.UIDMaps, i.GIDMaps)
+	return uid, gid
+}
+
+// ToHost returns the host UID and GID for the container uid, gid.
+// Remapping is only performed if the ids aren't already the remapped root ids.
+func (i IdentityMapping) ToHost(uid, gid int) (int, int, error) {
+	var err error
+	ruid, rgid := i.RootPair()
+
+	if uid != ruid {
+		ruid, err = toHost(uid, i.UIDMaps)
+		if err != nil {
+			return ruid, rgid, err
+		}
+	}
+
+	if gid != rgid {
+		rgid, err = toHost(gid, i.GIDMaps)
+	}
+	return ruid, rgid, err
+}
+
+// ToContainer returns the container UID and GID for the host uid and gid.
+func (i IdentityMapping) ToContainer(uid, gid int) (int, int, error) {
+	ruid, err := toContainer(uid, i.UIDMaps)
+	if err != nil {
+		return -1, -1, err
+	}
+	rgid, err := toContainer(gid, i.GIDMaps)
+	return ruid, rgid, err
+}
+
+// Empty returns true if there are no id mappings.
+func (i IdentityMapping) Empty() bool {
+	return len(i.UIDMaps) == 0 && len(i.GIDMaps) == 0
+}
diff --git a/vendor/github.com/moby/sys/user/idtools_unix.go b/vendor/github.com/moby/sys/user/idtools_unix.go
new file mode 100644
index 0000000..4e39d24
--- /dev/null
+++ b/vendor/github.com/moby/sys/user/idtools_unix.go
@@ -0,0 +1,143 @@
+//go:build !windows
+
+package user
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+	"syscall"
+)
+
+func mkdirAs(path string, mode os.FileMode, uid, gid int, mkAll, onlyNew bool) error {
+	path, err := filepath.Abs(path)
+	if err != nil {
+		return err
+	}
+
+	stat, err := os.Stat(path)
+	if err == nil {
+		if !stat.IsDir() {
+			return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
+		}
+		if onlyNew {
+			return nil
+		}
+
+		// short-circuit -- we were called with an existing directory and chown was requested
+		return setPermissions(path, mode, uid, gid, stat)
+	}
+
+	// make an array containing the original path asked for, plus (for mkAll == true)
+	// all path components leading up to the complete path that don't exist before we MkdirAll
+	// so that we can chown all of them
properly at the end. If onlyNew is true, we won't + // chown the full directory path if it exists + var paths []string + if os.IsNotExist(err) { + paths = append(paths, path) + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err = os.Stat(dirPath); os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err = os.MkdirAll(path, mode); err != nil { + return err + } + } else if err = os.Mkdir(path, mode); err != nil { + return err + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err = setPermissions(pathComponent, mode, uid, gid, nil); err != nil { + return err + } + } + return nil +} + +// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested +// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the +// dir is on an NFS share, so don't call chown unless we absolutely must. +// Likewise for setting permissions. +func setPermissions(p string, mode os.FileMode, uid, gid int, stat os.FileInfo) error { + if stat == nil { + var err error + stat, err = os.Stat(p) + if err != nil { + return err + } + } + if stat.Mode().Perm() != mode.Perm() { + if err := os.Chmod(p, mode.Perm()); err != nil { + return err + } + } + ssi := stat.Sys().(*syscall.Stat_t) + if ssi.Uid == uint32(uid) && ssi.Gid == uint32(gid) { + return nil + } + return os.Chown(p, uid, gid) +} + +// LoadIdentityMapping takes a requested username and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func LoadIdentityMapping(name string) (IdentityMapping, error) { + // TODO: Consider adding support for calling out to "getent" + usr, err := LookupUser(name) + if err != nil { + return IdentityMapping{}, fmt.Errorf("could not get user for username %s: %w", name, err) + } + + subuidRanges, err := lookupSubRangesFile("/etc/subuid", usr) + if err != nil { + return IdentityMapping{}, err + } + subgidRanges, err := lookupSubRangesFile("/etc/subgid", usr) + if err != nil { + return IdentityMapping{}, err + } + + return IdentityMapping{ + UIDMaps: subuidRanges, + GIDMaps: subgidRanges, + }, nil +} + +func lookupSubRangesFile(path string, usr User) ([]IDMap, error) { + uidstr := strconv.Itoa(usr.Uid) + rangeList, err := ParseSubIDFileFilter(path, func(sid SubID) bool { + return sid.Name == usr.Name || sid.Name == uidstr + }) + if err != nil { + return nil, err + } + if len(rangeList) == 0 { + return nil, fmt.Errorf("no subuid ranges found for user %q", usr.Name) + } + + idMap := []IDMap{} + + var containerID int64 + for _, idrange := range rangeList { + idMap = append(idMap, IDMap{ + ID: containerID, + ParentID: idrange.SubID, + Count: idrange.Count, + }) + containerID = containerID + idrange.Count + } + return idMap, nil +} diff --git a/vendor/github.com/moby/sys/user/idtools_windows.go b/vendor/github.com/moby/sys/user/idtools_windows.go new file mode 100644 index 0000000..9de730c --- /dev/null +++ b/vendor/github.com/moby/sys/user/idtools_windows.go @@ -0,0 +1,13 @@ +package user + +import ( + "os" +) + +// This is currently a wrapper around [os.MkdirAll] since currently +// permissions aren't set through this path, the identity isn't utilized. 
+// Ownership is handled elsewhere, but could be supported here in the future.
+func mkdirAs(path string, _ os.FileMode, _, _ int, _, _ bool) error {
+	return os.MkdirAll(path, 0)
+}
diff --git a/vendor/github.com/moby/sys/user/user.go b/vendor/github.com/moby/sys/user/user.go
index 984466d..198c493 100644
--- a/vendor/github.com/moby/sys/user/user.go
+++ b/vendor/github.com/moby/sys/user/user.go
@@ -197,7 +197,6 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
 	for {
 		var line []byte
 		line, isPrefix, err = rd.ReadLine()
-
 		if err != nil {
 			// We should return no error if EOF is reached
 			// without a match.
diff --git a/vendor/github.com/moby/term/term_unix.go b/vendor/github.com/moby/term/term_unix.go
index 2ec7706..579ce55 100644
--- a/vendor/github.com/moby/term/term_unix.go
+++ b/vendor/github.com/moby/term/term_unix.go
@@ -81,7 +81,7 @@ func setRawTerminal(fd uintptr) (*State, error) {
 	return makeRaw(fd)
 }
 
-func setRawTerminalOutput(fd uintptr) (*State, error) {
+func setRawTerminalOutput(uintptr) (*State, error) {
 	return nil, nil
 }
diff --git a/vendor/github.com/playwright-community/playwright-go/.gitattributes b/vendor/github.com/playwright-community/playwright-go/.gitattributes
new file mode 100644
index 0000000..c976050
--- /dev/null
+++ b/vendor/github.com/playwright-community/playwright-go/.gitattributes
@@ -0,0 +1,3 @@
+# text files must be lf for golden file tests to work
+* text=auto eol=lf
+
diff --git a/vendor/github.com/playwright-community/playwright-go/.gitignore b/vendor/github.com/playwright-community/playwright-go/.gitignore
new file mode 100644
index 0000000..83e2355
--- /dev/null
+++ b/vendor/github.com/playwright-community/playwright-go/.gitignore
@@ -0,0 +1,34 @@
+# Created by https://www.toptal.com/developers/gitignore/api/go
+# Edit at https://www.toptal.com/developers/gitignore?templates=go
+
+### Go ###
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+
+### Go Patch ###
+/vendor/
+/Godeps/
+
+# End of https://www.toptal.com/developers/gitignore/api/go
+covprofile
+.idea/
+.DS_Store
+
+api.json
+_site/
+.jekyll-cache/
+
+.vscode/settings.json
\ No newline at end of file
diff --git a/vendor/github.com/playwright-community/playwright-go/.gitmodules b/vendor/github.com/playwright-community/playwright-go/.gitmodules
new file mode 100644
index 0000000..9ab899d
--- /dev/null
+++ b/vendor/github.com/playwright-community/playwright-go/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "playwright"]
+	path = playwright
+	url = https://github.com/microsoft/playwright
diff --git a/vendor/github.com/playwright-community/playwright-go/.golangci.yaml b/vendor/github.com/playwright-community/playwright-go/.golangci.yaml
new file mode 100644
index 0000000..1557a3f
--- /dev/null
+++ b/vendor/github.com/playwright-community/playwright-go/.golangci.yaml
@@ -0,0 +1,6 @@
+---
+linters:
+  enable-all: false
+  disable-all: false
+  enable:
+    - gofumpt
\ No newline at end of file
diff --git a/vendor/github.com/playwright-community/playwright-go/.nojekyll b/vendor/github.com/playwright-community/playwright-go/.nojekyll
new file mode 100644
index 0000000..e69de29
diff --git a/vendor/github.com/playwright-community/playwright-go/404.html b/vendor/github.com/playwright-community/playwright-go/404.html
new file mode 100644
index
0000000..086a5c9 --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/404.html @@ -0,0 +1,25 @@ +--- +permalink: /404.html +layout: default +--- + + + +
+<style type="text/css" media="screen">
+  .container {
+    margin: 10px auto;
+    max-width: 600px;
+    text-align: center;
+  }
+
+  h1 {
+    margin: 30px 0;
+    font-size: 4em;
+    line-height: 1;
+    letter-spacing: -1px;
+  }
+</style>
+
+<div class="container">
+  <h1>404</h1>
+  <p><strong>Page not found :(</strong></p>
+  <p>The requested page could not be found.</p>
+</div>
diff --git a/vendor/github.com/playwright-community/playwright-go/CONTRIBUTING.md b/vendor/github.com/playwright-community/playwright-go/CONTRIBUTING.md
new file mode 100644
index 0000000..3b11995
--- /dev/null
+++ b/vendor/github.com/playwright-community/playwright-go/CONTRIBUTING.md
@@ -0,0 +1,39 @@
+# Contributing
+
+## Code style
+The Go code is linted with [golangci-lint](https://golangci-lint.run/) and formatted with [gofumpt](https://github.com/mvdan/gofumpt). Please configure your editor to run the tools while developing, and make sure to run them before committing any code.
+
+## Tests
+
+### Test coverage
+
+For every Pull Request on GitHub and on the main branch, the coverage data gets sent to [Coveralls](https://coveralls.io/github/playwright-community/playwright-go). This is helpful for finding functions that aren't covered by tests.
+
+### Running tests
+
+You can use the `BROWSER` environment variable to run the tests against a browser other than Chromium, and the `HEADLESS` environment variable to control headless mode, which is useful for debugging.
+
+```
+BROWSER=chromium HEADLESS=1 go test -v --race ./...
+```
+
+### Roll
+
+1. Find out which upstream version you want to roll to, and change the value of `playwrightCliVersion` in **run.go** to the new version.
+1. Download the current version of the Playwright driver: `go run scripts/install-browsers/main.go`
+1. Apply the patch: `bash scripts/apply-patch.sh`
+1. Fix merge conflicts, if any; otherwise skip this step. Once you are happy, commit the changes: `cd playwright; git commit -am "apply patch" && cd ..`
+1. Regenerate the patch: `bash scripts/update-patch.sh`
+1. Generate the Go code: `go generate ./...`
+
+To adapt to new versions of Playwright's protocol and feature updates, you may need to modify the patch. Refer to the following steps:
+
+1. Apply the patch: `bash scripts/apply-patch.sh`
+1. `cd playwright`
+1. Revert the patch: `git reset HEAD~1`
+1. Modify the files under `docs/src/api`, etc. as needed. Available references:
+   - Protocol: `packages/protocol/src/protocol.yml`
+   - [Playwright python](https://github.com/microsoft/playwright-python)
+1. Commit the changes: `git commit -am "apply patch"`
+1. Regenerate the patch: `bash scripts/update-patch.sh`
+1. Generate the Go code: `go generate ./...`
diff --git a/vendor/github.com/playwright-community/playwright-go/Dockerfile.example b/vendor/github.com/playwright-community/playwright-go/Dockerfile.example
new file mode 100644
index 0000000..3077cf3
--- /dev/null
+++ b/vendor/github.com/playwright-community/playwright-go/Dockerfile.example
@@ -0,0 +1,25 @@
+# Stage 1: Modules caching
+FROM golang:1.22 as modules
+COPY go.mod go.sum /modules/
+WORKDIR /modules
+RUN go mod download
+
+# Stage 2: Build
+FROM golang:1.22 as builder
+COPY --from=modules /go/pkg /go/pkg
+COPY . /workdir
+WORKDIR /workdir
+# Install the playwright CLI with the right version for later use
+RUN PWGO_VER=$(grep -oE "playwright-go v\S+" /workdir/go.mod | sed 's/playwright-go //g') \
+	&& go install github.com/playwright-community/playwright-go/cmd/playwright@${PWGO_VER}
+# Build your app
+RUN GOOS=linux GOARCH=amd64 go build -o /bin/myapp
+
+# Stage 3: Final
+FROM ubuntu:noble
+COPY --from=builder /go/bin/playwright /bin/myapp /
+RUN apt-get update && apt-get install -y ca-certificates tzdata \
+	# Install dependencies and all browsers (or specify one)
+	&& /playwright install --with-deps \
+	&& rm -rf /var/lib/apt/lists/*
+CMD ["/myapp"]
\ No newline at end of file
diff --git a/vendor/github.com/playwright-community/playwright-go/LICENSE b/vendor/github.com/playwright-community/playwright-go/LICENSE
new file mode 100644
index 0000000..d4f29b3
--- /dev/null
+++ b/vendor/github.com/playwright-community/playwright-go/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Max Schmitt
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/playwright-community/playwright-go/README.md b/vendor/github.com/playwright-community/playwright-go/README.md
new file mode 100644
index 0000000..a9e3dd7
--- /dev/null
+++ b/vendor/github.com/playwright-community/playwright-go/README.md
@@ -0,0 +1,148 @@
+# 🎭 [Playwright](https://github.com/microsoft/playwright#readme) for Go
+
+## Looking for maintainers; see [here](https://github.com/playwright-community/playwright-go/issues/122). Thanks!
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/playwright-community/playwright-go)](https://pkg.go.dev/github.com/playwright-community/playwright-go)
+[![License](https://img.shields.io/badge/License-MIT-blue.svg)](http://opensource.org/licenses/MIT)
+[![Go Report Card](https://goreportcard.com/badge/github.com/playwright-community/playwright-go)](https://goreportcard.com/report/github.com/playwright-community/playwright-go) ![Build Status](https://github.com/playwright-community/playwright-go/workflows/Go/badge.svg)
+[![Join Slack](https://img.shields.io/badge/join-slack-infomational)](https://aka.ms/playwright-slack) [![Coverage Status](https://coveralls.io/repos/github/playwright-community/playwright-go/badge.svg?branch=main)](https://coveralls.io/github/playwright-community/playwright-go?branch=main) [![Chromium version](https://img.shields.io/badge/chromium-136.0.7103.25-blue.svg?logo=google-chrome)](https://www.chromium.org/Home) [![Firefox version](https://img.shields.io/badge/firefox-137.0-blue.svg?logo=mozilla-firefox)](https://www.mozilla.org/en-US/firefox/new/) [![WebKit version](https://img.shields.io/badge/webkit-18.4-blue.svg?logo=safari)](https://webkit.org/)
+
+[API reference](https://playwright.dev/docs/api/class-playwright) | [Example recipes](https://github.com/playwright-community/playwright-go/tree/main/examples)
+
+Playwright is a Go library to automate [Chromium](https://www.chromium.org/Home), [Firefox](https://www.mozilla.org/en-US/firefox/new/) and [WebKit](https://webkit.org/) with a single API. Playwright is built to enable cross-browser web automation that is **ever-green**, **capable**, **reliable** and **fast**.
+
+| | Linux | macOS | Windows |
+| :--- | :---: | :---: | :---: |
+| Chromium 136.0.7103.25 | ✅ | ✅ | ✅ |
+| WebKit 18.4 | ✅ | ✅ | ✅ |
+| Firefox 137.0 | ✅ | ✅ | ✅ |
+
+Headless execution is supported for all the browsers on all platforms.
+
+## Installation
+
+```txt
+go get -u github.com/playwright-community/playwright-go
+```
+
+Install the browsers and OS dependencies:
+
+```bash
+go run github.com/playwright-community/playwright-go/cmd/playwright@latest install --with-deps
+# Or
+go install github.com/playwright-community/playwright-go/cmd/playwright@latest
+playwright install --with-deps
+```
+
+Alternatively you can do it inside your program via `playwright.Install()`, which downloads the driver and browsers:
+
+```go
+err := playwright.Install()
+```
+
+## Capabilities
+
+Playwright is built to automate the broad and growing set of web browser capabilities used by Single Page Apps and Progressive Web Apps.
+
+* Scenarios that span multiple pages, domains and iframes
+* Auto-wait for elements to be ready before executing actions (like click, fill)
+* Intercept network activity for stubbing and mocking network requests
+* Emulate mobile devices, geolocation, permissions
+* Support for web components via shadow-piercing selectors
+* Native input events for mouse and keyboard
+* Upload and download files
+
+## Example
+
+The following example crawls the current top voted items from [Hacker News](https://news.ycombinator.com).
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/playwright-community/playwright-go"
+)
+
+func main() {
+	pw, err := playwright.Run()
+	if err != nil {
+		log.Fatalf("could not start playwright: %v", err)
+	}
+	browser, err := pw.Chromium.Launch()
+	if err != nil {
+		log.Fatalf("could not launch browser: %v", err)
+	}
+	page, err := browser.NewPage()
+	if err != nil {
+		log.Fatalf("could not create page: %v", err)
+	}
+	if _, err = page.Goto("https://news.ycombinator.com"); err != nil {
+		log.Fatalf("could not goto: %v", err)
+	}
+	entries, err := page.Locator(".athing").All()
+	if err != nil {
+		log.Fatalf("could not get entries: %v", err)
+	}
+	for i, entry := range entries {
+		title, err := entry.Locator("td.title > span > a").TextContent()
+		if err != nil {
+			log.Fatalf("could not get text content: %v", err)
+		}
+		fmt.Printf("%d: %s\n", i+1, title)
+	}
+	if err = browser.Close(); err != nil {
+		log.Fatalf("could not close browser: %v", err)
+	}
+	if err = pw.Stop(); err != nil {
+		log.Fatalf("could not stop Playwright: %v", err)
+	}
+}
+```
+
+## Docker
+
+Refer to the [Dockerfile.example](./Dockerfile.example) to build your own Docker image.
+
+## More examples
+
+* Refer to [helper_test.go](./tests/helper_test.go) for End-To-End testing
+* [Downloading files](./examples/download/main.go)
+* [End-To-End testing a website](./examples/end-to-end-testing/main.go)
+* [Executing JavaScript in the browser](./examples/javascript/main.go)
+* [Emulate mobile and geolocation](./examples/mobile-and-geolocation/main.go)
+* [Parallel scraping using a WaitGroup](./examples/parallel-scraping/main.go)
+* [Rendering a PDF of a website](./examples/pdf/main.go)
+* [Scraping HackerNews](./examples/scraping/main.go)
+* [Take a screenshot](./examples/screenshot/main.go)
+* [Record a video](./examples/video/main.go)
+* [Monitor network activity](./examples/network-monitoring/main.go)
+
+## How does it work?
+
+Playwright is a Node.js library which uses:
+
+* Chrome DevTools Protocol to communicate with Chromium
+* Patched Firefox to communicate with Firefox
+* Patched WebKit to communicate with WebKit
+
+These patches are based on the original sources of the browsers and don't modify the browser behaviour, so the browsers are basically the same (see [here](https://github.com/microsoft/playwright/tree/main/browser_patches)) as you see them in the wild. The support for different programming languages is based on exposing an RPC server in the Node.js land which can be used to allow other languages to use Playwright without implementing all the custom logic:
+
+* [Playwright for Python](https://github.com/microsoft/playwright-python)
+* [Playwright for .NET](https://github.com/microsoft/playwright-sharp)
+* [Playwright for Java](https://github.com/microsoft/playwright-java)
+* [Playwright for Go](https://github.com/playwright-community/playwright-go)
+
+The bridge between Node.js and the other languages is basically a Node.js runtime combined with Playwright which gets shipped for each of these languages (around 50MB) and then communicates over stdio to send the relevant commands. This will also download the pre-compiled browsers.
+
+## Is Playwright for Go ready?
+
+We are ready for your feedback, but we are still expanding the test coverage of Playwright for Go.
+
+## Resources
+
+* [Playwright for Go Documentation](https://pkg.go.dev/github.com/playwright-community/playwright-go)
+* [Playwright Documentation](https://playwright.dev/docs/api/class-playwright)
+* [Example recipes](https://github.com/playwright-community/playwright-go/tree/main/examples)
diff --git a/vendor/github.com/playwright-community/playwright-go/_config.yml b/vendor/github.com/playwright-community/playwright-go/_config.yml
new file mode 100644
index 0000000..15ec3b2
--- /dev/null
+++ b/vendor/github.com/playwright-community/playwright-go/_config.yml
@@ -0,0 +1,21 @@
+title: Playwright for Go
+email: max@schmitt.mx
+description: >- # this means to ignore newlines until "baseurl:"
+  Playwright is a Node.js library to automate Chromium, Firefox and WebKit with a single API.
+  Playwright is built to enable cross-browser web automation that is ever-green, capable, reliable and fast.
+baseurl: "/playwright-go"
+url: "https://playwright-community.github.io/"
+twitter_username: maxibanki
+github_username: playwright-community
+remote_theme: pages-themes/cayman@v0.2.0
+plugins:
+  - jekyll-remote-theme
+  - jekyll-optional-front-matter
+  - jekyll-readme-index
+exclude:
+  - playwright/
+defaults:
+  - scope:
+      path: "" # an empty string here means all files in the project
+    values:
+      layout: "default"
diff --git a/vendor/github.com/playwright-community/playwright-go/apiresponse_assertions.go b/vendor/github.com/playwright-community/playwright-go/apiresponse_assertions.go
new file mode 100644
index 0000000..187618e
--- /dev/null
+++ b/vendor/github.com/playwright-community/playwright-go/apiresponse_assertions.go
@@ -0,0 +1,75 @@
+package playwright
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+type apiResponseAssertionsImpl struct {
+	actual APIResponse
+	isNot  bool
+}
+
+func newAPIResponseAssertions(actual APIResponse, isNot bool) *apiResponseAssertionsImpl {
+	return &apiResponseAssertionsImpl{
+		actual: actual,
+		isNot:  isNot,
+	}
+}
+
+func (ar *apiResponseAssertionsImpl) Not() APIResponseAssertions {
+	return newAPIResponseAssertions(ar.actual, true)
+}
+
+func (ar *apiResponseAssertionsImpl) ToBeOK() error {
+	if ar.isNot != ar.actual.Ok() {
+		return nil
+	}
+	message := fmt.Sprintf(`Response status expected to be within [200..299] range, was %v`, ar.actual.Status())
+	if ar.isNot {
+		message = strings.ReplaceAll(message, "expected to", "expected not to")
+	}
+	logList, err := ar.actual.(*apiResponseImpl).fetchLog()
+	if err != nil {
+		return err
+	}
+	log := strings.Join(logList, "\n")
+	if log != "" {
+		message += "\nCall log:\n" + log
+	}
+
+	isTextEncoding := false
+	contentType, ok := ar.actual.Headers()["content-type"]
+	if ok {
+		isTextEncoding = isTextualMimeType(contentType)
+	}
+	if isTextEncoding {
+		text, err := ar.actual.Text()
+		if err == nil {
+			// use an interpreted string literal so the newlines are real
+			message += fmt.Sprintf("\n Response Text:\n %s", subString(text, 0, 1000))
+		}
+	}
+	return errors.New(message)
+}
+
+// isTextualMimeType reports whether the mime type is text-based.
+func isTextualMimeType(mimeType string) bool {
+	re := regexp.MustCompile(`^(text\/.*?|application\/(json|(x-)?javascript|xml.*?|ecmascript|graphql|x-www-form-urlencoded)|image\/svg(\+xml)?|application\/.*?(\+json|\+xml))(;\s*charset=.*)?$`)
+	return re.MatchString(mimeType)
+}
+
+func subString(s string, start, length int) string {
+	if start < 0 {
+		start = 0
+	}
+	if length < 0 {
+		length = 0
+	}
+	rs := []rune(s)
+	end := start + length
+	if end > len(rs) {
+		end = len(rs)
+	}
+	return string(rs[start:end])
+}
diff --git a/vendor/github.com/playwright-community/playwright-go/artifact.go
b/vendor/github.com/playwright-community/playwright-go/artifact.go new file mode 100644 index 0000000..c76b892 --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/artifact.go @@ -0,0 +1,70 @@ +package playwright + +import ( + "errors" + "fmt" +) + +type artifactImpl struct { + channelOwner +} + +func (a *artifactImpl) AbsolutePath() string { + return a.initializer["absolutePath"].(string) +} + +func (a *artifactImpl) PathAfterFinished() (string, error) { + if a.connection.isRemote { + return "", errors.New("Path is not available when connecting remotely. Use SaveAs() to save a local copy") + } + path, err := a.channel.Send("pathAfterFinished") + return path.(string), err +} + +func (a *artifactImpl) SaveAs(path string) error { + if !a.connection.isRemote { + _, err := a.channel.Send("saveAs", map[string]interface{}{ + "path": path, + }) + return err + } + streamChannel, err := a.channel.Send("saveAsStream") + if err != nil { + return err + } + stream := fromChannel(streamChannel).(*streamImpl) + return stream.SaveAs(path) +} + +func (a *artifactImpl) Failure() error { + reason, err := a.channel.Send("failure") + if reason == nil { + return err + } + return fmt.Errorf("%w: %v", ErrPlaywright, reason) +} + +func (a *artifactImpl) Delete() error { + _, err := a.channel.Send("delete") + return err +} + +func (a *artifactImpl) Cancel() error { + _, err := a.channel.Send("cancel") + return err +} + +func (a *artifactImpl) ReadIntoBuffer() ([]byte, error) { + streamChannel, err := a.channel.Send("stream") + if err != nil { + return nil, err + } + stream := fromChannel(streamChannel) + return stream.(*streamImpl).ReadAll() +} + +func newArtifact(parent *channelOwner, objectType string, guid string, initializer map[string]interface{}) *artifactImpl { + artifact := &artifactImpl{} + artifact.createChannelOwner(artifact, parent, objectType, guid, initializer) + return artifact +} diff --git a/vendor/github.com/playwright-community/playwright-go/assertions.go b/vendor/github.com/playwright-community/playwright-go/assertions.go new file mode 100644 index 0000000..5e0e710 --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/assertions.go @@ -0,0 +1,146 @@ +package playwright + +import ( + "errors" + "fmt" + "reflect" + "regexp" + "strings" +) + +const assertionsDefaultTimeout = 5000 // 5s + +type playwrightAssertionsImpl struct { + defaultTimeout *float64 +} + +// NewPlaywrightAssertions creates a new instance of PlaywrightAssertions +// - timeout: default value is 5000 (ms) +func NewPlaywrightAssertions(timeout ...float64) PlaywrightAssertions { + if len(timeout) > 0 { + return &playwrightAssertionsImpl{Float(timeout[0])} + } + return &playwrightAssertionsImpl{Float(assertionsDefaultTimeout)} +} + +func (pa *playwrightAssertionsImpl) APIResponse(response APIResponse) APIResponseAssertions { + return newAPIResponseAssertions(response, false) +} + +func (pa *playwrightAssertionsImpl) Locator(locator Locator) LocatorAssertions { + return newLocatorAssertions(locator, false, pa.defaultTimeout) +} + +func (pa *playwrightAssertionsImpl) Page(page Page) PageAssertions { + return newPageAssertions(page, false, pa.defaultTimeout) +} + +type expectedTextValue struct { + Str *string `json:"string,omitempty"` + RegexSource *string `json:"regexSource,omitempty"` + RegexFlags *string `json:"regexFlags,omitempty"` + MatchSubstring *bool `json:"matchSubstring,omitempty"` + IgnoreCase *bool `json:"ignoreCase,omitempty"` + NormalizeWhiteSpace *bool 
`json:"normalizeWhiteSpace,omitempty"` +} + +type frameExpectOptions struct { + ExpressionArg interface{} `json:"expressionArg,omitempty"` + ExpectedText []expectedTextValue `json:"expectedText,omitempty"` + ExpectedNumber *float64 `json:"expectedNumber,omitempty"` + ExpectedValue interface{} `json:"expectedValue,omitempty"` + UseInnerText *bool `json:"useInnerText,omitempty"` + IsNot bool `json:"isNot"` + Timeout *float64 `json:"timeout"` +} + +type frameExpectResult struct { + Matches bool `json:"matches"` + Received interface{} `json:"received,omitempty"` + TimedOut *bool `json:"timedOut,omitempty"` + Log []string `json:"log,omitempty"` +} + +type assertionsBase struct { + actualLocator Locator + isNot bool + defaultTimeout *float64 +} + +func (b *assertionsBase) expect( + expression string, + options frameExpectOptions, + expected interface{}, + message string, +) error { + options.IsNot = b.isNot + if options.Timeout == nil { + options.Timeout = b.defaultTimeout + } + if options.IsNot { + message = strings.ReplaceAll(message, "expected to", "expected not to") + } + result, err := b.actualLocator.(*locatorImpl).expect(expression, options) + if err != nil { + return err + } + + if result.Matches == b.isNot { + actual := result.Received + log := strings.Join(result.Log, "\n") + if log != "" { + log = "\nCall log:\n" + log + } + if expected != nil { + return fmt.Errorf("%s '%v'\nActual value: %v %s", message, expected, actual, log) + } + return fmt.Errorf("%s\nActual value: %v %s", message, actual, log) + } + + return nil +} + +func toExpectedTextValues( + items []interface{}, + matchSubstring bool, + normalizeWhiteSpace bool, + ignoreCase *bool, +) ([]expectedTextValue, error) { + var out []expectedTextValue + for _, item := range items { + switch item := item.(type) { + case string: + out = append(out, expectedTextValue{ + Str: String(item), + MatchSubstring: Bool(matchSubstring), + NormalizeWhiteSpace: Bool(normalizeWhiteSpace), + IgnoreCase: ignoreCase, + }) + case *regexp.Regexp: + pattern, flags := convertRegexp(item) + out = append(out, expectedTextValue{ + RegexSource: String(pattern), + RegexFlags: String(flags), + MatchSubstring: Bool(matchSubstring), + NormalizeWhiteSpace: Bool(normalizeWhiteSpace), + IgnoreCase: ignoreCase, + }) + default: + return nil, errors.New("value must be a string or regexp") + } + } + return out, nil +} + +func convertToInterfaceList(v interface{}) []interface{} { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Slice { + return []interface{}{v} + } + + list := make([]interface{}, rv.Len()) + for i := 0; i < rv.Len(); i++ { + list[i] = rv.Index(i).Interface() + } + return list +} diff --git a/vendor/github.com/playwright-community/playwright-go/binding_call.go b/vendor/github.com/playwright-community/playwright-go/binding_call.go new file mode 100644 index 0000000..8468992 --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/binding_call.go @@ -0,0 +1,87 @@ +package playwright + +import ( + "fmt" + "strings" + + "github.com/go-stack/stack" +) + +type BindingCall interface { + Call(f BindingCallFunction) +} + +type bindingCallImpl struct { + channelOwner +} + +// BindingSource is the value passed to a binding call execution +type BindingSource struct { + Context BrowserContext + Page Page + Frame Frame +} + +// ExposedFunction represents the func signature of an exposed function +type ExposedFunction = func(args ...interface{}) interface{} + +// BindingCallFunction represents the func signature of an exposed binding call func 
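+//
+// For example (an illustrative sketch; the handler echoes the page URL via
+// the Page interface):
+//
+//	func(source *BindingSource, args ...interface{}) interface{} {
+//		return source.Page.URL()
+//	}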
+type BindingCallFunction func(source *BindingSource, args ...interface{}) interface{} + +func (b *bindingCallImpl) Call(f BindingCallFunction) { + defer func() { + if r := recover(); r != nil { + if _, err := b.channel.Send("reject", map[string]interface{}{ + "error": serializeError(r.(error)), + }); err != nil { + logger.Error("could not reject BindingCall", "error", err) + } + } + }() + + frame := fromChannel(b.initializer["frame"]).(*frameImpl) + source := &BindingSource{ + Context: frame.Page().Context(), + Page: frame.Page(), + Frame: frame, + } + var result interface{} + if handle, ok := b.initializer["handle"]; ok { + result = f(source, fromChannel(handle)) + } else { + initializerArgs := b.initializer["args"].([]interface{}) + funcArgs := []interface{}{} + for i := 0; i < len(initializerArgs); i++ { + funcArgs = append(funcArgs, parseResult(initializerArgs[i])) + } + result = f(source, funcArgs...) + } + _, err := b.channel.Send("resolve", map[string]interface{}{ + "result": serializeArgument(result), + }) + if err != nil { + logger.Error("could not resolve BindingCall", "error", err) + } +} + +func serializeError(err error) map[string]interface{} { + st := stack.Trace().TrimRuntime() + if len(st) == 0 { // https://github.com/go-stack/stack/issues/27 + st = stack.Trace() + } + return map[string]interface{}{ + "error": &Error{ + Name: "Playwright for Go Error", + Message: err.Error(), + Stack: strings.ReplaceAll(strings.TrimFunc(fmt.Sprintf("%+v", st), func(r rune) bool { + return r == '[' || r == ']' + }), " ", "\n"), + }, + } +} + +func newBindingCall(parent *channelOwner, objectType string, guid string, initializer map[string]interface{}) *bindingCallImpl { + bt := &bindingCallImpl{} + bt.createChannelOwner(bt, parent, objectType, guid, initializer) + return bt +} diff --git a/vendor/github.com/playwright-community/playwright-go/browser.go b/vendor/github.com/playwright-community/playwright-go/browser.go new file mode 100644 index 0000000..c87540a --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/browser.go @@ -0,0 +1,274 @@ +package playwright + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" +) + +type browserImpl struct { + channelOwner + isConnected bool + shouldCloseConnectionOnClose bool + contexts []BrowserContext + browserType BrowserType + chromiumTracingPath *string + closeReason *string +} + +func (b *browserImpl) BrowserType() BrowserType { + return b.browserType +} + +func (b *browserImpl) IsConnected() bool { + b.RLock() + defer b.RUnlock() + return b.isConnected +} + +func (b *browserImpl) NewContext(options ...BrowserNewContextOptions) (BrowserContext, error) { + overrides := map[string]interface{}{} + option := BrowserNewContextOptions{} + if len(options) == 1 { + option = options[0] + } + if option.AcceptDownloads != nil { + if *option.AcceptDownloads { + overrides["acceptDownloads"] = "accept" + } else { + overrides["acceptDownloads"] = "deny" + } + options[0].AcceptDownloads = nil + } + if option.ExtraHttpHeaders != nil { + overrides["extraHTTPHeaders"] = serializeMapToNameAndValue(options[0].ExtraHttpHeaders) + options[0].ExtraHttpHeaders = nil + } + if option.ClientCertificates != nil { + certs, err := transformClientCertificate(option.ClientCertificates) + if err != nil { + return nil, err + } + overrides["clientCertificates"] = certs + options[0].ClientCertificates = nil + } + if option.StorageStatePath != nil { + var storageState *OptionalStorageState + storageString, err := 
os.ReadFile(*options[0].StorageStatePath) + if err != nil { + return nil, fmt.Errorf("could not read storage state file: %w", err) + } + err = json.Unmarshal(storageString, &storageState) + if err != nil { + return nil, fmt.Errorf("could not parse storage state file: %w", err) + } + options[0].StorageState = storageState + options[0].StorageStatePath = nil + } + if option.NoViewport != nil && *options[0].NoViewport { + overrides["noDefaultViewport"] = true + options[0].NoViewport = nil + } + if option.RecordHarPath != nil { + overrides["recordHar"] = prepareRecordHarOptions(recordHarInputOptions{ + Path: *options[0].RecordHarPath, + URL: options[0].RecordHarURLFilter, + Mode: options[0].RecordHarMode, + Content: options[0].RecordHarContent, + OmitContent: options[0].RecordHarOmitContent, + }) + options[0].RecordHarPath = nil + options[0].RecordHarURLFilter = nil + options[0].RecordHarMode = nil + options[0].RecordHarContent = nil + options[0].RecordHarOmitContent = nil + } + channel, err := b.channel.Send("newContext", options, overrides) + if err != nil { + return nil, err + } + context := fromChannel(channel).(*browserContextImpl) + context.browser = b + b.browserType.(*browserTypeImpl).didCreateContext(context, &option, nil) + return context, nil +} + +func (b *browserImpl) NewPage(options ...BrowserNewPageOptions) (Page, error) { + opts := make([]BrowserNewContextOptions, 0) + if len(options) == 1 { + opts = append(opts, BrowserNewContextOptions(options[0])) + } + context, err := b.NewContext(opts...) + if err != nil { + return nil, err + } + page, err := context.NewPage() + if err != nil { + return nil, err + } + page.(*pageImpl).ownedContext = context + context.(*browserContextImpl).ownedPage = page + return page, nil +} + +func (b *browserImpl) NewBrowserCDPSession() (CDPSession, error) { + channel, err := b.channel.Send("newBrowserCDPSession") + if err != nil { + return nil, err + } + + cdpSession := fromChannel(channel).(*cdpSessionImpl) + + return cdpSession, nil +} + +func (b *browserImpl) Contexts() []BrowserContext { + b.Lock() + defer b.Unlock() + return b.contexts +} + +func (b *browserImpl) Close(options ...BrowserCloseOptions) (err error) { + if len(options) == 1 { + b.closeReason = options[0].Reason + } + + if b.shouldCloseConnectionOnClose { + err = b.connection.Stop() + } else if b.closeReason != nil { + _, err = b.channel.Send("close", map[string]interface{}{ + "reason": b.closeReason, + }) + } else { + _, err = b.channel.Send("close") + } + if err != nil && !errors.Is(err, ErrTargetClosed) { + return fmt.Errorf("close browser failed: %w", err) + } + return nil +} + +func (b *browserImpl) Version() string { + return b.initializer["version"].(string) +} + +func (b *browserImpl) StartTracing(options ...BrowserStartTracingOptions) error { + overrides := map[string]interface{}{} + option := BrowserStartTracingOptions{} + if len(options) == 1 { + option = options[0] + } + if option.Page != nil { + overrides["page"] = option.Page.(*pageImpl).channel + option.Page = nil + } + if option.Path != nil { + b.chromiumTracingPath = option.Path + option.Path = nil + } + _, err := b.channel.Send("startTracing", option, overrides) + return err +} + +func (b *browserImpl) StopTracing() ([]byte, error) { + channel, err := b.channel.Send("stopTracing") + if err != nil { + return nil, err + } + artifact := fromChannel(channel).(*artifactImpl) + binary, err := artifact.ReadIntoBuffer() + if err != nil { + return nil, err + } + err = artifact.Delete() + if err != nil { + return binary, err + 
} + if b.chromiumTracingPath != nil { + err := os.MkdirAll(filepath.Dir(*b.chromiumTracingPath), 0o777) + if err != nil { + return binary, err + } + err = os.WriteFile(*b.chromiumTracingPath, binary, 0o644) + if err != nil { + return binary, err + } + } + return binary, nil +} + +func (b *browserImpl) onClose() { + b.Lock() + if b.isConnected { + b.isConnected = false + b.Unlock() + b.Emit("disconnected", b) + return + } + b.Unlock() +} + +func (b *browserImpl) OnDisconnected(fn func(Browser)) { + b.On("disconnected", fn) +} + +func newBrowser(parent *channelOwner, objectType string, guid string, initializer map[string]interface{}) *browserImpl { + b := &browserImpl{ + isConnected: true, + contexts: make([]BrowserContext, 0), + } + b.createChannelOwner(b, parent, objectType, guid, initializer) + // convert parent to *browserTypeImpl + b.browserType = newBrowserType(parent.parent, parent.objectType, parent.guid, parent.initializer) + b.channel.On("close", b.onClose) + return b +} + +func transformClientCertificate(clientCertificates []ClientCertificate) ([]map[string]interface{}, error) { + results := make([]map[string]interface{}, 0) + + for _, cert := range clientCertificates { + data := map[string]interface{}{ + "origin": cert.Origin, + "passphrase": cert.Passphrase, + } + if len(cert.Cert) > 0 { + data["cert"] = base64.StdEncoding.EncodeToString(cert.Cert) + } else if cert.CertPath != nil { + content, err := os.ReadFile(*cert.CertPath) + if err != nil { + return nil, err + } + data["cert"] = base64.StdEncoding.EncodeToString(content) + } + + if len(cert.Key) > 0 { + data["key"] = base64.StdEncoding.EncodeToString(cert.Key) + } else if cert.KeyPath != nil { + content, err := os.ReadFile(*cert.KeyPath) + if err != nil { + return nil, err + } + data["key"] = base64.StdEncoding.EncodeToString(content) + } + + if len(cert.Pfx) > 0 { + data["pfx"] = base64.StdEncoding.EncodeToString(cert.Pfx) + } else if cert.PfxPath != nil { + content, err := os.ReadFile(*cert.PfxPath) + if err != nil { + return nil, err + } + data["pfx"] = base64.StdEncoding.EncodeToString(content) + } + + results = append(results, data) + } + if len(results) == 0 { + return nil, nil + } + return results, nil +} diff --git a/vendor/github.com/playwright-community/playwright-go/browser_context.go b/vendor/github.com/playwright-community/playwright-go/browser_context.go new file mode 100644 index 0000000..1d420d3 --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/browser_context.go @@ -0,0 +1,914 @@ +package playwright + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "regexp" + "slices" + "strings" + "sync" + + "github.com/playwright-community/playwright-go/internal/safe" +) + +type browserContextImpl struct { + channelOwner + timeoutSettings *timeoutSettings + closeWasCalled bool + options *BrowserNewContextOptions + pages []Page + routes []*routeHandlerEntry + webSocketRoutes []*webSocketRouteHandler + ownedPage Page + browser *browserImpl + serviceWorkers []Worker + backgroundPages []Page + bindings *safe.SyncMap[string, BindingCallFunction] + tracing *tracingImpl + request *apiRequestContextImpl + harRecorders map[string]harRecordingMetadata + closed chan struct{} + closeReason *string + harRouters []*harRouter + clock Clock +} + +func (b *browserContextImpl) Clock() Clock { + return b.clock +} + +func (b *browserContextImpl) SetDefaultNavigationTimeout(timeout float64) { + b.setDefaultNavigationTimeoutImpl(&timeout) +} + +func (b *browserContextImpl) setDefaultNavigationTimeoutImpl(timeout 
*float64) { + b.timeoutSettings.SetDefaultNavigationTimeout(timeout) + b.channel.SendNoReplyInternal("setDefaultNavigationTimeoutNoReply", map[string]interface{}{ + "timeout": timeout, + }) +} + +func (b *browserContextImpl) SetDefaultTimeout(timeout float64) { + b.setDefaultTimeoutImpl(&timeout) +} + +func (b *browserContextImpl) setDefaultTimeoutImpl(timeout *float64) { + b.timeoutSettings.SetDefaultTimeout(timeout) + b.channel.SendNoReplyInternal("setDefaultTimeoutNoReply", map[string]interface{}{ + "timeout": timeout, + }) +} + +func (b *browserContextImpl) Pages() []Page { + b.Lock() + defer b.Unlock() + return b.pages +} + +func (b *browserContextImpl) Browser() Browser { + return b.browser +} + +func (b *browserContextImpl) Tracing() Tracing { + return b.tracing +} + +func (b *browserContextImpl) NewCDPSession(page interface{}) (CDPSession, error) { + params := map[string]interface{}{} + + if p, ok := page.(*pageImpl); ok { + params["page"] = p.channel + } else if f, ok := page.(*frameImpl); ok { + params["frame"] = f.channel + } else { + return nil, fmt.Errorf("not page or frame: %v", page) + } + + channel, err := b.channel.Send("newCDPSession", params) + if err != nil { + return nil, err + } + + cdpSession := fromChannel(channel).(*cdpSessionImpl) + + return cdpSession, nil +} + +func (b *browserContextImpl) NewPage() (Page, error) { + if b.ownedPage != nil { + return nil, errors.New("Please use browser.NewContext()") + } + channel, err := b.channel.Send("newPage") + if err != nil { + return nil, err + } + return fromChannel(channel).(*pageImpl), nil +} + +func (b *browserContextImpl) Cookies(urls ...string) ([]Cookie, error) { + result, err := b.channel.Send("cookies", map[string]interface{}{ + "urls": urls, + }) + if err != nil { + return nil, err + } + cookies := make([]Cookie, len(result.([]interface{}))) + for i, item := range result.([]interface{}) { + cookie := &Cookie{} + remapMapToStruct(item, cookie) + cookies[i] = *cookie + } + return cookies, nil +} + +func (b *browserContextImpl) AddCookies(cookies []OptionalCookie) error { + _, err := b.channel.Send("addCookies", map[string]interface{}{ + "cookies": cookies, + }) + return err +} + +func (b *browserContextImpl) ClearCookies(options ...BrowserContextClearCookiesOptions) error { + params := map[string]interface{}{} + if len(options) == 1 { + if options[0].Domain != nil { + switch t := options[0].Domain.(type) { + case string: + params["domain"] = t + case *string: + params["domain"] = t + case *regexp.Regexp: + pattern, flag := convertRegexp(t) + params["domainRegexSource"] = pattern + params["domainRegexFlags"] = flag + default: + return errors.New("invalid type for domain, expected string or *regexp.Regexp") + } + } + if options[0].Name != nil { + switch t := options[0].Name.(type) { + case string: + params["name"] = t + case *string: + params["name"] = t + case *regexp.Regexp: + pattern, flag := convertRegexp(t) + params["nameRegexSource"] = pattern + params["nameRegexFlags"] = flag + default: + return errors.New("invalid type for name, expected string or *regexp.Regexp") + } + } + if options[0].Path != nil { + switch t := options[0].Path.(type) { + case string: + params["path"] = t + case *string: + params["path"] = t + case *regexp.Regexp: + pattern, flag := convertRegexp(t) + params["pathRegexSource"] = pattern + params["pathRegexFlags"] = flag + default: + return errors.New("invalid type for path, expected string or *regexp.Regexp") + } + } + } + _, err := b.channel.Send("clearCookies", params) + return err +} + 
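+// A minimal usage sketch for the filtered form of ClearCookies (illustrative;
+// `ctx` is assumed to be an existing BrowserContext). Domain, Name and Path
+// each accept a string or a *regexp.Regexp, as the switch above shows:
+//
+//	err := ctx.ClearCookies(playwright.BrowserContextClearCookiesOptions{
+//		Domain: regexp.MustCompile(`(^|\.)example\.com$`),
+//	})
+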
+func (b *browserContextImpl) GrantPermissions(permissions []string, options ...BrowserContextGrantPermissionsOptions) error { + _, err := b.channel.Send("grantPermissions", map[string]interface{}{ + "permissions": permissions, + }, options) + return err +} + +func (b *browserContextImpl) ClearPermissions() error { + _, err := b.channel.Send("clearPermissions") + return err +} + +func (b *browserContextImpl) SetGeolocation(geolocation *Geolocation) error { + _, err := b.channel.Send("setGeolocation", map[string]interface{}{ + "geolocation": geolocation, + }) + return err +} + +func (b *browserContextImpl) ResetGeolocation() error { + _, err := b.channel.Send("setGeolocation", map[string]interface{}{}) + return err +} + +func (b *browserContextImpl) SetExtraHTTPHeaders(headers map[string]string) error { + _, err := b.channel.Send("setExtraHTTPHeaders", map[string]interface{}{ + "headers": serializeMapToNameAndValue(headers), + }) + return err +} + +func (b *browserContextImpl) SetOffline(offline bool) error { + _, err := b.channel.Send("setOffline", map[string]interface{}{ + "offline": offline, + }) + return err +} + +func (b *browserContextImpl) AddInitScript(script Script) error { + var source string + if script.Content != nil { + source = *script.Content + } + if script.Path != nil { + content, err := os.ReadFile(*script.Path) + if err != nil { + return err + } + source = string(content) + } + _, err := b.channel.Send("addInitScript", map[string]interface{}{ + "source": source, + }) + return err +} + +func (b *browserContextImpl) ExposeBinding(name string, binding BindingCallFunction, handle ...bool) error { + needsHandle := false + if len(handle) == 1 { + needsHandle = handle[0] + } + for _, page := range b.Pages() { + if _, ok := page.(*pageImpl).bindings.Load(name); ok { + return fmt.Errorf("Function '%s' has been already registered in one of the pages", name) + } + } + if _, ok := b.bindings.Load(name); ok { + return fmt.Errorf("Function '%s' has been already registered", name) + } + _, err := b.channel.Send("exposeBinding", map[string]interface{}{ + "name": name, + "needsHandle": needsHandle, + }) + if err != nil { + return err + } + b.bindings.Store(name, binding) + return err +} + +func (b *browserContextImpl) ExposeFunction(name string, binding ExposedFunction) error { + return b.ExposeBinding(name, func(source *BindingSource, args ...interface{}) interface{} { + return binding(args...) + }) +} + +func (b *browserContextImpl) Route(url interface{}, handler routeHandler, times ...int) error { + b.Lock() + defer b.Unlock() + b.routes = slices.Insert(b.routes, 0, newRouteHandlerEntry(newURLMatcher(url, b.options.BaseURL), handler, times...)) + return b.updateInterceptionPatterns() +} + +func (b *browserContextImpl) Unroute(url interface{}, handlers ...routeHandler) error { + removed, remaining, err := unroute(b.routes, url, handlers...) 
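+	// unroute partitions the registered entries into those removed by this
+	// call and those left in place; the removed entries are stopped with the
+	// default behavior in unrouteInternal below.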
+ if err != nil { + return err + } + return b.unrouteInternal(removed, remaining, UnrouteBehaviorDefault) +} + +func (b *browserContextImpl) unrouteInternal(removed []*routeHandlerEntry, remaining []*routeHandlerEntry, behavior *UnrouteBehavior) error { + b.Lock() + defer b.Unlock() + b.routes = remaining + if err := b.updateInterceptionPatterns(); err != nil { + return err + } + if behavior == nil || behavior == UnrouteBehaviorDefault { + return nil + } + wg := &sync.WaitGroup{} + for _, entry := range removed { + wg.Add(1) + go func(entry *routeHandlerEntry) { + defer wg.Done() + entry.Stop(string(*behavior)) + }(entry) + } + wg.Wait() + return nil +} + +func (b *browserContextImpl) UnrouteAll(options ...BrowserContextUnrouteAllOptions) error { + var behavior *UnrouteBehavior + if len(options) == 1 { + behavior = options[0].Behavior + } + defer b.disposeHarRouters() + return b.unrouteInternal(b.routes, []*routeHandlerEntry{}, behavior) +} + +func (b *browserContextImpl) disposeHarRouters() { + for _, router := range b.harRouters { + router.dispose() + } + b.harRouters = make([]*harRouter, 0) +} + +func (b *browserContextImpl) Request() APIRequestContext { + return b.request +} + +func (b *browserContextImpl) RouteFromHAR(har string, options ...BrowserContextRouteFromHAROptions) error { + opt := BrowserContextRouteFromHAROptions{} + if len(options) == 1 { + opt = options[0] + } + if opt.Update != nil && *opt.Update { + var updateContent *HarContentPolicy + switch opt.UpdateContent { + case RouteFromHarUpdateContentPolicyAttach: + updateContent = HarContentPolicyAttach + case RouteFromHarUpdateContentPolicyEmbed: + updateContent = HarContentPolicyEmbed + } + return b.recordIntoHar(har, browserContextRecordIntoHarOptions{ + URL: opt.URL, + UpdateContent: updateContent, + UpdateMode: opt.UpdateMode, + }) + } + notFound := opt.NotFound + if notFound == nil { + notFound = HarNotFoundAbort + } + router := newHarRouter(b.connection.localUtils, har, *notFound, opt.URL) + b.harRouters = append(b.harRouters, router) + return router.addContextRoute(b) +} + +func (b *browserContextImpl) WaitForEvent(event string, options ...BrowserContextWaitForEventOptions) (interface{}, error) { + return b.waiterForEvent(event, options...).Wait() +} + +func (b *browserContextImpl) waiterForEvent(event string, options ...BrowserContextWaitForEventOptions) *waiter { + timeout := b.timeoutSettings.Timeout() + var predicate interface{} = nil + if len(options) == 1 { + if options[0].Timeout != nil { + timeout = *options[0].Timeout + } + predicate = options[0].Predicate + } + waiter := newWaiter().WithTimeout(timeout) + waiter.RejectOnEvent(b, "close", ErrTargetClosed) + return waiter.WaitForEvent(b, event, predicate) +} + +func (b *browserContextImpl) ExpectConsoleMessage(cb func() error, options ...BrowserContextExpectConsoleMessageOptions) (ConsoleMessage, error) { + var w *waiter + if len(options) == 1 { + w = b.waiterForEvent("console", BrowserContextWaitForEventOptions{ + Predicate: options[0].Predicate, + Timeout: options[0].Timeout, + }) + } else { + w = b.waiterForEvent("console") + } + ret, err := w.RunAndWait(cb) + if err != nil { + return nil, err + } + return ret.(ConsoleMessage), nil +} + +func (b *browserContextImpl) ExpectEvent(event string, cb func() error, options ...BrowserContextExpectEventOptions) (interface{}, error) { + if len(options) == 1 { + return b.waiterForEvent(event, BrowserContextWaitForEventOptions(options[0])).RunAndWait(cb) + } + return b.waiterForEvent(event).RunAndWait(cb) +} + +func 
(b *browserContextImpl) ExpectPage(cb func() error, options ...BrowserContextExpectPageOptions) (Page, error) { + var w *waiter + if len(options) == 1 { + w = b.waiterForEvent("page", BrowserContextWaitForEventOptions{ + Predicate: options[0].Predicate, + Timeout: options[0].Timeout, + }) + } else { + w = b.waiterForEvent("page") + } + ret, err := w.RunAndWait(cb) + if err != nil { + return nil, err + } + return ret.(Page), nil +} + +func (b *browserContextImpl) Close(options ...BrowserContextCloseOptions) error { + if b.closeWasCalled { + return nil + } + if len(options) == 1 { + b.closeReason = options[0].Reason + } + b.closeWasCalled = true + + _, err := b.channel.connection.WrapAPICall(func() (interface{}, error) { + return nil, b.request.Dispose(APIRequestContextDisposeOptions{ + Reason: b.closeReason, + }) + }, true) + if err != nil { + return err + } + + innerClose := func() (interface{}, error) { + for harId, harMetaData := range b.harRecorders { + overrides := map[string]interface{}{} + if harId != "" { + overrides["harId"] = harId + } + response, err := b.channel.Send("harExport", overrides) + if err != nil { + return nil, err + } + artifact := fromChannel(response).(*artifactImpl) + // Server side will compress artifact if content is attach or if file is .zip. + needCompressed := strings.HasSuffix(strings.ToLower(harMetaData.Path), ".zip") + if !needCompressed && harMetaData.Content == HarContentPolicyAttach { + tmpPath := harMetaData.Path + ".tmp" + if err := artifact.SaveAs(tmpPath); err != nil { + return nil, err + } + err = b.connection.localUtils.HarUnzip(tmpPath, harMetaData.Path) + if err != nil { + return nil, err + } + } else { + if err := artifact.SaveAs(harMetaData.Path); err != nil { + return nil, err + } + } + if err := artifact.Delete(); err != nil { + return nil, err + } + } + return nil, nil + } + + _, err = b.channel.connection.WrapAPICall(innerClose, true) + if err != nil { + return err + } + + _, err = b.channel.Send("close", map[string]interface{}{ + "reason": b.closeReason, + }) + if err != nil { + return err + } + <-b.closed + return err +} + +type browserContextRecordIntoHarOptions struct { + Page Page + URL interface{} + UpdateContent *HarContentPolicy + UpdateMode *HarMode +} + +func (b *browserContextImpl) recordIntoHar(har string, options ...browserContextRecordIntoHarOptions) error { + overrides := map[string]interface{}{} + harOptions := recordHarInputOptions{ + Path: har, + Content: HarContentPolicyAttach, + Mode: HarModeMinimal, + } + if len(options) == 1 { + if options[0].UpdateContent != nil { + harOptions.Content = options[0].UpdateContent + } + if options[0].UpdateMode != nil { + harOptions.Mode = options[0].UpdateMode + } + harOptions.URL = options[0].URL + overrides["options"] = prepareRecordHarOptions(harOptions) + if options[0].Page != nil { + overrides["page"] = options[0].Page.(*pageImpl).channel + } + } + harId, err := b.channel.Send("harStart", overrides) + if err != nil { + return err + } + b.harRecorders[harId.(string)] = harRecordingMetadata{ + Path: har, + Content: harOptions.Content, + } + return nil +} + +func (b *browserContextImpl) StorageState(paths ...string) (*StorageState, error) { + result, err := b.channel.SendReturnAsDict("storageState") + if err != nil { + return nil, err + } + if len(paths) == 1 { + file, err := os.Create(paths[0]) + if err != nil { + return nil, err + } + if err := json.NewEncoder(file).Encode(result); err != nil { + return nil, err + } + if err := file.Close(); err != nil { + return nil, err + } + } + 
var storageState StorageState + remapMapToStruct(result, &storageState) + return &storageState, nil +} + +func (b *browserContextImpl) onBinding(binding *bindingCallImpl) { + function, ok := b.bindings.Load(binding.initializer["name"].(string)) + if !ok || function == nil { + return + } + go binding.Call(function) +} + +func (b *browserContextImpl) onClose() { + if b.browser != nil { + contexts := make([]BrowserContext, 0) + b.browser.Lock() + for _, context := range b.browser.contexts { + if context != b { + contexts = append(contexts, context) + } + } + b.browser.contexts = contexts + b.browser.Unlock() + } + b.disposeHarRouters() + b.Emit("close", b) +} + +func (b *browserContextImpl) onPage(page Page) { + b.Lock() + b.pages = append(b.pages, page) + b.Unlock() + b.Emit("page", page) + opener, _ := page.Opener() + if opener != nil && !opener.IsClosed() { + opener.Emit("popup", page) + } +} + +func (b *browserContextImpl) onRoute(route *routeImpl) { + b.Lock() + route.context = b + page := route.Request().(*requestImpl).safePage() + routes := make([]*routeHandlerEntry, len(b.routes)) + copy(routes, b.routes) + b.Unlock() + + checkInterceptionIfNeeded := func() { + b.Lock() + defer b.Unlock() + if len(b.routes) == 0 { + _, err := b.connection.WrapAPICall(func() (interface{}, error) { + err := b.updateInterceptionPatterns() + return nil, err + }, true) + if err != nil { + logger.Error("could not update interception patterns", "error", err) + } + } + } + + url := route.Request().URL() + for _, handlerEntry := range routes { + // If the page or the context was closed we stall all requests right away. + if (page != nil && page.closeWasCalled) || b.closeWasCalled { + return + } + if !handlerEntry.Matches(url) { + continue + } + if !slices.ContainsFunc(b.routes, func(entry *routeHandlerEntry) bool { + return entry == handlerEntry + }) { + continue + } + if handlerEntry.WillExceed() { + b.routes = slices.DeleteFunc(b.routes, func(rhe *routeHandlerEntry) bool { + return rhe == handlerEntry + }) + } + handled := handlerEntry.Handle(route) + checkInterceptionIfNeeded() + yes := <-handled + if yes { + return + } + } + // If the page is closed or unrouteAll() was called without waiting and interception disabled, + // the method will throw an error - silence it. 
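+	// No registered handler claimed the request, so fall through and let it
+	// continue unmodified.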
+ _ = route.internalContinue(true) +} + +func (b *browserContextImpl) updateInterceptionPatterns() error { + patterns := prepareInterceptionPatterns(b.routes) + _, err := b.channel.Send("setNetworkInterceptionPatterns", map[string]interface{}{ + "patterns": patterns, + }) + return err +} + +func (b *browserContextImpl) pause() <-chan error { + ret := make(chan error, 1) + go func() { + _, err := b.channel.Send("pause") + ret <- err + }() + return ret +} + +func (b *browserContextImpl) onBackgroundPage(ev map[string]interface{}) { + b.Lock() + p := fromChannel(ev["page"]).(*pageImpl) + p.browserContext = b + b.backgroundPages = append(b.backgroundPages, p) + b.Unlock() + b.Emit("backgroundpage", p) +} + +func (b *browserContextImpl) onServiceWorker(worker *workerImpl) { + worker.context = b + b.serviceWorkers = append(b.serviceWorkers, worker) + b.Emit("serviceworker", worker) +} + +func (b *browserContextImpl) setOptions(options *BrowserNewContextOptions, tracesDir *string) { + if options == nil { + options = &BrowserNewContextOptions{} + } + b.options = options + if b.options != nil && b.options.RecordHarPath != nil { + b.harRecorders[""] = harRecordingMetadata{ + Path: *b.options.RecordHarPath, + Content: b.options.RecordHarContent, + } + } + if tracesDir != nil { + b.tracing.tracesDir = *tracesDir + } +} + +func (b *browserContextImpl) BackgroundPages() []Page { + b.Lock() + defer b.Unlock() + return b.backgroundPages +} + +func (b *browserContextImpl) ServiceWorkers() []Worker { + b.Lock() + defer b.Unlock() + return b.serviceWorkers +} + +func (b *browserContextImpl) OnBackgroundPage(fn func(Page)) { + b.On("backgroundpage", fn) +} + +func (b *browserContextImpl) OnClose(fn func(BrowserContext)) { + b.On("close", fn) +} + +func (b *browserContextImpl) OnConsole(fn func(ConsoleMessage)) { + b.On("console", fn) +} + +func (b *browserContextImpl) OnDialog(fn func(Dialog)) { + b.On("dialog", fn) +} + +func (b *browserContextImpl) OnPage(fn func(Page)) { + b.On("page", fn) +} + +func (b *browserContextImpl) OnRequest(fn func(Request)) { + b.On("request", fn) +} + +func (b *browserContextImpl) OnRequestFailed(fn func(Request)) { + b.On("requestfailed", fn) +} + +func (b *browserContextImpl) OnRequestFinished(fn func(Request)) { + b.On("requestfinished", fn) +} + +func (b *browserContextImpl) OnResponse(fn func(Response)) { + b.On("response", fn) +} + +func (b *browserContextImpl) OnWebError(fn func(WebError)) { + b.On("weberror", fn) +} + +func (b *browserContextImpl) RouteWebSocket(url interface{}, handler func(WebSocketRoute)) error { + b.Lock() + defer b.Unlock() + b.webSocketRoutes = slices.Insert(b.webSocketRoutes, 0, newWebSocketRouteHandler(newURLMatcher(url, b.options.BaseURL), handler)) + + return b.updateWebSocketInterceptionPatterns() +} + +func (b *browserContextImpl) onWebSocketRoute(wr WebSocketRoute) { + b.Lock() + index := slices.IndexFunc(b.webSocketRoutes, func(r *webSocketRouteHandler) bool { + return r.Matches(wr.URL()) + }) + if index == -1 { + b.Unlock() + _, err := wr.ConnectToServer() + if err != nil { + logger.Error("could not connect to WebSocket server", "error", err) + } + return + } + handler := b.webSocketRoutes[index] + b.Unlock() + handler.Handle(wr) +} + +func (b *browserContextImpl) updateWebSocketInterceptionPatterns() error { + patterns := prepareWebSocketRouteHandlerInterceptionPatterns(b.webSocketRoutes) + _, err := b.channel.Send("setWebSocketInterceptionPatterns", map[string]interface{}{ + "patterns": patterns, + }) + return err +} + +func (b 
*browserContextImpl) effectiveCloseReason() *string { + b.Lock() + defer b.Unlock() + if b.closeReason != nil { + return b.closeReason + } + if b.browser != nil { + return b.browser.closeReason + } + return nil +} + +func newBrowserContext(parent *channelOwner, objectType string, guid string, initializer map[string]interface{}) *browserContextImpl { + bt := &browserContextImpl{ + timeoutSettings: newTimeoutSettings(nil), + pages: make([]Page, 0), + backgroundPages: make([]Page, 0), + routes: make([]*routeHandlerEntry, 0), + bindings: safe.NewSyncMap[string, BindingCallFunction](), + harRecorders: make(map[string]harRecordingMetadata), + closed: make(chan struct{}, 1), + harRouters: make([]*harRouter, 0), + } + bt.createChannelOwner(bt, parent, objectType, guid, initializer) + if parent.objectType == "Browser" { + bt.browser = fromChannel(parent.channel).(*browserImpl) + bt.browser.contexts = append(bt.browser.contexts, bt) + } + bt.tracing = fromChannel(initializer["tracing"]).(*tracingImpl) + bt.request = fromChannel(initializer["requestContext"]).(*apiRequestContextImpl) + bt.clock = newClock(bt) + bt.channel.On("bindingCall", func(params map[string]interface{}) { + bt.onBinding(fromChannel(params["binding"]).(*bindingCallImpl)) + }) + + bt.channel.On("close", bt.onClose) + bt.channel.On("page", func(payload map[string]interface{}) { + bt.onPage(fromChannel(payload["page"]).(*pageImpl)) + }) + bt.channel.On("route", func(params map[string]interface{}) { + bt.channel.CreateTask(func() { + bt.onRoute(fromChannel(params["route"]).(*routeImpl)) + }) + }) + bt.channel.On("webSocketRoute", func(params map[string]interface{}) { + bt.channel.CreateTask(func() { + bt.onWebSocketRoute(fromChannel(params["webSocketRoute"]).(*webSocketRouteImpl)) + }) + }) + bt.channel.On("backgroundPage", bt.onBackgroundPage) + bt.channel.On("serviceWorker", func(params map[string]interface{}) { + bt.onServiceWorker(fromChannel(params["worker"]).(*workerImpl)) + }) + bt.channel.On("console", func(ev map[string]interface{}) { + message := newConsoleMessage(ev) + bt.Emit("console", message) + if message.page != nil { + message.page.Emit("console", message) + } + }) + bt.channel.On("dialog", func(params map[string]interface{}) { + dialog := fromChannel(params["dialog"]).(*dialogImpl) + go func() { + hasListeners := bt.Emit("dialog", dialog) + page := dialog.page + if page != nil { + if page.Emit("dialog", dialog) { + hasListeners = true + } + } + if !hasListeners { + // Although we do similar handling on the server side, we still need this logic + // on the client side due to a possible race condition between two async calls: + // a) removing "dialog" listener subscription (client->server) + // b) actual "dialog" event (server->client) + if dialog.Type() == "beforeunload" { + _ = dialog.Accept() + } else { + _ = dialog.Dismiss() + } + } + }() + }) + bt.channel.On( + "pageError", func(ev map[string]interface{}) { + pwErr := &Error{} + remapMapToStruct(ev["error"].(map[string]interface{})["error"], pwErr) + err := parseError(*pwErr) + page := fromNullableChannel(ev["page"]) + if page != nil { + bt.Emit("weberror", newWebError(page.(*pageImpl), err)) + page.(*pageImpl).Emit("pageerror", err) + } else { + bt.Emit("weberror", newWebError(nil, err)) + } + }, + ) + bt.channel.On("request", func(ev map[string]interface{}) { + request := fromChannel(ev["request"]).(*requestImpl) + page := fromNullableChannel(ev["page"]) + bt.Emit("request", request) + if page != nil { + page.(*pageImpl).Emit("request", request) + } + }) + 
bt.channel.On("requestFailed", func(ev map[string]interface{}) { + request := fromChannel(ev["request"]).(*requestImpl) + failureText := ev["failureText"] + if failureText != nil { + request.failureText = failureText.(string) + } + page := fromNullableChannel(ev["page"]) + request.setResponseEndTiming(ev["responseEndTiming"].(float64)) + bt.Emit("requestfailed", request) + if page != nil { + page.(*pageImpl).Emit("requestfailed", request) + } + }) + + bt.channel.On("requestFinished", func(ev map[string]interface{}) { + request := fromChannel(ev["request"]).(*requestImpl) + response := fromNullableChannel(ev["response"]) + page := fromNullableChannel(ev["page"]) + request.setResponseEndTiming(ev["responseEndTiming"].(float64)) + bt.Emit("requestfinished", request) + if page != nil { + page.(*pageImpl).Emit("requestfinished", request) + } + if response != nil { + close(response.(*responseImpl).finished) + } + }) + bt.channel.On("response", func(ev map[string]interface{}) { + response := fromChannel(ev["response"]).(*responseImpl) + page := fromNullableChannel(ev["page"]) + bt.Emit("response", response) + if page != nil { + page.(*pageImpl).Emit("response", response) + } + }) + bt.Once("close", func() { + bt.closed <- struct{}{} + }) + bt.setEventSubscriptionMapping(map[string]string{ + "console": "console", + "dialog": "dialog", + "request": "request", + "response": "response", + "requestfinished": "requestFinished", + "responsefailed": "responseFailed", + }) + return bt +} diff --git a/vendor/github.com/playwright-community/playwright-go/browser_type.go b/vendor/github.com/playwright-community/playwright-go/browser_type.go new file mode 100644 index 0000000..41a8b18 --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/browser_type.go @@ -0,0 +1,181 @@ +package playwright + +import ( + "fmt" +) + +type browserTypeImpl struct { + channelOwner + playwright *Playwright +} + +func (b *browserTypeImpl) Name() string { + return b.initializer["name"].(string) +} + +func (b *browserTypeImpl) ExecutablePath() string { + return b.initializer["executablePath"].(string) +} + +func (b *browserTypeImpl) Launch(options ...BrowserTypeLaunchOptions) (Browser, error) { + overrides := map[string]interface{}{} + if len(options) == 1 && options[0].Env != nil { + overrides["env"] = serializeMapToNameAndValue(options[0].Env) + options[0].Env = nil + } + channel, err := b.channel.Send("launch", options, overrides) + if err != nil { + return nil, err + } + browser := fromChannel(channel).(*browserImpl) + b.didLaunchBrowser(browser) + return browser, nil +} + +func (b *browserTypeImpl) LaunchPersistentContext(userDataDir string, options ...BrowserTypeLaunchPersistentContextOptions) (BrowserContext, error) { + overrides := map[string]interface{}{ + "userDataDir": userDataDir, + } + option := &BrowserNewContextOptions{} + var tracesDir *string = nil + if len(options) == 1 { + tracesDir = options[0].TracesDir + err := assignStructFields(option, options[0], true) + if err != nil { + return nil, fmt.Errorf("can not convert options: %w", err) + } + if options[0].AcceptDownloads != nil { + if *options[0].AcceptDownloads { + overrides["acceptDownloads"] = "accept" + } else { + overrides["acceptDownloads"] = "deny" + } + options[0].AcceptDownloads = nil + } + if options[0].ClientCertificates != nil { + certs, err := transformClientCertificate(options[0].ClientCertificates) + if err != nil { + return nil, err + } + overrides["clientCertificates"] = certs + options[0].ClientCertificates = nil + } + if 
options[0].ExtraHttpHeaders != nil { + overrides["extraHTTPHeaders"] = serializeMapToNameAndValue(options[0].ExtraHttpHeaders) + options[0].ExtraHttpHeaders = nil + } + if options[0].Env != nil { + overrides["env"] = serializeMapToNameAndValue(options[0].Env) + options[0].Env = nil + } + if options[0].NoViewport != nil && *options[0].NoViewport { + overrides["noDefaultViewport"] = true + options[0].NoViewport = nil + } + if options[0].RecordHarPath != nil { + overrides["recordHar"] = prepareRecordHarOptions(recordHarInputOptions{ + Path: *options[0].RecordHarPath, + URL: options[0].RecordHarURLFilter, + Mode: options[0].RecordHarMode, + Content: options[0].RecordHarContent, + OmitContent: options[0].RecordHarOmitContent, + }) + options[0].RecordHarPath = nil + options[0].RecordHarURLFilter = nil + options[0].RecordHarMode = nil + options[0].RecordHarContent = nil + options[0].RecordHarOmitContent = nil + } + } + channel, err := b.channel.Send("launchPersistentContext", options, overrides) + if err != nil { + return nil, err + } + context := fromChannel(channel).(*browserContextImpl) + b.didCreateContext(context, option, tracesDir) + return context, nil +} + +func (b *browserTypeImpl) Connect(wsEndpoint string, options ...BrowserTypeConnectOptions) (Browser, error) { + overrides := map[string]interface{}{ + "wsEndpoint": wsEndpoint, + "headers": map[string]string{ + "x-playwright-browser": b.Name(), + }, + } + if len(options) == 1 { + if options[0].Headers != nil { + for k, v := range options[0].Headers { + overrides["headers"].(map[string]string)[k] = v + } + options[0].Headers = nil + } + } + localUtils := b.connection.LocalUtils() + pipe, err := localUtils.channel.SendReturnAsDict("connect", options, overrides) + if err != nil { + return nil, err + } + jsonPipe := fromChannel(pipe["pipe"]).(*jsonPipe) + connection := newConnection(jsonPipe, localUtils) + + playwright, err := connection.Start() + if err != nil { + return nil, err + } + playwright.setSelectors(b.playwright.Selectors) + browser := fromChannel(playwright.initializer["preLaunchedBrowser"]).(*browserImpl) + browser.shouldCloseConnectionOnClose = true + pipeClosed := func() { + for _, context := range browser.Contexts() { + pages := context.Pages() + for _, page := range pages { + page.(*pageImpl).onClose() + } + context.(*browserContextImpl).onClose() + } + browser.onClose() + connection.cleanup() + } + jsonPipe.On("closed", pipeClosed) + + b.didLaunchBrowser(browser) + return browser, nil +} + +func (b *browserTypeImpl) ConnectOverCDP(endpointURL string, options ...BrowserTypeConnectOverCDPOptions) (Browser, error) { + overrides := map[string]interface{}{ + "endpointURL": endpointURL, + } + if len(options) == 1 { + if options[0].Headers != nil { + overrides["headers"] = serializeMapToNameAndValue(options[0].Headers) + options[0].Headers = nil + } + } + response, err := b.channel.SendReturnAsDict("connectOverCDP", options, overrides) + if err != nil { + return nil, err + } + browser := fromChannel(response["browser"]).(*browserImpl) + b.didLaunchBrowser(browser) + if defaultContext, ok := response["defaultContext"]; ok { + context := fromChannel(defaultContext).(*browserContextImpl) + b.didCreateContext(context, nil, nil) + } + return browser, nil +} + +func (b *browserTypeImpl) didCreateContext(context *browserContextImpl, contextOptions *BrowserNewContextOptions, tracesDir *string) { + context.setOptions(contextOptions, tracesDir) +} + +func (b *browserTypeImpl) didLaunchBrowser(browser *browserImpl) { + browser.browserType 
= b +} + +func newBrowserType(parent *channelOwner, objectType string, guid string, initializer map[string]interface{}) *browserTypeImpl { + bt := &browserTypeImpl{} + bt.createChannelOwner(bt, parent, objectType, guid, initializer) + return bt +} diff --git a/vendor/github.com/playwright-community/playwright-go/cdp_session.go b/vendor/github.com/playwright-community/playwright-go/cdp_session.go new file mode 100644 index 0000000..e9bba82 --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/cdp_session.go @@ -0,0 +1,38 @@ +package playwright + +type cdpSessionImpl struct { + channelOwner +} + +func (c *cdpSessionImpl) Detach() error { + _, err := c.channel.Send("detach") + return err +} + +func (c *cdpSessionImpl) Send(method string, params map[string]interface{}) (interface{}, error) { + result, err := c.channel.Send("send", map[string]interface{}{ + "method": method, + "params": params, + }) + if err != nil { + return nil, err + } + + return result, err +} + +func (c *cdpSessionImpl) onEvent(params map[string]interface{}) { + c.Emit(params["method"].(string), params["params"]) +} + +func newCDPSession(parent *channelOwner, objectType string, guid string, initializer map[string]interface{}) *cdpSessionImpl { + bt := &cdpSessionImpl{} + + bt.createChannelOwner(bt, parent, objectType, guid, initializer) + + bt.channel.On("event", func(params map[string]interface{}) { + bt.onEvent(params) + }) + + return bt +} diff --git a/vendor/github.com/playwright-community/playwright-go/channel.go b/vendor/github.com/playwright-community/playwright-go/channel.go new file mode 100644 index 0000000..b0bded4 --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/channel.go @@ -0,0 +1,92 @@ +package playwright + +import ( + "encoding/json" + "fmt" +) + +type channel struct { + eventEmitter + guid string + connection *connection + owner *channelOwner // to avoid type conversion + object interface{} // retain type info (for fromChannel needed) +} + +func (c *channel) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]string{ + "guid": c.guid, + }) +} + +// for catch errors of route handlers etc. +func (c *channel) CreateTask(fn func()) { + go func() { + defer func() { + if e := recover(); e != nil { + err, ok := e.(error) + if ok { + c.connection.err.Set(err) + } else { + c.connection.err.Set(fmt.Errorf("%v", e)) + } + } + }() + fn() + }() +} + +func (c *channel) Send(method string, options ...interface{}) (interface{}, error) { + return c.connection.WrapAPICall(func() (interface{}, error) { + return c.innerSend(method, options...).GetResultValue() + }, c.owner.isInternalType) +} + +func (c *channel) SendReturnAsDict(method string, options ...interface{}) (map[string]interface{}, error) { + ret, err := c.connection.WrapAPICall(func() (interface{}, error) { + return c.innerSend(method, options...).GetResult() + }, c.owner.isInternalType) + return ret.(map[string]interface{}), err +} + +func (c *channel) innerSend(method string, options ...interface{}) *protocolCallback { + if err := c.connection.err.Get(); err != nil { + c.connection.err.Set(nil) + pc := newProtocolCallback(false, c.connection.abort) + pc.SetError(err) + return pc + } + params := transformOptions(options...) 
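+	// transformOptions folds the variadic options (structs and/or maps) into a
+	// single params map for the outgoing protocol message.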
+ return c.connection.sendMessageToServer(c.owner, method, params, false) +} + +// SendNoReply ignores return value and errors +// almost equivalent to `send(...).catch(() => {})` +func (c *channel) SendNoReply(method string, options ...interface{}) { + c.innerSendNoReply(method, c.owner.isInternalType, options...) +} + +func (c *channel) SendNoReplyInternal(method string, options ...interface{}) { + c.innerSendNoReply(method, true, options...) +} + +func (c *channel) innerSendNoReply(method string, isInternal bool, options ...interface{}) { + params := transformOptions(options...) + _, err := c.connection.WrapAPICall(func() (interface{}, error) { + return c.connection.sendMessageToServer(c.owner, method, params, true).GetResult() + }, isInternal) + if err != nil { + // ignore error actively, log only for debug + logger.Error("SendNoReply failed", "error", err) + } +} + +func newChannel(owner *channelOwner, object interface{}) *channel { + channel := &channel{ + connection: owner.connection, + guid: owner.guid, + owner: owner, + object: object, + } + return channel +} diff --git a/vendor/github.com/playwright-community/playwright-go/channel_owner.go b/vendor/github.com/playwright-community/playwright-go/channel_owner.go new file mode 100644 index 0000000..5159eb2 --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/channel_owner.go @@ -0,0 +1,122 @@ +package playwright + +import ( + "sync" +) + +type channelOwner struct { + sync.RWMutex + eventEmitter + objectType string + guid string + channel *channel + objects map[string]*channelOwner + eventToSubscriptionMapping map[string]string + connection *connection + initializer map[string]interface{} + parent *channelOwner + wasCollected bool + isInternalType bool +} + +func (c *channelOwner) dispose(reason ...string) { + // Clean up from parent and connection. + if c.parent != nil { + delete(c.parent.objects, c.guid) + } + c.connection.objects.Delete(c.guid) + if len(reason) > 0 { + c.wasCollected = reason[0] == "gc" + } + + // Dispose all children. + for _, object := range c.objects { + object.dispose(reason...) 
+ } + c.objects = make(map[string]*channelOwner) +} + +func (c *channelOwner) adopt(child *channelOwner) { + delete(child.parent.objects, child.guid) + c.objects[child.guid] = child + child.parent = c +} + +func (c *channelOwner) setEventSubscriptionMapping(mapping map[string]string) { + c.eventToSubscriptionMapping = mapping +} + +func (c *channelOwner) updateSubscription(event string, enabled bool) { + protocolEvent, ok := c.eventToSubscriptionMapping[event] + if ok { + c.channel.SendNoReplyInternal("updateSubscription", map[string]interface{}{ + "event": protocolEvent, + "enabled": enabled, + }) + } +} + +func (c *channelOwner) Once(name string, handler interface{}) { + c.addEvent(name, handler, true) +} + +func (c *channelOwner) On(name string, handler interface{}) { + c.addEvent(name, handler, false) +} + +func (c *channelOwner) addEvent(name string, handler interface{}, once bool) { + if c.ListenerCount(name) == 0 { + c.updateSubscription(name, true) + } + c.eventEmitter.addEvent(name, handler, once) +} + +func (c *channelOwner) RemoveListener(name string, handler interface{}) { + c.eventEmitter.RemoveListener(name, handler) + if c.ListenerCount(name) == 0 { + c.updateSubscription(name, false) + } +} + +func (c *channelOwner) createChannelOwner(self interface{}, parent *channelOwner, objectType string, guid string, initializer map[string]interface{}) { + c.objectType = objectType + c.guid = guid + c.wasCollected = false + c.parent = parent + c.objects = make(map[string]*channelOwner) + c.initializer = initializer + if c.parent != nil { + c.connection = parent.connection + c.parent.objects[guid] = c + } + if c.connection != nil { + c.connection.objects.Store(guid, c) + } + c.channel = newChannel(c, self) + c.eventToSubscriptionMapping = map[string]string{} +} + +func (c *channelOwner) markAsInternalType() { + c.isInternalType = true +} + +type rootChannelOwner struct { + channelOwner +} + +func (r *rootChannelOwner) initialize() (*Playwright, error) { + ret, err := r.channel.SendReturnAsDict("initialize", map[string]interface{}{ + "sdkLanguage": "javascript", + }) + if err != nil { + return nil, err + } + return fromChannel(ret["playwright"]).(*Playwright), nil +} + +func newRootChannelOwner(connection *connection) *rootChannelOwner { + c := &rootChannelOwner{} + c.connection = connection + c.createChannelOwner(c, nil, "Root", "", make(map[string]interface{})) + return c +} diff --git a/vendor/github.com/playwright-community/playwright-go/clock.go b/vendor/github.com/playwright-community/playwright-go/clock.go new file mode 100644 index 0000000..8bab037 --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/clock.go @@ -0,0 +1,111 @@ +package playwright + +import ( + "errors" + "time" +) + +type clockImpl struct { + browserCtx *browserContextImpl +} + +func newClock(bCtx *browserContextImpl) Clock { + return &clockImpl{ + browserCtx: bCtx, + } +} + +func (c *clockImpl) FastForward(ticks interface{}) error { + params, err := parseTicks(ticks) + if err != nil { + return err + } + + _, err = c.browserCtx.channel.Send("clockFastForward", params) + return err +} + +func (c *clockImpl) Install(options ...ClockInstallOptions) (err error) { + params := map[string]any{} + if len(options) == 1 { + if options[0].Time != nil { + params, err = parseTime(options[0].Time) + if err != nil { + return err + } + } + } + + _, err = c.browserCtx.channel.Send("clockInstall", params) + + return err +} + +func (c *clockImpl) PauseAt(time interface{}) error { + params, err := parseTime(time) 
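+	// parseTime (below) accepts epoch milliseconds as int/int64, a time string,
+	// or a time.Time, and maps it onto the wire format.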
+ if err != nil { + return err + } + + _, err = c.browserCtx.channel.Send("clockPauseAt", params) + return err +} + +func (c *clockImpl) Resume() error { + _, err := c.browserCtx.channel.Send("clockResume") + return err +} + +func (c *clockImpl) RunFor(ticks interface{}) error { + params, err := parseTicks(ticks) + if err != nil { + return err + } + + _, err = c.browserCtx.channel.Send("clockRunFor", params) + return err +} + +func (c *clockImpl) SetFixedTime(time interface{}) error { + params, err := parseTime(time) + if err != nil { + return err + } + + _, err = c.browserCtx.channel.Send("clockSetFixedTime", params) + return err +} + +func (c *clockImpl) SetSystemTime(time interface{}) error { + params, err := parseTime(time) + if err != nil { + return err + } + + _, err = c.browserCtx.channel.Send("clockSetSystemTime", params) + return err +} + +func parseTime(t interface{}) (map[string]any, error) { + switch v := t.(type) { + case int, int64: + return map[string]any{"timeNumber": v}, nil + case string: + return map[string]any{"timeString": v}, nil + case time.Time: + return map[string]any{"timeNumber": v.UnixMilli()}, nil + default: + return nil, errors.New("time should be one of: int, int64, string, time.Time") + } +} + +func parseTicks(ticks interface{}) (map[string]any, error) { + switch v := ticks.(type) { + case int, int64: + return map[string]any{"ticksNumber": v}, nil + case string: + return map[string]any{"ticksString": v}, nil + default: + return nil, errors.New("ticks should be one of: int, int64, string") + } +} diff --git a/vendor/github.com/playwright-community/playwright-go/cmd/playwright/main.go b/vendor/github.com/playwright-community/playwright-go/cmd/playwright/main.go new file mode 100644 index 0000000..95e7d85 --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/cmd/playwright/main.go @@ -0,0 +1,25 @@ +package main + +import ( + "log" + "os" + + "github.com/playwright-community/playwright-go" +) + +func main() { + driver, err := playwright.NewDriver(&playwright.RunOptions{}) + if err != nil { + log.Fatalf("could not start driver: %v", err) + } + if err = driver.DownloadDriver(); err != nil { + log.Fatalf("could not download driver: %v", err) + } + cmd := driver.Command(os.Args[1:]...) 
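+	// Forward all CLI arguments to the bundled Playwright driver and stream the
+	// child process output through this process's stdio.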
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + log.Fatalf("could not run driver: %v", err) + } + os.Exit(cmd.ProcessState.ExitCode()) +} diff --git a/vendor/github.com/playwright-community/playwright-go/connection.go b/vendor/github.com/playwright-community/playwright-go/connection.go new file mode 100644 index 0000000..ba1e365 --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/connection.go @@ -0,0 +1,401 @@ +package playwright + +import ( + "errors" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/go-stack/stack" + "github.com/playwright-community/playwright-go/internal/safe" +) + +var ( + pkgSourcePathPattern = regexp.MustCompile(`.+[\\/]playwright-go[\\/][^\\/]+\.go`) + apiNameTransform = regexp.MustCompile(`(?U)\(\*(.+)(Impl)?\)`) +) + +type connection struct { + transport transport + apiZone sync.Map + objects *safe.SyncMap[string, *channelOwner] + lastID atomic.Uint32 + rootObject *rootChannelOwner + callbacks *safe.SyncMap[uint32, *protocolCallback] + afterClose func() + onClose func() error + isRemote bool + localUtils *localUtilsImpl + tracingCount atomic.Int32 + abort chan struct{} + abortOnce sync.Once + err *safeValue[error] // for event listener error + closedError *safeValue[error] +} + +func (c *connection) Start() (*Playwright, error) { + go func() { + for { + msg, err := c.transport.Poll() + if err != nil { + _ = c.transport.Close() + c.cleanup(err) + return + } + c.Dispatch(msg) + } + }() + + c.onClose = func() error { + if err := c.transport.Close(); err != nil { + return err + } + return nil + } + + return c.rootObject.initialize() +} + +func (c *connection) Stop() error { + if err := c.onClose(); err != nil { + return err + } + c.cleanup() + return nil +} + +func (c *connection) cleanup(cause ...error) { + if len(cause) > 0 { + c.closedError.Set(fmt.Errorf("%w: %w", ErrTargetClosed, cause[0])) + } else { + c.closedError.Set(ErrTargetClosed) + } + if c.afterClose != nil { + c.afterClose() + } + c.abortOnce.Do(func() { + select { + case <-c.abort: + default: + close(c.abort) + } + }) +} + +func (c *connection) Dispatch(msg *message) { + if c.closedError.Get() != nil { + return + } + method := msg.Method + if msg.ID != 0 { + cb, _ := c.callbacks.LoadAndDelete(uint32(msg.ID)) + if cb.noReply { + return + } + if msg.Error != nil { + cb.SetError(parseError(msg.Error.Error)) + } else { + cb.SetResult(c.replaceGuidsWithChannels(msg.Result).(map[string]interface{})) + } + return + } + object, _ := c.objects.Load(msg.GUID) + if method == "__create__" { + c.createRemoteObject( + object, msg.Params["type"].(string), msg.Params["guid"].(string), msg.Params["initializer"], + ) + return + } + if object == nil { + return + } + if method == "__adopt__" { + child, ok := c.objects.Load(msg.Params["guid"].(string)) + if !ok { + return + } + object.adopt(child) + return + } + if method == "__dispose__" { + reason, ok := msg.Params["reason"] + if ok { + object.dispose(reason.(string)) + } else { + object.dispose() + } + return + } + if object.objectType == "JsonPipe" { + object.channel.Emit(method, msg.Params) + } else { + object.channel.Emit(method, c.replaceGuidsWithChannels(msg.Params)) + } +} + +func (c *connection) LocalUtils() *localUtilsImpl { + return c.localUtils +} + +func (c *connection) createRemoteObject(parent *channelOwner, objectType string, guid string, initializer interface{}) interface{} { + initializer = c.replaceGuidsWithChannels(initializer) + result 
:= createObjectFactory(parent, objectType, guid, initializer.(map[string]interface{})) + return result +} + +func (c *connection) WrapAPICall(cb func() (interface{}, error), isInternal bool) (interface{}, error) { + if _, ok := c.apiZone.Load("apiZone"); ok { + return cb() + } + c.apiZone.Store("apiZone", serializeCallStack(isInternal)) + return cb() +} + +func (c *connection) replaceGuidsWithChannels(payload interface{}) interface{} { + if payload == nil { + return nil + } + v := reflect.ValueOf(payload) + if v.Kind() == reflect.Slice { + listV := payload.([]interface{}) + for i := 0; i < len(listV); i++ { + listV[i] = c.replaceGuidsWithChannels(listV[i]) + } + return listV + } + if v.Kind() == reflect.Map { + mapV := payload.(map[string]interface{}) + if guid, hasGUID := mapV["guid"]; hasGUID { + if channelOwner, ok := c.objects.Load(guid.(string)); ok { + return channelOwner.channel + } + } + for key := range mapV { + mapV[key] = c.replaceGuidsWithChannels(mapV[key]) + } + return mapV + } + return payload +} + +func (c *connection) sendMessageToServer(object *channelOwner, method string, params interface{}, noReply bool) (cb *protocolCallback) { + cb = newProtocolCallback(noReply, c.abort) + + if err := c.closedError.Get(); err != nil { + cb.SetError(err) + return + } + if object.wasCollected { + cb.SetError(errors.New("The object has been collected to prevent unbounded heap growth.")) + return + } + + id := c.lastID.Add(1) + c.callbacks.Store(id, cb) + var ( + metadata = make(map[string]interface{}, 0) + stack = make([]map[string]interface{}, 0) + ) + apiZone, ok := c.apiZone.LoadAndDelete("apiZone") + if ok { + for k, v := range apiZone.(parsedStackTrace).metadata { + metadata[k] = v + } + stack = append(stack, apiZone.(parsedStackTrace).frames...) 
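+		// Metadata captured by WrapAPICall (api name, call location) rides along
+		// with the outgoing message; the raw stack frames feed tracing below.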
+ } + metadata["wallTime"] = time.Now().UnixMilli() + message := map[string]interface{}{ + "id": id, + "guid": object.guid, + "method": method, + "params": params, // channel.MarshalJSON will replace channel with guid + "metadata": metadata, + } + if c.tracingCount.Load() > 0 && len(stack) > 0 && object.guid != "localUtils" { + c.LocalUtils().AddStackToTracingNoReply(id, stack) + } + + if err := c.transport.Send(message); err != nil { + cb.SetError(fmt.Errorf("could not send message: %w", err)) + return + } + + return +} + +func (c *connection) setInTracing(isTracing bool) { + if isTracing { + c.tracingCount.Add(1) + } else { + c.tracingCount.Add(-1) + } +} + +type parsedStackTrace struct { + frames []map[string]interface{} + metadata map[string]interface{} +} + +func serializeCallStack(isInternal bool) parsedStackTrace { + st := stack.Trace().TrimRuntime() + if len(st) == 0 { // https://github.com/go-stack/stack/issues/27 + st = stack.Trace() + } + + lastInternalIndex := 0 + for i, s := range st { + if pkgSourcePathPattern.MatchString(s.Frame().File) { + lastInternalIndex = i + } + } + apiName := "" + if !isInternal { + apiName = fmt.Sprintf("%n", st[lastInternalIndex]) + } + st = st.TrimBelow(st[lastInternalIndex]) + + callStack := make([]map[string]interface{}, 0) + for i, s := range st { + if i == 0 { + continue + } + callStack = append(callStack, map[string]interface{}{ + "file": s.Frame().File, + "line": s.Frame().Line, + "column": 0, + "function": s.Frame().Function, + }) + } + metadata := make(map[string]interface{}) + if len(st) > 1 { + metadata["location"] = serializeCallLocation(st[1]) + } + apiName = apiNameTransform.ReplaceAllString(apiName, "$1") + if len(apiName) > 1 { + apiName = strings.ToUpper(apiName[:1]) + apiName[1:] + } + metadata["apiName"] = apiName + metadata["isInternal"] = isInternal + return parsedStackTrace{ + metadata: metadata, + frames: callStack, + } +} + +func serializeCallLocation(caller stack.Call) map[string]interface{} { + line, _ := strconv.Atoi(fmt.Sprintf("%d", caller)) + return map[string]interface{}{ + "file": fmt.Sprintf("%s", caller), + "line": line, + } +} + +func newConnection(transport transport, localUtils ...*localUtilsImpl) *connection { + connection := &connection{ + abort: make(chan struct{}, 1), + callbacks: safe.NewSyncMap[uint32, *protocolCallback](), + objects: safe.NewSyncMap[string, *channelOwner](), + transport: transport, + isRemote: false, + err: &safeValue[error]{}, + closedError: &safeValue[error]{}, + } + if len(localUtils) > 0 { + connection.localUtils = localUtils[0] + connection.isRemote = true + } + connection.rootObject = newRootChannelOwner(connection) + return connection +} + +func fromChannel(v interface{}) interface{} { + return v.(*channel).object +} + +func fromNullableChannel(v interface{}) interface{} { + if v == nil { + return nil + } + return fromChannel(v) +} + +type protocolCallback struct { + done chan struct{} + noReply bool + abort <-chan struct{} + once sync.Once + value map[string]interface{} + err error +} + +func (pc *protocolCallback) setResultOnce(result map[string]interface{}, err error) { + pc.once.Do(func() { + pc.value = result + pc.err = err + close(pc.done) + }) +} + +func (pc *protocolCallback) waitResult() { + if pc.noReply { + return + } + select { + case <-pc.done: // wait for result + return + case <-pc.abort: + select { + case <-pc.done: + return + default: + pc.err = errors.New("Connection closed") + return + } + } +} + +func (pc *protocolCallback) SetError(err error) { + 
pc.setResultOnce(nil, err) +} + +func (pc *protocolCallback) SetResult(result map[string]interface{}) { + pc.setResultOnce(result, nil) +} + +func (pc *protocolCallback) GetResult() (map[string]interface{}, error) { + pc.waitResult() + return pc.value, pc.err +} + +// GetResultValue returns value if the map has only one element +func (pc *protocolCallback) GetResultValue() (interface{}, error) { + pc.waitResult() + if len(pc.value) == 0 { // empty map treated as nil + return nil, pc.err + } + if len(pc.value) == 1 { + for key := range pc.value { + return pc.value[key], pc.err + } + } + + return pc.value, pc.err +} + +func newProtocolCallback(noReply bool, abort <-chan struct{}) *protocolCallback { + if noReply { + return &protocolCallback{ + noReply: true, + abort: abort, + } + } + return &protocolCallback{ + done: make(chan struct{}, 1), + abort: abort, + } +} diff --git a/vendor/github.com/playwright-community/playwright-go/console_message.go b/vendor/github.com/playwright-community/playwright-go/console_message.go new file mode 100644 index 0000000..4baf3f1 --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/console_message.go @@ -0,0 +1,47 @@ +package playwright + +type consoleMessageImpl struct { + event map[string]interface{} + page Page +} + +func (c *consoleMessageImpl) Type() string { + return c.event["type"].(string) +} + +func (c *consoleMessageImpl) Text() string { + return c.event["text"].(string) +} + +func (c *consoleMessageImpl) String() string { + return c.Text() +} + +func (c *consoleMessageImpl) Args() []JSHandle { + args := c.event["args"].([]interface{}) + out := []JSHandle{} + for idx := range args { + out = append(out, fromChannel(args[idx]).(*jsHandleImpl)) + } + return out +} + +func (c *consoleMessageImpl) Location() *ConsoleMessageLocation { + location := &ConsoleMessageLocation{} + remapMapToStruct(c.event["location"], location) + return location +} + +func (c *consoleMessageImpl) Page() Page { + return c.page +} + +func newConsoleMessage(event map[string]interface{}) *consoleMessageImpl { + bt := &consoleMessageImpl{} + bt.event = event + page := fromNullableChannel(event["page"]) + if page != nil { + bt.page = page.(*pageImpl) + } + return bt +} diff --git a/vendor/github.com/playwright-community/playwright-go/dialog.go b/vendor/github.com/playwright-community/playwright-go/dialog.go new file mode 100644 index 0000000..8d13234 --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/dialog.go @@ -0,0 +1,48 @@ +package playwright + +type dialogImpl struct { + channelOwner + page Page +} + +func (d *dialogImpl) Type() string { + return d.initializer["type"].(string) +} + +func (d *dialogImpl) Message() string { + return d.initializer["message"].(string) +} + +func (d *dialogImpl) DefaultValue() string { + return d.initializer["defaultValue"].(string) +} + +func (d *dialogImpl) Accept(promptTextInput ...string) error { + var promptText *string + if len(promptTextInput) == 1 { + promptText = &promptTextInput[0] + } + _, err := d.channel.Send("accept", map[string]interface{}{ + "promptText": promptText, + }) + return err +} + +func (d *dialogImpl) Dismiss() error { + _, err := d.channel.Send("dismiss") + return err +} + +func (d *dialogImpl) Page() Page { + return d.page +} + +func newDialog(parent *channelOwner, objectType string, guid string, initializer map[string]interface{}) *dialogImpl { + bt := &dialogImpl{} + bt.createChannelOwner(bt, parent, objectType, guid, initializer) + page := 
fromNullableChannel(initializer["page"]) + if page != nil { + bt.page = page.(*pageImpl) + } + return bt +} diff --git a/vendor/github.com/playwright-community/playwright-go/download.go b/vendor/github.com/playwright-community/playwright-go/download.go new file mode 100644 index 0000000..b9d2024 --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/download.go @@ -0,0 +1,56 @@ +package playwright + +type downloadImpl struct { + page *pageImpl + url string + suggestedFilename string + artifact *artifactImpl +} + +func (d *downloadImpl) String() string { + return d.SuggestedFilename() +} + +func (d *downloadImpl) Page() Page { + return d.page +} + +func (d *downloadImpl) URL() string { + return d.url +} + +func (d *downloadImpl) SuggestedFilename() string { + return d.suggestedFilename +} + +func (d *downloadImpl) Delete() error { + err := d.artifact.Delete() + return err +} + +func (d *downloadImpl) Failure() error { + return d.artifact.Failure() +} + +func (d *downloadImpl) Path() (string, error) { + path, err := d.artifact.PathAfterFinished() + return path, err +} + +func (d *downloadImpl) SaveAs(path string) error { + err := d.artifact.SaveAs(path) + return err +} + +func (d *downloadImpl) Cancel() error { + return d.artifact.Cancel() +} + +func newDownload(page *pageImpl, url string, suggestedFilename string, artifact *artifactImpl) *downloadImpl { + return &downloadImpl{ + page: page, + url: url, + suggestedFilename: suggestedFilename, + artifact: artifact, + } +} diff --git a/vendor/github.com/playwright-community/playwright-go/element_handle.go b/vendor/github.com/playwright-community/playwright-go/element_handle.go new file mode 100644 index 0000000..62c41ba --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/element_handle.go @@ -0,0 +1,403 @@ +package playwright + +import ( + "encoding/base64" + "errors" + "fmt" + "os" +) + +type elementHandleImpl struct { + jsHandleImpl +} + +func (e *elementHandleImpl) AsElement() ElementHandle { + return e +} + +func (e *elementHandleImpl) OwnerFrame() (Frame, error) { + channel, err := e.channel.Send("ownerFrame") + if err != nil { + return nil, err + } + channelOwner := fromNullableChannel(channel) + if channelOwner == nil { + return nil, nil + } + return channelOwner.(*frameImpl), nil +} + +func (e *elementHandleImpl) ContentFrame() (Frame, error) { + channel, err := e.channel.Send("contentFrame") + if err != nil { + return nil, err + } + channelOwner := fromNullableChannel(channel) + if channelOwner == nil { + return nil, nil + } + return channelOwner.(*frameImpl), nil +} + +func (e *elementHandleImpl) GetAttribute(name string) (string, error) { + attribute, err := e.channel.Send("getAttribute", map[string]interface{}{ + "name": name, + }) + if attribute == nil { + return "", err + } + return attribute.(string), err +} + +func (e *elementHandleImpl) TextContent() (string, error) { + textContent, err := e.channel.Send("textContent") + if textContent == nil { + return "", err + } + return textContent.(string), err +} + +func (e *elementHandleImpl) InnerText() (string, error) { + innerText, err := e.channel.Send("innerText") + if innerText == nil { + return "", err + } + return innerText.(string), err +} + +func (e *elementHandleImpl) InnerHTML() (string, error) { + innerHTML, err := e.channel.Send("innerHTML") + if innerHTML == nil { + return "", err + } + return innerHTML.(string), err +} + +func (e *elementHandleImpl) DispatchEvent(typ string, initObjects ...interface{}) error { + var initObject 
interface{} + if len(initObjects) == 1 { + initObject = initObjects[0] + } + _, err := e.channel.Send("dispatchEvent", map[string]interface{}{ + "type": typ, + "eventInit": serializeArgument(initObject), + }) + return err +} + +func (e *elementHandleImpl) Hover(options ...ElementHandleHoverOptions) error { + _, err := e.channel.Send("hover", options) + return err +} + +func (e *elementHandleImpl) Click(options ...ElementHandleClickOptions) error { + _, err := e.channel.Send("click", options) + return err +} + +func (e *elementHandleImpl) Dblclick(options ...ElementHandleDblclickOptions) error { + _, err := e.channel.Send("dblclick", options) + return err +} + +func (e *elementHandleImpl) QuerySelector(selector string) (ElementHandle, error) { + channel, err := e.channel.Send("querySelector", map[string]interface{}{ + "selector": selector, + }) + if err != nil { + return nil, err + } + if channel == nil { + return nil, nil + } + return fromChannel(channel).(*elementHandleImpl), nil +} + +func (e *elementHandleImpl) QuerySelectorAll(selector string) ([]ElementHandle, error) { + channels, err := e.channel.Send("querySelectorAll", map[string]interface{}{ + "selector": selector, + }) + if err != nil { + return nil, err + } + elements := make([]ElementHandle, 0) + for _, channel := range channels.([]interface{}) { + elements = append(elements, fromChannel(channel).(*elementHandleImpl)) + } + return elements, nil +} + +func (e *elementHandleImpl) EvalOnSelector(selector string, expression string, options ...interface{}) (interface{}, error) { + var arg interface{} + if len(options) == 1 { + arg = options[0] + } + result, err := e.channel.Send("evalOnSelector", map[string]interface{}{ + "selector": selector, + "expression": expression, + "arg": serializeArgument(arg), + }) + if err != nil { + return nil, err + } + return parseResult(result), nil +} + +func (e *elementHandleImpl) EvalOnSelectorAll(selector string, expression string, options ...interface{}) (interface{}, error) { + var arg interface{} + if len(options) == 1 { + arg = options[0] + } + result, err := e.channel.Send("evalOnSelectorAll", map[string]interface{}{ + "selector": selector, + "expression": expression, + "arg": serializeArgument(arg), + }) + if err != nil { + return nil, err + } + return parseResult(result), nil +} + +func (e *elementHandleImpl) ScrollIntoViewIfNeeded(options ...ElementHandleScrollIntoViewIfNeededOptions) error { + _, err := e.channel.Send("scrollIntoViewIfNeeded", options) + if err != nil { + return err + } + return err +} + +func (e *elementHandleImpl) SetInputFiles(files interface{}, options ...ElementHandleSetInputFilesOptions) error { + frame, err := e.OwnerFrame() + if err != nil { + return err + } + if frame == nil { + return errors.New("Cannot set input files to detached element") + } + + params, err := convertInputFiles(files, frame.(*frameImpl).page.browserContext) + if err != nil { + return err + } + _, err = e.channel.Send("setInputFiles", params, options) + return err +} + +func (e *elementHandleImpl) BoundingBox() (*Rect, error) { + boundingBox, err := e.channel.Send("boundingBox") + if err != nil { + return nil, err + } + + if boundingBox == nil { + return nil, nil + } + + out := &Rect{} + remapMapToStruct(boundingBox, out) + return out, nil +} + +func (e *elementHandleImpl) Check(options ...ElementHandleCheckOptions) error { + _, err := e.channel.Send("check", options) + return err +} + +func (e *elementHandleImpl) Uncheck(options ...ElementHandleUncheckOptions) error { + _, err := 
e.channel.Send("uncheck", options) + return err +} + +func (e *elementHandleImpl) Press(key string, options ...ElementHandlePressOptions) error { + _, err := e.channel.Send("press", map[string]interface{}{ + "key": key, + }, options) + return err +} + +func (e *elementHandleImpl) Fill(value string, options ...ElementHandleFillOptions) error { + _, err := e.channel.Send("fill", map[string]interface{}{ + "value": value, + }, options) + return err +} + +func (e *elementHandleImpl) Type(value string, options ...ElementHandleTypeOptions) error { + _, err := e.channel.Send("type", map[string]interface{}{ + "text": value, + }, options) + return err +} + +func (e *elementHandleImpl) Focus() error { + _, err := e.channel.Send("focus") + return err +} + +func (e *elementHandleImpl) SelectText(options ...ElementHandleSelectTextOptions) error { + _, err := e.channel.Send("selectText", options) + return err +} + +func (e *elementHandleImpl) Screenshot(options ...ElementHandleScreenshotOptions) ([]byte, error) { + var path *string + overrides := map[string]interface{}{} + if len(options) == 1 { + path = options[0].Path + options[0].Path = nil + if options[0].Mask != nil { + masks := make([]map[string]interface{}, 0) + for _, m := range options[0].Mask { + if m.Err() != nil { // ErrLocatorNotSameFrame + return nil, m.Err() + } + l, ok := m.(*locatorImpl) + if ok { + masks = append(masks, map[string]interface{}{ + "selector": l.selector, + "frame": l.frame.channel, + }) + } + } + overrides["mask"] = masks + options[0].Mask = nil + } + } + data, err := e.channel.Send("screenshot", options, overrides) + if err != nil { + return nil, err + } + image, err := base64.StdEncoding.DecodeString(data.(string)) + if err != nil { + return nil, fmt.Errorf("could not decode base64 :%w", err) + } + if path != nil { + if err := os.WriteFile(*path, image, 0o644); err != nil { + return nil, err + } + } + return image, nil +} + +func (e *elementHandleImpl) Tap(options ...ElementHandleTapOptions) error { + _, err := e.channel.Send("tap", options) + return err +} + +func (e *elementHandleImpl) SelectOption(values SelectOptionValues, options ...ElementHandleSelectOptionOptions) ([]string, error) { + opts := convertSelectOptionSet(values) + selected, err := e.channel.Send("selectOption", opts, options) + if err != nil { + return nil, err + } + + return transformToStringList(selected), nil +} + +func (e *elementHandleImpl) IsChecked() (bool, error) { + checked, err := e.channel.Send("isChecked") + if err != nil { + return false, err + } + return checked.(bool), nil +} + +func (e *elementHandleImpl) IsDisabled() (bool, error) { + disabled, err := e.channel.Send("isDisabled") + if err != nil { + return false, err + } + return disabled.(bool), nil +} + +func (e *elementHandleImpl) IsEditable() (bool, error) { + editable, err := e.channel.Send("isEditable") + if err != nil { + return false, err + } + return editable.(bool), nil +} + +func (e *elementHandleImpl) IsEnabled() (bool, error) { + enabled, err := e.channel.Send("isEnabled") + if err != nil { + return false, err + } + return enabled.(bool), nil +} + +func (e *elementHandleImpl) IsHidden() (bool, error) { + hidden, err := e.channel.Send("isHidden") + if err != nil { + return false, err + } + return hidden.(bool), nil +} + +func (e *elementHandleImpl) IsVisible() (bool, error) { + visible, err := e.channel.Send("isVisible") + if err != nil { + return false, err + } + return visible.(bool), nil +} + +func (e *elementHandleImpl) WaitForElementState(state ElementState, options 
...ElementHandleWaitForElementStateOptions) error { + _, err := e.channel.Send("waitForElementState", map[string]interface{}{ + "state": state, + }, options) + if err != nil { + return err + } + return nil +} + +func (e *elementHandleImpl) WaitForSelector(selector string, options ...ElementHandleWaitForSelectorOptions) (ElementHandle, error) { + ch, err := e.channel.Send("waitForSelector", map[string]interface{}{ + "selector": selector, + }, options) + if err != nil { + return nil, err + } + + channelOwner := fromNullableChannel(ch) + if channelOwner == nil { + return nil, nil + } + return channelOwner.(*elementHandleImpl), nil +} + +func (e *elementHandleImpl) InputValue(options ...ElementHandleInputValueOptions) (string, error) { + result, err := e.channel.Send("inputValue", options) + if result == nil { + return "", err + } + return result.(string), err +} + +func (e *elementHandleImpl) SetChecked(checked bool, options ...ElementHandleSetCheckedOptions) error { + if checked { + _, err := e.channel.Send("check", options) + return err + } else { + _, err := e.channel.Send("uncheck", options) + return err + } +} + +func newElementHandle(parent *channelOwner, objectType string, guid string, initializer map[string]interface{}) *elementHandleImpl { + bt := &elementHandleImpl{} + bt.createChannelOwner(bt, parent, objectType, guid, initializer) + return bt +} + +func transformToStringList(in interface{}) []string { + s := in.([]interface{}) + + var out []string + for _, v := range s { + out = append(out, v.(string)) + } + return out +} diff --git a/vendor/github.com/playwright-community/playwright-go/errors.go b/vendor/github.com/playwright-community/playwright-go/errors.go new file mode 100644 index 0000000..36f7396 --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/errors.go @@ -0,0 +1,58 @@ +package playwright + +import ( + "errors" + "fmt" +) + +var ( + // ErrPlaywright wraps all Playwright errors. + // - Use errors.Is to check if the error is a Playwright error. + // - Use errors.As to cast an error to [Error] if you want to access "Stack". + ErrPlaywright = errors.New("playwright") + // ErrTargetClosed usually wraps a reason. + ErrTargetClosed = errors.New("target closed") + // ErrTimeout wraps timeout errors. It can be either Playwright TimeoutError or client timeout. 
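+	// For example, a caller might check (illustrative sketch; err is a
+	// hypothetical error returned by some Playwright call):
+	//
+	//	if errors.Is(err, ErrTimeout) {
+	//		// a Playwright TimeoutError or a client-side timeout occurred
+	//	}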
+ ErrTimeout = errors.New("timeout") +) + +// Error represents a Playwright error +type Error struct { + Name string `json:"name"` + Message string `json:"message"` + Stack string `json:"stack"` +} + +func (e *Error) Error() string { + return e.Message +} + +func (e *Error) Is(target error) bool { + err, ok := target.(*Error) + if !ok { + return false + } + if err.Name != e.Name { + return false + } + if e.Name != "Error" { + return true // same name and not normal error + } + return e.Message == err.Message +} + +func parseError(err Error) error { + if err.Name == "TimeoutError" { + return fmt.Errorf("%w: %w: %w", ErrPlaywright, ErrTimeout, &err) + } else if err.Name == "TargetClosedError" { + return fmt.Errorf("%w: %w: %w", ErrPlaywright, ErrTargetClosed, &err) + } + return fmt.Errorf("%w: %w", ErrPlaywright, &err) +} + +func targetClosedError(reason *string) error { + if reason == nil { + return ErrTargetClosed + } + return fmt.Errorf("%w: %s", ErrTargetClosed, *reason) +} diff --git a/vendor/github.com/playwright-community/playwright-go/event_emitter.go b/vendor/github.com/playwright-community/playwright-go/event_emitter.go new file mode 100644 index 0000000..d4d62ef --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/event_emitter.go @@ -0,0 +1,163 @@ +package playwright + +import ( + "math" + "reflect" + "slices" + "sync" +) + +type EventEmitter interface { + Emit(name string, payload ...interface{}) bool + ListenerCount(name string) int + On(name string, handler interface{}) + Once(name string, handler interface{}) + RemoveListener(name string, handler interface{}) + RemoveListeners(name string) +} + +type ( + eventEmitter struct { + eventsMutex sync.Mutex + events map[string]*eventRegister + hasInit bool + } + eventRegister struct { + sync.Mutex + listeners []listener + } + listener struct { + handler interface{} + once bool + } +) + +func NewEventEmitter() EventEmitter { + return &eventEmitter{} +} + +func (e *eventEmitter) Emit(name string, payload ...interface{}) (hasListener bool) { + e.eventsMutex.Lock() + e.init() + + evt, ok := e.events[name] + if !ok { + e.eventsMutex.Unlock() + return + } + e.eventsMutex.Unlock() + return evt.callHandlers(payload...) 
> 0
+}
+
+func (e *eventEmitter) Once(name string, handler interface{}) {
+	e.addEvent(name, handler, true)
+}
+
+func (e *eventEmitter) On(name string, handler interface{}) {
+	e.addEvent(name, handler, false)
+}
+
+func (e *eventEmitter) RemoveListener(name string, handler interface{}) {
+	e.eventsMutex.Lock()
+	defer e.eventsMutex.Unlock()
+	e.init()
+
+	if evt, ok := e.events[name]; ok {
+		evt.Lock()
+		defer evt.Unlock()
+		evt.removeHandler(handler)
+	}
+}
+
+func (e *eventEmitter) RemoveListeners(name string) {
+	e.eventsMutex.Lock()
+	defer e.eventsMutex.Unlock()
+	e.init()
+	delete(e.events, name)
+}
+
+// ListenerCount counts the listeners registered for name; it counts all listeners if name is empty
+func (e *eventEmitter) ListenerCount(name string) int {
+	e.eventsMutex.Lock()
+	defer e.eventsMutex.Unlock()
+	e.init()
+
+	if name != "" {
+		evt, ok := e.events[name]
+		if !ok {
+			return 0
+		}
+		return evt.count()
+	}
+
+	count := 0
+	for key := range e.events {
+		count += e.events[key].count()
+	}
+
+	return count
+}
+
+func (e *eventEmitter) addEvent(name string, handler interface{}, once bool) {
+	e.eventsMutex.Lock()
+	defer e.eventsMutex.Unlock()
+	e.init()
+
+	if _, ok := e.events[name]; !ok {
+		e.events[name] = &eventRegister{
+			listeners: make([]listener, 0),
+		}
+	}
+	e.events[name].addHandler(handler, once)
+}
+
+func (e *eventEmitter) init() {
+	if !e.hasInit {
+		e.events = make(map[string]*eventRegister, 0)
+		e.hasInit = true
+	}
+}
+
+func (er *eventRegister) addHandler(handler interface{}, once bool) {
+	er.Lock()
+	defer er.Unlock()
+	er.listeners = append(er.listeners, listener{handler: handler, once: once})
+}
+
+func (er *eventRegister) count() int {
+	er.Lock()
+	defer er.Unlock()
+	return len(er.listeners)
+}
+
+func (er *eventRegister) removeHandler(handler interface{}) {
+	handlerPtr := reflect.ValueOf(handler).Pointer()
+
+	er.listeners = slices.DeleteFunc(er.listeners, func(l listener) bool {
+		return reflect.ValueOf(l.handler).Pointer() == handlerPtr
+	})
+}
+
+func (er *eventRegister) callHandlers(payloads ...interface{}) int {
+	payloadV := make([]reflect.Value, 0)
+
+	for _, p := range payloads {
+		payloadV = append(payloadV, reflect.ValueOf(p))
+	}
+
+	handle := func(l listener) {
+		handlerV := reflect.ValueOf(l.handler)
+		handlerV.Call(payloadV[:int(math.Min(float64(handlerV.Type().NumIn()), float64(len(payloadV))))])
+	}
+
+	er.Lock()
+	defer er.Unlock()
+	count := len(er.listeners)
+	for _, l := range er.listeners {
+		if l.once {
+			defer er.removeHandler(l.handler)
+		}
+		handle(l)
+	}
+	return count
+}
diff --git a/vendor/github.com/playwright-community/playwright-go/fetch.go b/vendor/github.com/playwright-community/playwright-go/fetch.go
new file mode 100644
index 0000000..fc7f79f
--- /dev/null
+++ b/vendor/github.com/playwright-community/playwright-go/fetch.go
@@ -0,0 +1,451 @@
+package playwright
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"strings"
+)
+
+type apiRequestImpl struct {
+	*Playwright
+}
+
+func (r *apiRequestImpl) NewContext(options ...APIRequestNewContextOptions) (APIRequestContext, error) {
+	overrides := map[string]interface{}{}
+	if len(options) == 1 {
+		if options[0].ClientCertificates != nil {
+			certs, err := transformClientCertificate(options[0].ClientCertificates)
+			if err != nil {
+				return nil, err
+			}
+			overrides["clientCertificates"] = certs
+			options[0].ClientCertificates = nil
+		}
+		if options[0].ExtraHttpHeaders != nil {
+			overrides["extraHTTPHeaders"] = serializeMapToNameAndValue(options[0].ExtraHttpHeaders)
+
options[0].ExtraHttpHeaders = nil + } + if options[0].StorageStatePath != nil { + var storageState *StorageState + storageString, err := os.ReadFile(*options[0].StorageStatePath) + if err != nil { + return nil, fmt.Errorf("could not read storage state file: %w", err) + } + err = json.Unmarshal(storageString, &storageState) + if err != nil { + return nil, fmt.Errorf("could not parse storage state file: %w", err) + } + options[0].StorageState = storageState + options[0].StorageStatePath = nil + } + } + + channel, err := r.channel.Send("newRequest", options, overrides) + if err != nil { + return nil, err + } + return fromChannel(channel).(*apiRequestContextImpl), nil +} + +func newApiRequestImpl(pw *Playwright) *apiRequestImpl { + return &apiRequestImpl{pw} +} + +type apiRequestContextImpl struct { + channelOwner + tracing *tracingImpl + closeReason *string +} + +func (r *apiRequestContextImpl) Dispose(options ...APIRequestContextDisposeOptions) error { + if len(options) == 1 { + r.closeReason = options[0].Reason + } + _, err := r.channel.Send("dispose", map[string]interface{}{ + "reason": r.closeReason, + }) + if errors.Is(err, ErrTargetClosed) { + return nil + } + return err +} + +func (r *apiRequestContextImpl) Delete(url string, options ...APIRequestContextDeleteOptions) (APIResponse, error) { + opts := APIRequestContextFetchOptions{ + Method: String("DELETE"), + } + if len(options) == 1 { + err := assignStructFields(&opts, options[0], false) + if err != nil { + return nil, err + } + } + + return r.Fetch(url, opts) +} + +func (r *apiRequestContextImpl) Fetch(urlOrRequest interface{}, options ...APIRequestContextFetchOptions) (APIResponse, error) { + switch v := urlOrRequest.(type) { + case string: + return r.innerFetch(v, nil, options...) + case Request: + return r.innerFetch("", v, options...) 
+ default: + return nil, fmt.Errorf("urlOrRequest has unsupported type: %T", urlOrRequest) + } +} + +func (r *apiRequestContextImpl) innerFetch(url string, request Request, options ...APIRequestContextFetchOptions) (APIResponse, error) { + if r.closeReason != nil { + return nil, fmt.Errorf("%w: %s", ErrTargetClosed, *r.closeReason) + } + overrides := map[string]interface{}{} + if url != "" { + overrides["url"] = url + } else if request != nil { + overrides["url"] = request.URL() + } + + if len(options) == 1 { + if options[0].MaxRedirects != nil && *options[0].MaxRedirects < 0 { + return nil, errors.New("maxRedirects must be non-negative") + } + if options[0].MaxRetries != nil && *options[0].MaxRetries < 0 { + return nil, errors.New("maxRetries must be non-negative") + } + // only one of them can be specified + if countNonNil(options[0].Data, options[0].Form, options[0].Multipart) > 1 { + return nil, errors.New("only one of 'data', 'form' or 'multipart' can be specified") + } + if options[0].Method == nil { + if request != nil { + options[0].Method = String(request.Method()) + } else { + options[0].Method = String("GET") + } + } + if options[0].Headers == nil { + if request != nil { + overrides["headers"] = serializeMapToNameAndValue(request.Headers()) + } + } else { + overrides["headers"] = serializeMapToNameAndValue(options[0].Headers) + options[0].Headers = nil + } + if options[0].Data != nil { + switch v := options[0].Data.(type) { + case string: + headersArray, ok := overrides["headers"].([]map[string]string) + if ok && isJsonContentType(headersArray) { + if json.Valid([]byte(v)) { + overrides["jsonData"] = v + } else { + data, err := json.Marshal(v) + if err != nil { + return nil, fmt.Errorf("could not marshal data: %w", err) + } + overrides["jsonData"] = string(data) + } + } else { + overrides["postData"] = base64.StdEncoding.EncodeToString([]byte(v)) + } + case []byte: + overrides["postData"] = base64.StdEncoding.EncodeToString(v) + case interface{}: + data, err := json.Marshal(v) + if err != nil { + return nil, fmt.Errorf("could not marshal data: %w", err) + } + overrides["jsonData"] = string(data) + default: + return nil, errors.New("data must be a string, []byte, or interface{} that can marshal to json") + } + options[0].Data = nil + } else if options[0].Form != nil { + form, ok := options[0].Form.(map[string]interface{}) + if !ok { + return nil, errors.New("form must be a map") + } + overrides["formData"] = serializeMapToNameValue(form) + options[0].Form = nil + } else if options[0].Multipart != nil { + _, ok := options[0].Multipart.(map[string]interface{}) + if !ok { + return nil, errors.New("multipart must be a map") + } + multipartData := []map[string]interface{}{} + for name, value := range options[0].Multipart.(map[string]interface{}) { + switch v := value.(type) { + case InputFile: + multipartData = append(multipartData, map[string]interface{}{ + "name": name, + "file": map[string]string{ + "name": v.Name, + "mimeType": v.MimeType, + "buffer": base64.StdEncoding.EncodeToString(v.Buffer), + }, + }) + default: + multipartData = append(multipartData, map[string]interface{}{ + "name": name, + "value": String(fmt.Sprintf("%v", v)), + }) + } + } + overrides["multipartData"] = multipartData + options[0].Multipart = nil + } else if request != nil { + postDataBuf, err := request.PostDataBuffer() + if err == nil { + overrides["postData"] = base64.StdEncoding.EncodeToString(postDataBuf) + } + } + if options[0].Params != nil { + overrides["params"] = 
serializeMapToNameValue(options[0].Params) + options[0].Params = nil + } + } + + response, err := r.channel.Send("fetch", options, overrides) + if err != nil { + return nil, err + } + + return newAPIResponse(r, response.(map[string]interface{})), nil +} + +func (r *apiRequestContextImpl) Get(url string, options ...APIRequestContextGetOptions) (APIResponse, error) { + opts := APIRequestContextFetchOptions{ + Method: String("GET"), + } + if len(options) == 1 { + err := assignStructFields(&opts, options[0], false) + if err != nil { + return nil, err + } + } + + return r.Fetch(url, opts) +} + +func (r *apiRequestContextImpl) Head(url string, options ...APIRequestContextHeadOptions) (APIResponse, error) { + opts := APIRequestContextFetchOptions{ + Method: String("HEAD"), + } + if len(options) == 1 { + err := assignStructFields(&opts, options[0], false) + if err != nil { + return nil, err + } + } + + return r.Fetch(url, opts) +} + +func (r *apiRequestContextImpl) Patch(url string, options ...APIRequestContextPatchOptions) (APIResponse, error) { + opts := APIRequestContextFetchOptions{ + Method: String("PATCH"), + } + if len(options) == 1 { + err := assignStructFields(&opts, options[0], false) + if err != nil { + return nil, err + } + } + + return r.Fetch(url, opts) +} + +func (r *apiRequestContextImpl) Put(url string, options ...APIRequestContextPutOptions) (APIResponse, error) { + opts := APIRequestContextFetchOptions{ + Method: String("PUT"), + } + if len(options) == 1 { + err := assignStructFields(&opts, options[0], false) + if err != nil { + return nil, err + } + } + + return r.Fetch(url, opts) +} + +func (r *apiRequestContextImpl) Post(url string, options ...APIRequestContextPostOptions) (APIResponse, error) { + opts := APIRequestContextFetchOptions{ + Method: String("POST"), + } + if len(options) == 1 { + err := assignStructFields(&opts, options[0], false) + if err != nil { + return nil, err + } + } + + return r.Fetch(url, opts) +} + +func (r *apiRequestContextImpl) StorageState(path ...string) (*StorageState, error) { + result, err := r.channel.SendReturnAsDict("storageState") + if err != nil { + return nil, err + } + if len(path) == 1 { + file, err := os.Create(path[0]) + if err != nil { + return nil, err + } + if err := json.NewEncoder(file).Encode(result); err != nil { + return nil, err + } + if err := file.Close(); err != nil { + return nil, err + } + } + var storageState StorageState + remapMapToStruct(result, &storageState) + return &storageState, nil +} + +func newAPIRequestContext(parent *channelOwner, objectType string, guid string, initializer map[string]interface{}) *apiRequestContextImpl { + rc := &apiRequestContextImpl{} + rc.createChannelOwner(rc, parent, objectType, guid, initializer) + rc.tracing = fromChannel(initializer["tracing"]).(*tracingImpl) + return rc +} + +type apiResponseImpl struct { + request *apiRequestContextImpl + initializer map[string]interface{} + headers *rawHeaders +} + +func (r *apiResponseImpl) Body() ([]byte, error) { + result, err := r.request.channel.SendReturnAsDict("fetchResponseBody", []map[string]interface{}{ + { + "fetchUid": r.fetchUid(), + }, + }) + if err != nil { + if errors.Is(err, ErrTargetClosed) { + return nil, errors.New("response has been disposed") + } + return nil, err + } + body := result["binary"] + if body == nil { + return nil, errors.New("response has been disposed") + } + return base64.StdEncoding.DecodeString(body.(string)) +} + +func (r *apiResponseImpl) Dispose() error { + _, err := 
r.request.channel.Send("disposeAPIResponse", []map[string]interface{}{ + { + "fetchUid": r.fetchUid(), + }, + }) + return err +} + +func (r *apiResponseImpl) Headers() map[string]string { + return r.headers.Headers() +} + +func (r *apiResponseImpl) HeadersArray() []NameValue { + return r.headers.HeadersArray() +} + +func (r *apiResponseImpl) JSON(v interface{}) error { + body, err := r.Body() + if err != nil { + return err + } + return json.Unmarshal(body, &v) +} + +func (r *apiResponseImpl) Ok() bool { + return r.Status() == 0 || (r.Status() >= 200 && r.Status() <= 299) +} + +func (r *apiResponseImpl) Status() int { + return int(r.initializer["status"].(float64)) +} + +func (r *apiResponseImpl) StatusText() string { + return r.initializer["statusText"].(string) +} + +func (r *apiResponseImpl) Text() (string, error) { + body, err := r.Body() + if err != nil { + return "", err + } + return string(body), nil +} + +func (r *apiResponseImpl) URL() string { + return r.initializer["url"].(string) +} + +func (r *apiResponseImpl) fetchUid() string { + return r.initializer["fetchUid"].(string) +} + +func (r *apiResponseImpl) fetchLog() ([]string, error) { + ret, err := r.request.channel.Send("fetchLog", map[string]interface{}{ + "fetchUid": r.fetchUid(), + }) + if err != nil { + return nil, err + } + result := make([]string, len(ret.([]interface{}))) + for i, v := range ret.([]interface{}) { + result[i] = v.(string) + } + return result, nil +} + +func newAPIResponse(context *apiRequestContextImpl, initializer map[string]interface{}) *apiResponseImpl { + return &apiResponseImpl{ + request: context, + initializer: initializer, + headers: newRawHeaders(initializer["headers"]), + } +} + +func countNonNil(args ...interface{}) int { + count := 0 + for _, v := range args { + if v != nil { + count++ + } + } + return count +} + +func isJsonContentType(headers []map[string]string) bool { + if len(headers) > 0 { + for _, v := range headers { + if strings.ToLower(v["name"]) == "content-type" { + if v["value"] == "application/json" { + return true + } + } + } + } + return false +} + +func serializeMapToNameValue(data map[string]interface{}) []map[string]string { + serialized := make([]map[string]string, 0, len(data)) + for k, v := range data { + serialized = append(serialized, map[string]string{ + "name": k, + "value": fmt.Sprintf("%v", v), + }) + } + return serialized +} diff --git a/vendor/github.com/playwright-community/playwright-go/file_chooser.go b/vendor/github.com/playwright-community/playwright-go/file_chooser.go new file mode 100644 index 0000000..119e885 --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/file_chooser.go @@ -0,0 +1,44 @@ +package playwright + +type fileChooserImpl struct { + page Page + elementHandle ElementHandle + isMultiple bool +} + +func (f *fileChooserImpl) Page() Page { + return f.page +} + +func (f *fileChooserImpl) Element() ElementHandle { + return f.elementHandle +} + +func (f *fileChooserImpl) IsMultiple() bool { + return f.isMultiple +} + +// InputFile represents the input file for: +// - FileChooser.SetFiles() +// - ElementHandle.SetInputFiles() +// - Page.SetInputFiles() +type InputFile struct { + Name string `json:"name"` + MimeType string `json:"mimeType,omitempty"` + Buffer []byte `json:"buffer"` +} + +func (f *fileChooserImpl) SetFiles(files interface{}, options ...FileChooserSetFilesOptions) error { + if len(options) == 1 { + return f.elementHandle.SetInputFiles(files, ElementHandleSetInputFilesOptions(options[0])) + } + return 
f.elementHandle.SetInputFiles(files) +} + +func newFileChooser(page Page, elementHandle ElementHandle, isMultiple bool) *fileChooserImpl { + return &fileChooserImpl{ + page: page, + elementHandle: elementHandle, + isMultiple: isMultiple, + } +} diff --git a/vendor/github.com/playwright-community/playwright-go/frame.go b/vendor/github.com/playwright-community/playwright-go/frame.go new file mode 100644 index 0000000..b571c8e --- /dev/null +++ b/vendor/github.com/playwright-community/playwright-go/frame.go @@ -0,0 +1,792 @@ +package playwright + +import ( + "errors" + "fmt" + "os" + "time" + + mapset "github.com/deckarep/golang-set/v2" +) + +type frameImpl struct { + channelOwner + detached bool + page *pageImpl + name string + url string + parentFrame Frame + childFrames []Frame + loadStates mapset.Set[string] +} + +func newFrame(parent *channelOwner, objectType string, guid string, initializer map[string]interface{}) *frameImpl { + var loadStates mapset.Set[string] + + if ls, ok := initializer["loadStates"].([]string); ok { + loadStates = mapset.NewSet[string](ls...) + } else { + loadStates = mapset.NewSet[string]() + } + f := &frameImpl{ + name: initializer["name"].(string), + url: initializer["url"].(string), + loadStates: loadStates, + childFrames: make([]Frame, 0), + } + f.createChannelOwner(f, parent, objectType, guid, initializer) + + channelOwner := fromNullableChannel(initializer["parentFrame"]) + if channelOwner != nil { + f.parentFrame = channelOwner.(*frameImpl) + f.parentFrame.(*frameImpl).childFrames = append(f.parentFrame.(*frameImpl).childFrames, f) + } + + f.channel.On("navigated", f.onFrameNavigated) + f.channel.On("loadstate", f.onLoadState) + return f +} + +func (f *frameImpl) URL() string { + f.RLock() + defer f.RUnlock() + return f.url +} + +func (f *frameImpl) Name() string { + f.RLock() + defer f.RUnlock() + return f.name +} + +func (f *frameImpl) SetContent(content string, options ...FrameSetContentOptions) error { + _, err := f.channel.Send("setContent", map[string]interface{}{ + "html": content, + }, options) + return err +} + +func (f *frameImpl) Content() (string, error) { + content, err := f.channel.Send("content") + if content == nil { + return "", err + } + return content.(string), err +} + +func (f *frameImpl) Goto(url string, options ...FrameGotoOptions) (Response, error) { + channel, err := f.channel.Send("goto", map[string]interface{}{ + "url": url, + }, options) + if err != nil { + return nil, fmt.Errorf("Frame.Goto %s: %w", url, err) + } + channelOwner := fromNullableChannel(channel) + if channelOwner == nil { + // navigation to about:blank or navigation to the same URL with a different hash + return nil, nil + } + return channelOwner.(*responseImpl), nil +} + +func (f *frameImpl) AddScriptTag(options FrameAddScriptTagOptions) (ElementHandle, error) { + if options.Path != nil { + file, err := os.ReadFile(*options.Path) + if err != nil { + return nil, err + } + options.Content = String(string(file)) + options.Path = nil + } + channel, err := f.channel.Send("addScriptTag", options) + if err != nil { + return nil, err + } + return fromChannel(channel).(*elementHandleImpl), nil +} + +func (f *frameImpl) AddStyleTag(options FrameAddStyleTagOptions) (ElementHandle, error) { + if options.Path != nil { + file, err := os.ReadFile(*options.Path) + if err != nil { + return nil, err + } + options.Content = String(string(file)) + options.Path = nil + } + channel, err := f.channel.Send("addStyleTag", options) + if err != nil { + return nil, err + } + return 
fromChannel(channel).(*elementHandleImpl), nil +} + +func (f *frameImpl) Page() Page { + return f.page +} + +func (f *frameImpl) WaitForLoadState(options ...FrameWaitForLoadStateOptions) error { + option := FrameWaitForLoadStateOptions{} + if len(options) == 1 { + option = options[0] + } + if option.State == nil { + option.State = LoadStateLoad + } + return f.waitForLoadStateImpl(string(*option.State), option.Timeout, nil) +} + +func (f *frameImpl) waitForLoadStateImpl(state string, timeout *float64, cb func() error) error { + if f.loadStates.ContainsOne(state) { + return nil + } + waiter, err := f.setNavigationWaiter(timeout) + if err != nil { + return err + } + waiter.WaitForEvent(f, "loadstate", func(payload interface{}) bool { + gotState := payload.(string) + return gotState == state + }) + if cb == nil { + _, err := waiter.Wait() + return err + } else { + _, err := waiter.RunAndWait(cb) + return err + } +} + +func (f *frameImpl) WaitForURL(url interface{}, options ...FrameWaitForURLOptions) error { + if f.page == nil { + return errors.New("frame is detached") + } + matcher := newURLMatcher(url, f.page.browserContext.options.BaseURL) + if matcher.Matches(f.URL()) { + state := "load" + timeout := Float(f.page.timeoutSettings.NavigationTimeout()) + if len(options) == 1 { + if options[0].WaitUntil != nil { + state = string(*options[0].WaitUntil) + } + if options[0].Timeout != nil { + timeout = options[0].Timeout + } + } + return f.waitForLoadStateImpl(state, timeout, nil) + } + navigationOptions := FrameExpectNavigationOptions{URL: url} + if len(options) > 0 { + navigationOptions.Timeout = options[0].Timeout + navigationOptions.WaitUntil = options[0].WaitUntil + } + if _, err := f.ExpectNavigation(nil, navigationOptions); err != nil { + return err + } + return nil +} + +func (f *frameImpl) ExpectNavigation(cb func() error, options ...FrameExpectNavigationOptions) (Response, error) { + if f.page == nil { + return nil, errors.New("frame is detached") + } + option := FrameExpectNavigationOptions{} + if len(options) == 1 { + option = options[0] + } + if option.WaitUntil == nil { + option.WaitUntil = WaitUntilStateLoad + } + if option.Timeout == nil { + option.Timeout = Float(f.page.timeoutSettings.NavigationTimeout()) + } + deadline := time.Now().Add(time.Duration(*option.Timeout) * time.Millisecond) + var matcher *urlMatcher + if option.URL != nil { + matcher = newURLMatcher(option.URL, f.page.browserContext.options.BaseURL) + } + predicate := func(events ...interface{}) bool { + ev := events[0].(map[string]interface{}) + err, ok := ev["error"] + if ok { + // Any failed navigation results in a rejection. 
+ logger.Error("navigation error", "url", ev["url"].(string), "error", err) + return true + } + return matcher == nil || matcher.Matches(ev["url"].(string)) + } + waiter, err := f.setNavigationWaiter(option.Timeout) + if err != nil { + return nil, err + } + + eventData, err := waiter.WaitForEvent(f, "navigated", predicate).RunAndWait(cb) + if err != nil || eventData == nil { + return nil, err + } + + t := time.Until(deadline).Milliseconds() + if t > 0 { + err = f.waitForLoadStateImpl(string(*option.WaitUntil), Float(float64(t)), nil) + if err != nil { + return nil, err + } + } + event := eventData.(map[string]interface{}) + if event["newDocument"] != nil && event["newDocument"].(map[string]interface{})["request"] != nil { + request := fromChannel(event["newDocument"].(map[string]interface{})["request"]).(*requestImpl) + return request.Response() + } + return nil, nil +} + +func (f *frameImpl) setNavigationWaiter(timeout *float64) (*waiter, error) { + if f.page == nil { + return nil, errors.New("page does not exist") + } + waiter := newWaiter() + if timeout != nil { + waiter.WithTimeout(*timeout) + } else { + waiter.WithTimeout(f.page.timeoutSettings.NavigationTimeout()) + } + waiter.RejectOnEvent(f.page, "close", f.page.closeErrorWithReason()) + waiter.RejectOnEvent(f.page, "crash", fmt.Errorf("Navigation failed because page crashed!")) + waiter.RejectOnEvent(f.page, "framedetached", fmt.Errorf("Navigating frame was detached!"), func(payload interface{}) bool { + frame, ok := payload.(*frameImpl) + if ok && frame == f { + return true + } + return false + }) + return waiter, nil +} + +func (f *frameImpl) onFrameNavigated(ev map[string]interface{}) { + f.Lock() + f.url = ev["url"].(string) + f.name = ev["name"].(string) + f.Unlock() + f.Emit("navigated", ev) + _, ok := ev["error"] + if !ok && f.page != nil { + f.page.Emit("framenavigated", f) + } +} + +func (f *frameImpl) onLoadState(ev map[string]interface{}) { + if ev["add"] != nil { + add := ev["add"].(string) + f.loadStates.Add(add) + f.Emit("loadstate", add) + if f.parentFrame == nil && f.page != nil { + if add == "load" || add == "domcontentloaded" { + f.Page().Emit(add, f.page) + } + } + } else if ev["remove"] != nil { + remove := ev["remove"].(string) + f.loadStates.Remove(remove) + } +} + +func (f *frameImpl) QuerySelector(selector string, options ...FrameQuerySelectorOptions) (ElementHandle, error) { + params := map[string]interface{}{ + "selector": selector, + } + if len(options) == 1 { + params["strict"] = options[0].Strict + } + channel, err := f.channel.Send("querySelector", params) + if err != nil { + return nil, err + } + if channel == nil { + return nil, nil + } + return fromChannel(channel).(*elementHandleImpl), nil +} + +func (f *frameImpl) QuerySelectorAll(selector string) ([]ElementHandle, error) { + channels, err := f.channel.Send("querySelectorAll", map[string]interface{}{ + "selector": selector, + }) + if err != nil { + return nil, err + } + elements := make([]ElementHandle, 0) + for _, channel := range channels.([]interface{}) { + elements = append(elements, fromChannel(channel).(*elementHandleImpl)) + } + return elements, nil +} + +func (f *frameImpl) Evaluate(expression string, options ...interface{}) (interface{}, error) { + var arg interface{} + if len(options) == 1 { + arg = options[0] + } + result, err := f.channel.Send("evaluateExpression", map[string]interface{}{ + "expression": expression, + "arg": serializeArgument(arg), + }) + if err != nil { + return nil, err + } + return parseResult(result), nil +} + 
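+// A minimal caller-side sketch of Evaluate (illustrative only; the frame
+// value and the expressions are hypothetical, not part of this file):
+//
+//	title, err := frame.Evaluate("document.title")
+//	doubled, err := frame.Evaluate("x => x * 2", 21) // arg goes through serializeArgument
+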
+func (f *frameImpl) EvalOnSelector(selector string, expression string, arg interface{}, options ...FrameEvalOnSelectorOptions) (interface{}, error) { + params := map[string]interface{}{ + "selector": selector, + "expression": expression, + "arg": serializeArgument(arg), + } + if len(options) == 1 && options[0].Strict != nil { + params["strict"] = *options[0].Strict + } + + result, err := f.channel.Send("evalOnSelector", params) + if err != nil { + return nil, err + } + return parseResult(result), nil +} + +func (f *frameImpl) EvalOnSelectorAll(selector string, expression string, options ...interface{}) (interface{}, error) { + var arg interface{} + if len(options) == 1 { + arg = options[0] + } + result, err := f.channel.Send("evalOnSelectorAll", map[string]interface{}{ + "selector": selector, + "expression": expression, + "arg": serializeArgument(arg), + }) + if err != nil { + return nil, err + } + return parseResult(result), nil +} + +func (f *frameImpl) EvaluateHandle(expression string, options ...interface{}) (JSHandle, error) { + var arg interface{} + if len(options) == 1 { + arg = options[0] + } + result, err := f.channel.Send("evaluateExpressionHandle", map[string]interface{}{ + "expression": expression, + "arg": serializeArgument(arg), + }) + if err != nil { + return nil, err + } + channelOwner := fromChannel(result) + if channelOwner == nil { + return nil, nil + } + return channelOwner.(JSHandle), nil +} + +func (f *frameImpl) Click(selector string, options ...FrameClickOptions) error { + _, err := f.channel.Send("click", map[string]interface{}{ + "selector": selector, + }, options) + return err +} + +func (f *frameImpl) WaitForSelector(selector string, options ...FrameWaitForSelectorOptions) (ElementHandle, error) { + channel, err := f.channel.Send("waitForSelector", map[string]interface{}{ + "selector": selector, + }, options) + if err != nil { + return nil, err + } + channelOwner := fromNullableChannel(channel) + if channelOwner == nil { + return nil, nil + } + return channelOwner.(*elementHandleImpl), nil +} + +func (f *frameImpl) DispatchEvent(selector, typ string, eventInit interface{}, options ...FrameDispatchEventOptions) error { + _, err := f.channel.Send("dispatchEvent", map[string]interface{}{ + "selector": selector, + "type": typ, + "eventInit": serializeArgument(eventInit), + }) + return err +} + +func (f *frameImpl) InnerText(selector string, options ...FrameInnerTextOptions) (string, error) { + innerText, err := f.channel.Send("innerText", map[string]interface{}{ + "selector": selector, + }, options) + if innerText == nil { + return "", err + } + return innerText.(string), err +} + +func (f *frameImpl) InnerHTML(selector string, options ...FrameInnerHTMLOptions) (string, error) { + innerHTML, err := f.channel.Send("innerHTML", map[string]interface{}{ + "selector": selector, + }, options) + if innerHTML == nil { + return "", err + } + return innerHTML.(string), err +} + +func (f *frameImpl) GetAttribute(selector string, name string, options ...FrameGetAttributeOptions) (string, error) { + attribute, err := f.channel.Send("getAttribute", map[string]interface{}{ + "selector": selector, + "name": name, + }, options) + if attribute == nil { + return "", err + } + return attribute.(string), err +} + +func (f *frameImpl) Hover(selector string, options ...FrameHoverOptions) error { + _, err := f.channel.Send("hover", map[string]interface{}{ + "selector": selector, + }, options) + return err +} + +func (f *frameImpl) SetInputFiles(selector string, files interface{}, options 
...FrameSetInputFilesOptions) error { + params, err := convertInputFiles(files, f.page.browserContext) + if err != nil { + return err + } + params.Selector = &selector + _, err = f.channel.Send("setInputFiles", params, options) + return err +} + +func (f *frameImpl) Type(selector, text string, options ...FrameTypeOptions) error { + _, err := f.channel.Send("type", map[string]interface{}{ + "selector": selector, + "text": text, + }, options) + return err +} + +func (f *frameImpl) Press(selector, key string, options ...FramePressOptions) error { + _, err := f.channel.Send("press", map[string]interface{}{ + "selector": selector, + "key": key, + }, options) + return err +} + +func (f *frameImpl) Check(selector string, options ...FrameCheckOptions) error { + _, err := f.channel.Send("check", map[string]interface{}{ + "selector": selector, + }, options) + return err +} + +func (f *frameImpl) Uncheck(selector string, options ...FrameUncheckOptions) error { + _, err := f.channel.Send("uncheck", map[string]interface{}{ + "selector": selector, + }, options) + return err +} + +func (f *frameImpl) WaitForTimeout(timeout float64) { + time.Sleep(time.Duration(timeout) * time.Millisecond) +} + +func (f *frameImpl) WaitForFunction(expression string, arg interface{}, options ...FrameWaitForFunctionOptions) (JSHandle, error) { + var option FrameWaitForFunctionOptions + if len(options) == 1 { + option = options[0] + } + result, err := f.channel.Send("waitForFunction", map[string]interface{}{ + "expression": expression, + "arg": serializeArgument(arg), + "timeout": option.Timeout, + "polling": option.Polling, + }) + if err != nil { + return nil, err + } + handle := fromChannel(result) + if handle == nil { + return nil, nil + } + return handle.(*jsHandleImpl), nil +} + +func (f *frameImpl) Title() (string, error) { + title, err := f.channel.Send("title") + if title == nil { + return "", err + } + return title.(string), err +} + +func (f *frameImpl) ChildFrames() []Frame { + return f.childFrames +} + +func (f *frameImpl) Dblclick(selector string, options ...FrameDblclickOptions) error { + _, err := f.channel.Send("dblclick", map[string]interface{}{ + "selector": selector, + }, options) + return err +} + +func (f *frameImpl) Fill(selector string, value string, options ...FrameFillOptions) error { + _, err := f.channel.Send("fill", map[string]interface{}{ + "selector": selector, + "value": value, + }, options) + return err +} + +func (f *frameImpl) Focus(selector string, options ...FrameFocusOptions) error { + _, err := f.channel.Send("focus", map[string]interface{}{ + "selector": selector, + }, options) + return err +} + +func (f *frameImpl) FrameElement() (ElementHandle, error) { + channel, err := f.channel.Send("frameElement") + if err != nil { + return nil, err + } + return fromChannel(channel).(*elementHandleImpl), nil +} + +func (f *frameImpl) IsDetached() bool { + return f.detached +} + +func (f *frameImpl) ParentFrame() Frame { + return f.parentFrame +} + +func (f *frameImpl) TextContent(selector string, options ...FrameTextContentOptions) (string, error) { + textContent, err := f.channel.Send("textContent", map[string]interface{}{ + "selector": selector, + }, options) + if textContent == nil { + return "", err + } + return textContent.(string), err +} + +func (f *frameImpl) Tap(selector string, options ...FrameTapOptions) error { + _, err := f.channel.Send("tap", map[string]interface{}{ + "selector": selector, + }, options) + return err +} + +func (f *frameImpl) SelectOption(selector string, values 
SelectOptionValues, options ...FrameSelectOptionOptions) ([]string, error) { + opts := convertSelectOptionSet(values) + + m := make(map[string]interface{}) + m["selector"] = selector + for k, v := range opts { + m[k] = v + } + selected, err := f.channel.Send("selectOption", m, options) + if err != nil { + return nil, err + } + + return transformToStringList(selected), nil +} + +func (f *frameImpl) IsChecked(selector string, options ...FrameIsCheckedOptions) (bool, error) { + checked, err := f.channel.Send("isChecked", map[string]interface{}{ + "selector": selector, + }, options) + if err != nil { + return false, err + } + return checked.(bool), nil +} + +func (f *frameImpl) IsDisabled(selector string, options ...FrameIsDisabledOptions) (bool, error) { + disabled, err := f.channel.Send("isDisabled", map[string]interface{}{ + "selector": selector, + }, options) + if err != nil { + return false, err + } + return disabled.(bool), nil +} + +func (f *frameImpl) IsEditable(selector string, options ...FrameIsEditableOptions) (bool, error) { + editable, err := f.channel.Send("isEditable", map[string]interface{}{ + "selector": selector, + }, options) + if err != nil { + return false, err + } + return editable.(bool), nil +} + +func (f *frameImpl) IsEnabled(selector string, options ...FrameIsEnabledOptions) (bool, error) { + enabled, err := f.channel.Send("isEnabled", map[string]interface{}{ + "selector": selector, + }, options) + if err != nil { + return false, err + } + return enabled.(bool), nil +} + +func (f *frameImpl) IsHidden(selector string, options ...FrameIsHiddenOptions) (bool, error) { + hidden, err := f.channel.Send("isHidden", map[string]interface{}{ + "selector": selector, + }, options) + if err != nil { + return false, err + } + return hidden.(bool), nil +} + +func (f *frameImpl) IsVisible(selector string, options ...FrameIsVisibleOptions) (bool, error) { + visible, err := f.channel.Send("isVisible", map[string]interface{}{ + "selector": selector, + }, options) + if err != nil { + return false, err + } + return visible.(bool), nil +} + +func (f *frameImpl) InputValue(selector string, options ...FrameInputValueOptions) (string, error) { + value, err := f.channel.Send("inputValue", map[string]interface{}{ + "selector": selector, + }, options) + if value == nil { + return "", err + } + return value.(string), err +} + +func (f *frameImpl) DragAndDrop(source, target string, options ...FrameDragAndDropOptions) error { + _, err := f.channel.Send("dragAndDrop", map[string]interface{}{ + "source": source, + "target": target, + }, options) + return err +} + +func (f *frameImpl) SetChecked(selector string, checked bool, options ...FrameSetCheckedOptions) error { + if checked { + _, err := f.channel.Send("check", map[string]interface{}{ + "selector": selector, + }, options) + return err + } else { + _, err := f.channel.Send("uncheck", map[string]interface{}{ + "selector": selector, + }, options) + return err + } +} + +func (f *frameImpl) Locator(selector string, options ...FrameLocatorOptions) Locator { + var option LocatorOptions + if len(options) == 1 { + option = LocatorOptions{ + Has: options[0].Has, + HasNot: options[0].HasNot, + HasText: options[0].HasText, + HasNotText: options[0].HasNotText, + } + } + return newLocator(f, selector, option) +} + +func (f *frameImpl) GetByAltText(text interface{}, options ...FrameGetByAltTextOptions) Locator { + exact := false + if len(options) == 1 { + if *options[0].Exact { + exact = true + } + } + return f.Locator(getByAltTextSelector(text, exact)) +} + 
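+// Caller-side sketch of the GetBy* locator helpers (illustrative; the frame
+// value and the strings are hypothetical):
+//
+//	exact := true
+//	logo := frame.GetByAltText("logo", playwright.FrameGetByAltTextOptions{Exact: &exact})
+//	field := frame.GetByLabel("Username")
+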
+func (f *frameImpl) GetByLabel(text interface{}, options ...FrameGetByLabelOptions) Locator {
+	exact := false
+	if len(options) == 1 {
+		if options[0].Exact != nil && *options[0].Exact {
+			exact = true
+		}
+	}
+	return f.Locator(getByLabelSelector(text, exact))
+}
+
+func (f *frameImpl) GetByPlaceholder(text interface{}, options ...FrameGetByPlaceholderOptions) Locator {
+	exact := false
+	if len(options) == 1 {
+		if options[0].Exact != nil && *options[0].Exact {
+			exact = true
+		}
+	}
+	return f.Locator(getByPlaceholderSelector(text, exact))
+}
+
+func (f *frameImpl) GetByRole(role AriaRole, options ...FrameGetByRoleOptions) Locator {
+	if len(options) == 1 {
+		return f.Locator(getByRoleSelector(role, LocatorGetByRoleOptions(options[0])))
+	}
+	return f.Locator(getByRoleSelector(role))
+}
+
+func (f *frameImpl) GetByTestId(testId interface{}) Locator {
+	return f.Locator(getByTestIdSelector(getTestIdAttributeName(), testId))
+}
+
+func (f *frameImpl) GetByText(text interface{}, options ...FrameGetByTextOptions) Locator {
+	exact := false
+	if len(options) == 1 {
+		if options[0].Exact != nil && *options[0].Exact {
+			exact = true
+		}
+	}
+	return f.Locator(getByTextSelector(text, exact))
+}
+
+func (f *frameImpl) GetByTitle(text interface{}, options ...FrameGetByTitleOptions) Locator {
+	exact := false
+	if len(options) == 1 {
+		if options[0].Exact != nil && *options[0].Exact {
+			exact = true
+		}
+	}
+	return f.Locator(getByTitleSelector(text, exact))
+}
+
+func (f *frameImpl) FrameLocator(selector string) FrameLocator {
+	return newFrameLocator(f, selector)
+}
+
+func (f *frameImpl) highlight(selector string) error {
+	_, err := f.channel.Send("highlight", map[string]interface{}{
+		"selector": selector,
+	})
+	return err
+}
+
+func (f *frameImpl) queryCount(selector string) (int, error) {
+	response, err := f.channel.Send("queryCount", map[string]interface{}{
+		"selector": selector,
+	})
+	if err != nil {
+		return 0, err
+	}
+	return int(response.(float64)), nil
+}
diff --git a/vendor/github.com/playwright-community/playwright-go/frame_locator.go b/vendor/github.com/playwright-community/playwright-go/frame_locator.go
new file mode 100644
index 0000000..d4b8fd0
--- /dev/null
+++ b/vendor/github.com/playwright-community/playwright-go/frame_locator.go
@@ -0,0 +1,130 @@
+package playwright
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+)
+
+type frameLocatorImpl struct {
+	frame         *frameImpl
+	frameSelector string
+}
+
+func newFrameLocator(frame *frameImpl, frameSelector string) *frameLocatorImpl {
+	return &frameLocatorImpl{frame: frame, frameSelector: frameSelector}
+}
+
+func (fl *frameLocatorImpl) First() FrameLocator {
+	return newFrameLocator(fl.frame, fl.frameSelector+" >> nth=0")
+}
+
+func (fl *frameLocatorImpl) FrameLocator(selector string) FrameLocator {
+	return newFrameLocator(fl.frame, fl.frameSelector+" >> internal:control=enter-frame >> "+selector)
+}
+
+func (fl *frameLocatorImpl) GetByAltText(text interface{}, options ...FrameLocatorGetByAltTextOptions) Locator {
+	exact := false
+	if len(options) == 1 {
+		if options[0].Exact != nil && *options[0].Exact {
+			exact = true
+		}
+	}
+	return fl.Locator(getByAltTextSelector(text, exact))
+}
+
+func (fl *frameLocatorImpl) GetByLabel(text interface{}, options ...FrameLocatorGetByLabelOptions) Locator {
+	exact := false
+	if len(options) == 1 {
+		if options[0].Exact != nil && *options[0].Exact {
+			exact = true
+		}
+	}
+	return fl.Locator(getByLabelSelector(text, exact))
+}
+
+func (fl *frameLocatorImpl) GetByPlaceholder(text interface{}, options ...FrameLocatorGetByPlaceholderOptions) Locator {
+	exact := false
+	if len(options) == 1 {
+		if options[0].Exact != nil && *options[0].Exact {
+			exact = true
+		}
+	}
+	return fl.Locator(getByPlaceholderSelector(text, exact))
+}
+
+func (fl *frameLocatorImpl) GetByRole(role AriaRole, options ...FrameLocatorGetByRoleOptions) Locator {
+	if len(options) == 1 {
+		return fl.Locator(getByRoleSelector(role, LocatorGetByRoleOptions(options[0])))
+	}
+	return fl.Locator(getByRoleSelector(role))
+}
+
+func (fl *frameLocatorImpl) GetByTestId(testId interface{}) Locator {
+	return fl.Locator(getByTestIdSelector(getTestIdAttributeName(), testId))
+}
+
+func (fl *frameLocatorImpl) GetByText(text interface{}, options ...FrameLocatorGetByTextOptions) Locator {
+	exact := false
+	if len(options) == 1 {
+		if options[0].Exact != nil && *options[0].Exact {
+			exact = true
+		}
+	}
+	return fl.Locator(getByTextSelector(text, exact))
+}
+
+func (fl *frameLocatorImpl) GetByTitle(text interface{}, options ...FrameLocatorGetByTitleOptions) Locator {
+	exact := false
+	if len(options) == 1 {
+		if options[0].Exact != nil && *options[0].Exact {
+			exact = true
+		}
+	}
+	return fl.Locator(getByTitleSelector(text, exact))
+}
+
+func (fl *frameLocatorImpl) Last() FrameLocator {
+	return newFrameLocator(fl.frame, fl.frameSelector+" >> nth=-1")
+}
+
+func (fl *frameLocatorImpl) Locator(selectorOrLocator interface{}, options ...FrameLocatorLocatorOptions) Locator {
+	var option LocatorOptions
+	if len(options) == 1 {
+		option = LocatorOptions{
+			Has:        options[0].Has,
+			HasNot:     options[0].HasNot,
+			HasText:    options[0].HasText,
+			HasNotText: options[0].HasNotText,
+		}
+	}
+
+	selector, ok := selectorOrLocator.(string)
+	if ok {
+		return newLocator(fl.frame, fl.frameSelector+" >> internal:control=enter-frame >> "+selector, option)
+	}
+	locator, ok := selectorOrLocator.(*locatorImpl)
+	if ok {
+		if fl.frame != locator.frame {
+			locator.err = errors.Join(locator.err, ErrLocatorNotSameFrame)
+			return locator
+		}
+		return newLocator(locator.frame,
+			fmt.Sprintf("%s >> internal:control=enter-frame >> %s", fl.frameSelector, locator.selector),
+			option,
+		)
+	}
+	return &locatorImpl{
+		frame:    fl.frame,
+		selector: fl.frameSelector,
+		err:      fmt.Errorf("invalid locator parameter: %v", selectorOrLocator),
+	}
+}
+
+func (fl *frameLocatorImpl) Nth(index int) FrameLocator {
+	return newFrameLocator(fl.frame, fl.frameSelector+" >> nth="+strconv.Itoa(index))
+}
+
+func (fl *frameLocatorImpl) Owner() Locator {
+	return newLocator(fl.frame, fl.frameSelector)
+}
diff --git a/vendor/github.com/playwright-community/playwright-go/generated-enums.go b/vendor/github.com/playwright-community/playwright-go/generated-enums.go
new file mode 100644
index 0000000..92c20c6
--- /dev/null
+++ b/vendor/github.com/playwright-community/playwright-go/generated-enums.go
@@ -0,0 +1,404 @@
+package playwright
+
+func getMixedState(in string) *MixedState {
+	v := MixedState(in)
+	return &v
+}
+
+type MixedState string
+
+var (
+	MixedStateOn *MixedState = getMixedState("On")
+	MixedStateOff = getMixedState("Off")
+	MixedStateMixed = getMixedState("Mixed")
+)
+
+func getElementState(in string) *ElementState {
+	v := ElementState(in)
+	return &v
+}
+
+type ElementState string
+
+var (
+	ElementStateVisible *ElementState = getElementState("visible")
+	ElementStateHidden = getElementState("hidden")
+	ElementStateStable = getElementState("stable")
+	ElementStateEnabled = getElementState("enabled")
+	ElementStateDisabled = getElementState("disabled")
+	ElementStateEditable = getElementState("editable")
+)
+
+func getAriaRole(in string) *AriaRole {
+	v := AriaRole(in)
+	return &v
+}
+
+type AriaRole string
+
+var (
+	AriaRoleAlert *AriaRole = getAriaRole("alert")
+	AriaRoleAlertdialog = 
getAriaRole("alertdialog") + AriaRoleApplication = getAriaRole("application") + AriaRoleArticle = getAriaRole("article") + AriaRoleBanner = getAriaRole("banner") + AriaRoleBlockquote = getAriaRole("blockquote") + AriaRoleButton = getAriaRole("button") + AriaRoleCaption = getAriaRole("caption") + AriaRoleCell = getAriaRole("cell") + AriaRoleCheckbox = getAriaRole("checkbox") + AriaRoleCode = getAriaRole("code") + AriaRoleColumnheader = getAriaRole("columnheader") + AriaRoleCombobox = getAriaRole("combobox") + AriaRoleComplementary = getAriaRole("complementary") + AriaRoleContentinfo = getAriaRole("contentinfo") + AriaRoleDefinition = getAriaRole("definition") + AriaRoleDeletion = getAriaRole("deletion") + AriaRoleDialog = getAriaRole("dialog") + AriaRoleDirectory = getAriaRole("directory") + AriaRoleDocument = getAriaRole("document") + AriaRoleEmphasis = getAriaRole("emphasis") + AriaRoleFeed = getAriaRole("feed") + AriaRoleFigure = getAriaRole("figure") + AriaRoleForm = getAriaRole("form") + AriaRoleGeneric = getAriaRole("generic") + AriaRoleGrid = getAriaRole("grid") + AriaRoleGridcell = getAriaRole("gridcell") + AriaRoleGroup = getAriaRole("group") + AriaRoleHeading = getAriaRole("heading") + AriaRoleImg = getAriaRole("img") + AriaRoleInsertion = getAriaRole("insertion") + AriaRoleLink = getAriaRole("link") + AriaRoleList = getAriaRole("list") + AriaRoleListbox = getAriaRole("listbox") + AriaRoleListitem = getAriaRole("listitem") + AriaRoleLog = getAriaRole("log") + AriaRoleMain = getAriaRole("main") + AriaRoleMarquee = getAriaRole("marquee") + AriaRoleMath = getAriaRole("math") + AriaRoleMeter = getAriaRole("meter") + AriaRoleMenu = getAriaRole("menu") + AriaRoleMenubar = getAriaRole("menubar") + AriaRoleMenuitem = getAriaRole("menuitem") + AriaRoleMenuitemcheckbox = getAriaRole("menuitemcheckbox") + AriaRoleMenuitemradio = getAriaRole("menuitemradio") + AriaRoleNavigation = getAriaRole("navigation") + AriaRoleNone = getAriaRole("none") + AriaRoleNote = getAriaRole("note") + AriaRoleOption = getAriaRole("option") + AriaRoleParagraph = getAriaRole("paragraph") + AriaRolePresentation = getAriaRole("presentation") + AriaRoleProgressbar = getAriaRole("progressbar") + AriaRoleRadio = getAriaRole("radio") + AriaRoleRadiogroup = getAriaRole("radiogroup") + AriaRoleRegion = getAriaRole("region") + AriaRoleRow = getAriaRole("row") + AriaRoleRowgroup = getAriaRole("rowgroup") + AriaRoleRowheader = getAriaRole("rowheader") + AriaRoleScrollbar = getAriaRole("scrollbar") + AriaRoleSearch = getAriaRole("search") + AriaRoleSearchbox = getAriaRole("searchbox") + AriaRoleSeparator = getAriaRole("separator") + AriaRoleSlider = getAriaRole("slider") + AriaRoleSpinbutton = getAriaRole("spinbutton") + AriaRoleStatus = getAriaRole("status") + AriaRoleStrong = getAriaRole("strong") + AriaRoleSubscript = getAriaRole("subscript") + AriaRoleSuperscript = getAriaRole("superscript") + AriaRoleSwitch = getAriaRole("switch") + AriaRoleTab = getAriaRole("tab") + AriaRoleTable = getAriaRole("table") + AriaRoleTablist = getAriaRole("tablist") + AriaRoleTabpanel = getAriaRole("tabpanel") + AriaRoleTerm = getAriaRole("term") + AriaRoleTextbox = getAriaRole("textbox") + AriaRoleTime = getAriaRole("time") + AriaRoleTimer = getAriaRole("timer") + AriaRoleToolbar = getAriaRole("toolbar") + AriaRoleTooltip = getAriaRole("tooltip") + AriaRoleTree = getAriaRole("tree") + AriaRoleTreegrid = getAriaRole("treegrid") + AriaRoleTreeitem = getAriaRole("treeitem") +) + +func getColorScheme(in string) *ColorScheme { + v := 
ColorScheme(in) + return &v +} + +type ColorScheme string + +var ( + ColorSchemeLight *ColorScheme = getColorScheme("light") + ColorSchemeDark = getColorScheme("dark") + ColorSchemeNoPreference = getColorScheme("no-preference") + ColorSchemeNoOverride = getColorScheme("no-override") +) + +func getForcedColors(in string) *ForcedColors { + v := ForcedColors(in) + return &v +} + +type ForcedColors string + +var ( + ForcedColorsActive *ForcedColors = getForcedColors("active") + ForcedColorsNone = getForcedColors("none") + ForcedColorsNoOverride = getForcedColors("no-override") +) + +func getHarContentPolicy(in string) *HarContentPolicy { + v := HarContentPolicy(in) + return &v +} + +type HarContentPolicy string + +var ( + HarContentPolicyOmit *HarContentPolicy = getHarContentPolicy("omit") + HarContentPolicyEmbed = getHarContentPolicy("embed") + HarContentPolicyAttach = getHarContentPolicy("attach") +) + +func getHarMode(in string) *HarMode { + v := HarMode(in) + return &v +} + +type HarMode string + +var ( + HarModeFull *HarMode = getHarMode("full") + HarModeMinimal = getHarMode("minimal") +) + +func getReducedMotion(in string) *ReducedMotion { + v := ReducedMotion(in) + return &v +} + +type ReducedMotion string + +var ( + ReducedMotionReduce *ReducedMotion = getReducedMotion("reduce") + ReducedMotionNoPreference = getReducedMotion("no-preference") + ReducedMotionNoOverride = getReducedMotion("no-override") +) + +func getServiceWorkerPolicy(in string) *ServiceWorkerPolicy { + v := ServiceWorkerPolicy(in) + return &v +} + +type ServiceWorkerPolicy string + +var ( + ServiceWorkerPolicyAllow *ServiceWorkerPolicy = getServiceWorkerPolicy("allow") + ServiceWorkerPolicyBlock = getServiceWorkerPolicy("block") +) + +func getSameSiteAttribute(in string) *SameSiteAttribute { + v := SameSiteAttribute(in) + return &v +} + +type SameSiteAttribute string + +var ( + SameSiteAttributeStrict *SameSiteAttribute = getSameSiteAttribute("Strict") + SameSiteAttributeLax = getSameSiteAttribute("Lax") + SameSiteAttributeNone = getSameSiteAttribute("None") +) + +func getHarNotFound(in string) *HarNotFound { + v := HarNotFound(in) + return &v +} + +type HarNotFound string + +var ( + HarNotFoundAbort *HarNotFound = getHarNotFound("abort") + HarNotFoundFallback = getHarNotFound("fallback") +) + +func getRouteFromHarUpdateContentPolicy(in string) *RouteFromHarUpdateContentPolicy { + v := RouteFromHarUpdateContentPolicy(in) + return &v +} + +type RouteFromHarUpdateContentPolicy string + +var ( + RouteFromHarUpdateContentPolicyEmbed *RouteFromHarUpdateContentPolicy = getRouteFromHarUpdateContentPolicy("embed") + RouteFromHarUpdateContentPolicyAttach = getRouteFromHarUpdateContentPolicy("attach") +) + +func getUnrouteBehavior(in string) *UnrouteBehavior { + v := UnrouteBehavior(in) + return &v +} + +type UnrouteBehavior string + +var ( + UnrouteBehaviorWait *UnrouteBehavior = getUnrouteBehavior("wait") + UnrouteBehaviorIgnoreErrors = getUnrouteBehavior("ignoreErrors") + UnrouteBehaviorDefault = getUnrouteBehavior("default") +) + +func getMouseButton(in string) *MouseButton { + v := MouseButton(in) + return &v +} + +type MouseButton string + +var ( + MouseButtonLeft *MouseButton = getMouseButton("left") + MouseButtonRight = getMouseButton("right") + MouseButtonMiddle = getMouseButton("middle") +) + +func getKeyboardModifier(in string) *KeyboardModifier { + v := KeyboardModifier(in) + return &v +} + +type KeyboardModifier string + +var ( + KeyboardModifierAlt *KeyboardModifier = getKeyboardModifier("Alt") + 
KeyboardModifierControl = getKeyboardModifier("Control") + KeyboardModifierControlOrMeta = getKeyboardModifier("ControlOrMeta") + KeyboardModifierMeta = getKeyboardModifier("Meta") + KeyboardModifierShift = getKeyboardModifier("Shift") +) + +func getScreenshotAnimations(in string) *ScreenshotAnimations { + v := ScreenshotAnimations(in) + return &v +} + +type ScreenshotAnimations string + +var ( + ScreenshotAnimationsDisabled *ScreenshotAnimations = getScreenshotAnimations("disabled") + ScreenshotAnimationsAllow = getScreenshotAnimations("allow") +) + +func getScreenshotCaret(in string) *ScreenshotCaret { + v := ScreenshotCaret(in) + return &v +} + +type ScreenshotCaret string + +var ( + ScreenshotCaretHide *ScreenshotCaret = getScreenshotCaret("hide") + ScreenshotCaretInitial = getScreenshotCaret("initial") +) + +func getScreenshotScale(in string) *ScreenshotScale { + v := ScreenshotScale(in) + return &v +} + +type ScreenshotScale string + +var ( + ScreenshotScaleCss *ScreenshotScale = getScreenshotScale("css") + ScreenshotScaleDevice = getScreenshotScale("device") +) + +func getScreenshotType(in string) *ScreenshotType { + v := ScreenshotType(in) + return &v +} + +type ScreenshotType string + +var ( + ScreenshotTypePng *ScreenshotType = getScreenshotType("png") + ScreenshotTypeJpeg = getScreenshotType("jpeg") +) + +func getWaitForSelectorState(in string) *WaitForSelectorState { + v := WaitForSelectorState(in) + return &v +} + +type WaitForSelectorState string + +var ( + WaitForSelectorStateAttached *WaitForSelectorState = getWaitForSelectorState("attached") + WaitForSelectorStateDetached = getWaitForSelectorState("detached") + WaitForSelectorStateVisible = getWaitForSelectorState("visible") + WaitForSelectorStateHidden = getWaitForSelectorState("hidden") +) + +func getWaitUntilState(in string) *WaitUntilState { + v := WaitUntilState(in) + return &v +} + +type WaitUntilState string + +var ( + WaitUntilStateLoad *WaitUntilState = getWaitUntilState("load") + WaitUntilStateDomcontentloaded = getWaitUntilState("domcontentloaded") + WaitUntilStateNetworkidle = getWaitUntilState("networkidle") + WaitUntilStateCommit = getWaitUntilState("commit") +) + +func getLoadState(in string) *LoadState { + v := LoadState(in) + return &v +} + +type LoadState string + +var ( + LoadStateLoad *LoadState = getLoadState("load") + LoadStateDomcontentloaded = getLoadState("domcontentloaded") + LoadStateNetworkidle = getLoadState("networkidle") +) + +func getContrast(in string) *Contrast { + v := Contrast(in) + return &v +} + +type Contrast string + +var ( + ContrastNoPreference *Contrast = getContrast("no-preference") + ContrastMore = getContrast("more") + ContrastNoOverride = getContrast("no-override") +) + +func getMedia(in string) *Media { + v := Media(in) + return &v +} + +type Media string + +var ( + MediaScreen *Media = getMedia("screen") + MediaPrint = getMedia("print") + MediaNoOverride = getMedia("no-override") +) + +func getHttpCredentialsSend(in string) *HttpCredentialsSend { + v := HttpCredentialsSend(in) + return &v +} + +type HttpCredentialsSend string + +var ( + HttpCredentialsSendUnauthorized *HttpCredentialsSend = getHttpCredentialsSend("unauthorized") + HttpCredentialsSendAlways = getHttpCredentialsSend("always") +) diff --git a/vendor/github.com/playwright-community/playwright-go/generated-interfaces.go b/vendor/github.com/playwright-community/playwright-go/generated-interfaces.go new file mode 100644 index 0000000..187dc91 --- /dev/null +++ 
b/vendor/github.com/playwright-community/playwright-go/generated-interfaces.go @@ -0,0 +1,4658 @@ +package playwright + +// Exposes API that can be used for the Web API testing. This class is used for creating [APIRequestContext] instance +// which in turn can be used for sending web requests. An instance of this class can be obtained via +// [Playwright.Request]. For more information see [APIRequestContext]. +type APIRequest interface { + // Creates new instances of [APIRequestContext]. + NewContext(options ...APIRequestNewContextOptions) (APIRequestContext, error) +} + +// This API is used for the Web API testing. You can use it to trigger API endpoints, configure micro-services, +// prepare environment or the service to your e2e test. +// Each Playwright browser context has associated with it [APIRequestContext] instance which shares cookie storage +// with the browser context and can be accessed via [BrowserContext.Request] or [Page.Request]. It is also possible to +// create a new APIRequestContext instance manually by calling [APIRequest.NewContext]. +// **Cookie management** +// [APIRequestContext] returned by [BrowserContext.Request] and [Page.Request] shares cookie storage with the +// corresponding [BrowserContext]. Each API request will have `Cookie` header populated with the values from the +// browser context. If the API response contains `Set-Cookie` header it will automatically update [BrowserContext] +// cookies and requests made from the page will pick them up. This means that if you log in using this API, your e2e +// test will be logged in and vice versa. +// If you want API requests to not interfere with the browser cookies you should create a new [APIRequestContext] by +// calling [APIRequest.NewContext]. Such `APIRequestContext` object will have its own isolated cookie storage. +type APIRequestContext interface { + // Sends HTTP(S) [DELETE] request and returns its + // response. The method will populate request cookies from the context and update context cookies from the response. + // The method will automatically follow redirects. + // + // url: Target URL. + // + // [DELETE]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/DELETE + Delete(url string, options ...APIRequestContextDeleteOptions) (APIResponse, error) + + // All responses returned by [APIRequestContext.Get] and similar methods are stored in the memory, so that you can + // later call [APIResponse.Body].This method discards all its resources, calling any method on disposed + // [APIRequestContext] will throw an exception. + Dispose(options ...APIRequestContextDisposeOptions) error + + // Sends HTTP(S) request and returns its response. The method will populate request cookies from the context and + // update context cookies from the response. The method will automatically follow redirects. + // + // urlOrRequest: Target URL or Request to get all parameters from. + Fetch(urlOrRequest interface{}, options ...APIRequestContextFetchOptions) (APIResponse, error) + + // Sends HTTP(S) [GET] request and returns its + // response. The method will populate request cookies from the context and update context cookies from the response. + // The method will automatically follow redirects. + // + // url: Target URL. + // + // [GET]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/GET + Get(url string, options ...APIRequestContextGetOptions) (APIResponse, error) + + // Sends HTTP(S) [HEAD] request and returns its + // response. 
The method will populate request cookies from the context and update context cookies from the response. + // The method will automatically follow redirects. + // + // url: Target URL. + // + // [HEAD]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/HEAD + Head(url string, options ...APIRequestContextHeadOptions) (APIResponse, error) + + // Sends HTTP(S) [PATCH] request and returns its + // response. The method will populate request cookies from the context and update context cookies from the response. + // The method will automatically follow redirects. + // + // url: Target URL. + // + // [PATCH]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/PATCH + Patch(url string, options ...APIRequestContextPatchOptions) (APIResponse, error) + + // Sends HTTP(S) [POST] request and returns its + // response. The method will populate request cookies from the context and update context cookies from the response. + // The method will automatically follow redirects. + // + // url: Target URL. + // + // [POST]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/POST + Post(url string, options ...APIRequestContextPostOptions) (APIResponse, error) + + // Sends HTTP(S) [PUT] request and returns its + // response. The method will populate request cookies from the context and update context cookies from the response. + // The method will automatically follow redirects. + // + // url: Target URL. + // + // [PUT]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/PUT + Put(url string, options ...APIRequestContextPutOptions) (APIResponse, error) + + // Returns storage state for this request context, contains current cookies and local storage snapshot if it was + // passed to the constructor. + StorageState(path ...string) (*StorageState, error) +} + +// [APIResponse] class represents responses returned by [APIRequestContext.Get] and similar methods. +type APIResponse interface { + // Returns the buffer with response body. + Body() ([]byte, error) + + // Disposes the body of this response. If not called then the body will stay in memory until the context closes. + Dispose() error + + // An object with all the response HTTP headers associated with this response. + Headers() map[string]string + + // An array with all the response HTTP headers associated with this response. Header names are not lower-cased. + // Headers with multiple entries, such as `Set-Cookie`, appear in the array multiple times. + HeadersArray() []NameValue + + // Returns the JSON representation of response body. + // This method will throw if the response body is not parsable via `JSON.parse`. + JSON(v interface{}) error + + // Contains a boolean stating whether the response was successful (status in the range 200-299) or not. + Ok() bool + + // Contains the status code of the response (e.g., 200 for a success). + Status() int + + // Contains the status text of the response (e.g. usually an "OK" for a success). + StatusText() string + + // Returns the text representation of response body. + Text() (string, error) + + // Contains the URL of the response. + URL() string +} + +// The [APIResponseAssertions] class provides assertion methods that can be used to make assertions about the +// [APIResponse] in the tests. +type APIResponseAssertions interface { + // Makes the assertion check for the opposite condition. For example, this code tests that the response status is not + // successful: + Not() APIResponseAssertions + + // Ensures the response status code is within `200..299` range. 
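+	//
+	// A hedged sketch, assuming `request` is an [APIRequestContext] and the
+	// assertions helper from this package:
+	//
+	//	expect := playwright.NewPlaywrightAssertions()
+	//	resp, err := request.Get("https://example.com/api/health")
+	//	if err == nil {
+	//		err = expect.APIResponse(resp).ToBeOK()
+	//	}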
+ ToBeOK() error +} + +// A Browser is created via [BrowserType.Launch]. An example of using a [Browser] to create a [Page]: +type Browser interface { + EventEmitter + // Emitted when Browser gets disconnected from the browser application. This might happen because of one of the + // following: + // - Browser application is closed or crashed. + // - The [Browser.Close] method was called. + OnDisconnected(fn func(Browser)) + + // Get the browser type (chromium, firefox or webkit) that the browser belongs to. + BrowserType() BrowserType + + // In case this browser is obtained using [BrowserType.Launch], closes the browser and all of its pages (if any were + // opened). + // In case this browser is connected to, clears all created contexts belonging to this browser and disconnects from + // the browser server. + // **NOTE** This is similar to force-quitting the browser. To close pages gracefully and ensure you receive page close + // events, call [BrowserContext.Close] on any [BrowserContext] instances you explicitly created earlier using + // [Browser.NewContext] **before** calling [Browser.Close]. + // The [Browser] object itself is considered to be disposed and cannot be used anymore. + Close(options ...BrowserCloseOptions) error + + // Returns an array of all open browser contexts. In a newly created browser, this will return zero browser contexts. + Contexts() []BrowserContext + + // Indicates that the browser is connected. + IsConnected() bool + + // **NOTE** CDP Sessions are only supported on Chromium-based browsers. + // Returns the newly created browser session. + NewBrowserCDPSession() (CDPSession, error) + + // Creates a new browser context. It won't share cookies/cache with other browser contexts. + // **NOTE** If directly using this method to create [BrowserContext]s, it is best practice to explicitly close the + // returned context via [BrowserContext.Close] when your code is done with the [BrowserContext], and before calling + // [Browser.Close]. This will ensure the `context` is closed gracefully and any artifacts—like HARs and videos—are + // fully flushed and saved. + NewContext(options ...BrowserNewContextOptions) (BrowserContext, error) + + // Creates a new page in a new browser context. Closing this page will close the context as well. + // This is a convenience API that should only be used for the single-page scenarios and short snippets. Production + // code and testing frameworks should explicitly create [Browser.NewContext] followed by the [BrowserContext.NewPage] + // to control their exact life times. + NewPage(options ...BrowserNewPageOptions) (Page, error) + + // **NOTE** This API controls + // [Chromium Tracing] which is a low-level + // chromium-specific debugging tool. API to control [Playwright Tracing] could be found + // [here]. + // You can use [Browser.StartTracing] and [Browser.StopTracing] to create a trace file that can be opened in Chrome + // DevTools performance panel. + // + // [Chromium Tracing]: https://www.chromium.org/developers/how-tos/trace-event-profiling-tool + // [Playwright Tracing]: ../trace-viewer + // [here]: ./class-tracing + StartTracing(options ...BrowserStartTracingOptions) error + + // **NOTE** This API controls + // [Chromium Tracing] which is a low-level + // chromium-specific debugging tool. API to control [Playwright Tracing] could be found + // [here]. + // Returns the buffer with trace data. 
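+	//
+	// A hedged sketch (Chromium only; error handling elided, file name illustrative):
+	//
+	//	_ = browser.StartTracing()
+	//	// ... drive one or more pages here ...
+	//	buf, _ := browser.StopTracing()
+	//	_ = os.WriteFile("chrome-trace.json", buf, 0o644)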
+ // + // [Chromium Tracing]: https://www.chromium.org/developers/how-tos/trace-event-profiling-tool + // [Playwright Tracing]: ../trace-viewer + // [here]: ./class-tracing + StopTracing() ([]byte, error) + + // Returns the browser version. + Version() string +} + +// BrowserContexts provide a way to operate multiple independent browser sessions. +// If a page opens another page, e.g. with a `window.open` call, the popup will belong to the parent page's browser +// context. +// Playwright allows creating isolated non-persistent browser contexts with [Browser.NewContext] method. +// Non-persistent browser contexts don't write any browsing data to disk. +type BrowserContext interface { + EventEmitter + // **NOTE** Only works with Chromium browser's persistent context. + // Emitted when new background page is created in the context. + OnBackgroundPage(fn func(Page)) + + // Playwright has ability to mock clock and passage of time. + Clock() Clock + + // Emitted when Browser context gets closed. This might happen because of one of the following: + // - Browser context is closed. + // - Browser application is closed or crashed. + // - The [Browser.Close] method was called. + OnClose(fn func(BrowserContext)) + + // Emitted when JavaScript within the page calls one of console API methods, e.g. `console.log` or `console.dir`. + // The arguments passed into `console.log` and the page are available on the [ConsoleMessage] event handler argument. + OnConsole(fn func(ConsoleMessage)) + + // Emitted when a JavaScript dialog appears, such as `alert`, `prompt`, `confirm` or `beforeunload`. Listener **must** + // either [Dialog.Accept] or [Dialog.Dismiss] the dialog - otherwise the page will + // [freeze] waiting for the dialog, + // and actions like click will never finish. + // + // [freeze]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/EventLoop#never_blocking + OnDialog(fn func(Dialog)) + + // The event is emitted when a new Page is created in the BrowserContext. The page may still be loading. The event + // will also fire for popup pages. See also [Page.OnPopup] to receive events about popups relevant to a specific page. + // The earliest moment that page is available is when it has navigated to the initial url. For example, when opening a + // popup with `window.open('http://example.com')`, this event will fire when the network request to + // "http://example.com" is done and its response has started loading in the popup. If you would like to route/listen + // to this network request, use [BrowserContext.Route] and [BrowserContext.OnRequest] respectively instead of similar + // methods on the [Page]. + // **NOTE** Use [Page.WaitForLoadState] to wait until the page gets to a particular state (you should not need it in + // most cases). + OnPage(fn func(Page)) + + // Emitted when exception is unhandled in any of the pages in this context. To listen for errors from a particular + // page, use [Page.OnPageError] instead. + OnWebError(fn func(WebError)) + + // Emitted when a request is issued from any pages created through this context. The [request] object is read-only. To + // only listen for requests from a particular page, use [Page.OnRequest]. + // In order to intercept and mutate requests, see [BrowserContext.Route] or [Page.Route]. + OnRequest(fn func(Request)) + + // Emitted when a request fails, for example by timing out. To only listen for failed requests from a particular page, + // use [Page.OnRequestFailed]. 
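+	//
+	// A hedged sketch of subscribing to failed requests:
+	//
+	//	context.OnRequestFailed(func(request playwright.Request) {
+	//		log.Printf("request failed: %s %s", request.Method(), request.URL())
+	//	})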
+	// **NOTE** HTTP Error responses, such as 404 or 503, are still successful responses from HTTP standpoint, so request
+	// will complete with [BrowserContext.OnRequestFinished] event and not with [BrowserContext.OnRequestFailed].
+	OnRequestFailed(fn func(Request))
+
+	// Emitted when a request finishes successfully after downloading the response body. For a successful response, the
+	// sequence of events is `request`, `response` and `requestfinished`. To listen for successful requests from a
+	// particular page, use [Page.OnRequestFinished].
+	OnRequestFinished(fn func(Request))
+
+	// Emitted when [response] status and headers are received for a request. For a successful response, the sequence of
+	// events is `request`, `response` and `requestfinished`. To listen for response events from a particular page, use
+	// [Page.OnResponse].
+	OnResponse(fn func(Response))
+
+	// Adds cookies into this browser context. All pages within this context will have these cookies installed. Cookies
+	// can be obtained via [BrowserContext.Cookies].
+	AddCookies(cookies []OptionalCookie) error
+
+	// Adds a script which would be evaluated in one of the following scenarios:
+	// - Whenever a page is created in the browser context or is navigated.
+	// - Whenever a child frame is attached or navigated in any page in the browser context. In this case, the script is
+	// evaluated in the context of the newly attached frame.
+	// The script is evaluated after the document was created but before any of its scripts were run. This is useful to
+	// amend the JavaScript environment, e.g. to seed `Math.random`.
+	//
+	// script: Script to be evaluated in all pages in the browser context.
+	AddInitScript(script Script) error
+
+	// **NOTE** Background pages are only supported on Chromium-based browsers.
+	// All existing background pages in the context.
+	BackgroundPages() []Page
+
+	// Returns the browser instance of the context. If it was launched as a persistent context null gets returned.
+	Browser() Browser
+
+	// Removes cookies from context. Accepts optional filter.
+	ClearCookies(options ...BrowserContextClearCookiesOptions) error
+
+	// Clears all permission overrides for the browser context.
+	ClearPermissions() error
+
+	// Closes the browser context. All the pages that belong to the browser context will be closed.
+	// **NOTE** The default browser context cannot be closed.
+	Close(options ...BrowserContextCloseOptions) error
+
+	// If no URLs are specified, this method returns all cookies. If URLs are specified, only cookies that affect those
+	// URLs are returned.
+	Cookies(urls ...string) ([]Cookie, error)
+
+	// The method adds a function called “name” on the `window` object of every frame in every page in the
+	// context. When called, the function executes “binding” and returns a [Promise] which resolves to the return
+	// value of “binding”. If the “binding” returns a [Promise], it will be awaited.
+	// The first argument of the “binding” function contains information about the caller: `{ browserContext:
+	// BrowserContext, page: Page, frame: Frame }`.
+	// See [Page.ExposeBinding] for page-only version.
+	//
+	// 1. name: Name of the function on the window object.
+	// 2. binding: Callback function that will be called in the Playwright's context.
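+	//
+	// A hedged sketch (the binding name "sha256" and its body are illustrative;
+	// assumes crypto/sha256 and encoding/hex are imported):
+	//
+	//	err := context.ExposeBinding("sha256", func(source *playwright.BindingSource, args ...interface{}) interface{} {
+	//		sum := sha256.Sum256([]byte(args[0].(string)))
+	//		return hex.EncodeToString(sum[:])
+	//	})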
+	ExposeBinding(name string, binding BindingCallFunction, handle ...bool) error
+
+	// The method adds a function called “name” on the `window` object of every frame in every page in the
+	// context. When called, the function executes “binding” and returns a [Promise] which resolves to the return
+	// value of “binding”.
+	// If the “binding” returns a [Promise], it will be awaited.
+	// See [Page.ExposeFunction] for page-only version.
+	//
+	// 1. name: Name of the function on the window object.
+	// 2. binding: Callback function that will be called in the Playwright's context.
+	ExposeFunction(name string, binding ExposedFunction) error
+
+	// Grants specified permissions to the browser context. Only grants corresponding permissions to the given origin if
+	// specified.
+	//
+	// permissions: A list of permissions to grant.
+	//
+	// **NOTE** Supported permissions differ between browsers, and even between different versions of the same browser.
+	// Any permission may stop working after an update.
+	//
+	// Here are some permissions that may be supported by some browsers:
+	// - `'accelerometer'`
+	// - `'ambient-light-sensor'`
+	// - `'background-sync'`
+	// - `'camera'`
+	// - `'clipboard-read'`
+	// - `'clipboard-write'`
+	// - `'geolocation'`
+	// - `'gyroscope'`
+	// - `'magnetometer'`
+	// - `'microphone'`
+	// - `'midi-sysex'` (system-exclusive midi)
+	// - `'midi'`
+	// - `'notifications'`
+	// - `'payment-handler'`
+	// - `'storage-access'`
+	GrantPermissions(permissions []string, options ...BrowserContextGrantPermissionsOptions) error
+
+	// **NOTE** CDP sessions are only supported on Chromium-based browsers.
+	// Returns the newly created session.
+	//
+	// page: Target to create new session for. For backwards-compatibility, this parameter is named `page`, but it can be a
+	// `Page` or `Frame` type.
+	NewCDPSession(page interface{}) (CDPSession, error)
+
+	// Creates a new page in the browser context.
+	NewPage() (Page, error)
+
+	// Returns all open pages in the context.
+	Pages() []Page
+
+	// API testing helper associated with this context. Requests made with this API will use context cookies.
+	Request() APIRequestContext
+
+	// Routing provides the capability to modify network requests that are made by any page in the browser context. Once
+	// route is enabled, every request matching the url pattern will stall unless it's continued, fulfilled or aborted.
+	// **NOTE** [BrowserContext.Route] will not intercept requests intercepted by Service Worker. See
+	// [this] issue. We recommend disabling Service Workers when
+	// using request interception by setting “serviceWorkers” to `block`.
+	//
+	// 1. url: A glob pattern, regex pattern, or predicate that receives a [URL] to match during routing. If “baseURL” is
+	// set in the context options and the provided URL is a string that does not start with `*`, it is resolved using the
+	// [`new URL()`](https://developer.mozilla.org/en-US/docs/Web/API/URL/URL) constructor.
+	// 2. handler: Handler function to route the request.
+	//
+	// [this]: https://github.com/microsoft/playwright/issues/1090
+	Route(url interface{}, handler routeHandler, times ...int) error
+
+	// If specified the network requests that are made in the context will be served from the HAR file. Read more about
+	// [Replaying from HAR].
+	// Playwright will not serve requests intercepted by Service Worker from the HAR file. See
+	// [this] issue. We recommend disabling Service Workers when
+	// using request interception by setting “serviceWorkers” to `block`.
+	//
+	// har: Path to a [HAR](http://www.softwareishard.com/blog/har-12-spec) file with prerecorded network data. If `path` is a
+	// relative path, then it is resolved relative to the current working directory.
+	//
+	// [Replaying from HAR]: https://playwright.dev/docs/mock#replaying-from-har
+	// [this]: https://github.com/microsoft/playwright/issues/1090
+	RouteFromHAR(har string, options ...BrowserContextRouteFromHAROptions) error
+
+	// This method allows modifying websocket connections that are made by any page in the browser context.
+	// Note that only `WebSocket`s created after this method was called will be routed. It is recommended to call this
+	// method before creating any pages.
+	//
+	// 1. url: Only WebSockets with the url matching this pattern will be routed. A string pattern can be relative to the
+	// “baseURL” context option.
+	// 2. handler: Handler function to route the WebSocket.
+	RouteWebSocket(url interface{}, handler func(WebSocketRoute)) error
+
+	// **NOTE** Service workers are only supported on Chromium-based browsers.
+	// All existing service workers in the context.
+	ServiceWorkers() []Worker
+
+	// This setting will change the default maximum navigation time for the following methods and related shortcuts:
+	// - [Page.GoBack]
+	// - [Page.GoForward]
+	// - [Page.Goto]
+	// - [Page.Reload]
+	// - [Page.SetContent]
+	// - [Page.ExpectNavigation]
+	// **NOTE** [Page.SetDefaultNavigationTimeout] and [Page.SetDefaultTimeout] take priority over
+	// [BrowserContext.SetDefaultNavigationTimeout].
+	//
+	// timeout: Maximum navigation time in milliseconds.
+	SetDefaultNavigationTimeout(timeout float64)
+
+	// This setting will change the default maximum time for all the methods accepting the “timeout” option.
+	// **NOTE** [Page.SetDefaultNavigationTimeout], [Page.SetDefaultTimeout] and
+	// [BrowserContext.SetDefaultNavigationTimeout] take priority over [BrowserContext.SetDefaultTimeout].
+	//
+	// timeout: Maximum time in milliseconds. Pass `0` to disable timeout.
+	SetDefaultTimeout(timeout float64)
+
+	// The extra HTTP headers will be sent with every request initiated by any page in the context. These headers are
+	// merged with page-specific extra HTTP headers set with [Page.SetExtraHTTPHeaders]. If page overrides a particular
+	// header, page-specific header value will be used instead of the browser context header value.
+	// **NOTE** [BrowserContext.SetExtraHTTPHeaders] does not guarantee the order of headers in the outgoing requests.
+	//
+	// headers: An object containing additional HTTP headers to be sent with every request. All header values must be strings.
+	SetExtraHTTPHeaders(headers map[string]string) error
+
+	// Sets the context's geolocation. Passing `null` or `undefined` emulates position unavailable.
+	SetGeolocation(geolocation *Geolocation) error
+
+	//
+	// offline: Whether to emulate network being offline for the browser context.
+	SetOffline(offline bool) error
+
+	// Returns storage state for this browser context, contains current cookies, local storage snapshot and IndexedDB
+	// snapshot.
+	StorageState(path ...string) (*StorageState, error)
+
+	Tracing() Tracing
+
+	// Removes all routes created with [BrowserContext.Route] and [BrowserContext.RouteFromHAR].
+	UnrouteAll(options ...BrowserContextUnrouteAllOptions) error
+
+	// Removes a route created with [BrowserContext.Route]. When “handler” is not specified, removes all routes
+	// for the “url”.
+	//
+	// 1. url: A glob pattern, regex pattern or predicate receiving [URL] used to register a routing with [BrowserContext.Route].
+	// 2. handler: Optional handler function used to register a routing with [BrowserContext.Route].
+	Unroute(url interface{}, handler ...routeHandler) error
+
+	// Performs action and waits for a [ConsoleMessage] to be logged by the pages in the context. If predicate is
+	// provided, it passes [ConsoleMessage] value into the `predicate` function and waits for `predicate(message)` to
+	// return a truthy value. Will throw an error if the page is closed before the [BrowserContext.OnConsole] event is
+	// fired.
+	ExpectConsoleMessage(cb func() error, options ...BrowserContextExpectConsoleMessageOptions) (ConsoleMessage, error)
+
+	// Waits for event to fire and passes its value into the predicate function. Returns when the predicate returns truthy
+	// value. Will throw an error if the context closes before the event is fired. Returns the event data value.
+	//
+	// event: Event name, same one would pass into `browserContext.on(event)`.
+	ExpectEvent(event string, cb func() error, options ...BrowserContextExpectEventOptions) (interface{}, error)
+
+	// Performs action and waits for a new [Page] to be created in the context. If predicate is provided, it passes [Page]
+	// value into the `predicate` function and waits for `predicate(event)` to return a truthy value. Will throw an error
+	// if the context closes before new [Page] is created.
+	ExpectPage(cb func() error, options ...BrowserContextExpectPageOptions) (Page, error)
+
+	// **NOTE** In most cases, you should use [BrowserContext.ExpectEvent].
+	// Waits for given `event` to fire. If predicate is provided, it passes event's value into the `predicate` function
+	// and waits for `predicate(event)` to return a truthy value. Will throw an error if the browser context is closed
+	// before the `event` is fired.
+	//
+	// event: Event name, same one typically passed into `*.on(event)`.
+	WaitForEvent(event string, options ...BrowserContextWaitForEventOptions) (interface{}, error)
+}
+
+// BrowserType provides methods to launch a specific browser instance or connect to an existing one. The following is
+// a typical example of using Playwright to drive automation:
+type BrowserType interface {
+	// This method attaches Playwright to an existing browser instance created via `BrowserType.launchServer` in Node.js.
+	// **NOTE** The major and minor version of the Playwright instance that connects needs to match the version of
+	// Playwright that launches the browser (1.2.3 is compatible with 1.2.x).
+	//
+	// wsEndpoint: A Playwright browser websocket endpoint to connect to. You obtain this endpoint via `BrowserServer.wsEndpoint`.
+	Connect(wsEndpoint string, options ...BrowserTypeConnectOptions) (Browser, error)
+
+	// This method attaches Playwright to an existing browser instance using the Chrome DevTools Protocol.
+	// The default browser context is accessible via [Browser.Contexts].
+	// **NOTE** Connecting over the Chrome DevTools Protocol is only supported for Chromium-based browsers.
+	// **NOTE** This connection is significantly lower fidelity than the Playwright protocol connection via
+	// [BrowserType.Connect]. If you are experiencing issues or attempting to use advanced functionality, you probably
+	// want to use [BrowserType.Connect].
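+	//
+	// A hedged sketch, assuming a Chromium instance was started with
+	// --remote-debugging-port=9222:
+	//
+	//	browser, err := pw.Chromium.ConnectOverCDP("http://localhost:9222")
+	//	if err == nil {
+	//		defaultContext := browser.Contexts()[0]
+	//		_ = defaultContext
+	//	}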
+ // + // endpointURL: A CDP websocket endpoint or http url to connect to. For example `http://localhost:9222/` or + // `ws://127.0.0.1:9222/devtools/browser/387adf4c-243f-4051-a181-46798f4a46f4`. + ConnectOverCDP(endpointURL string, options ...BrowserTypeConnectOverCDPOptions) (Browser, error) + + // A path where Playwright expects to find a bundled browser executable. + ExecutablePath() string + + // Returns the browser instance. + // + // [Chrome Canary]: https://www.google.com/chrome/browser/canary.html + // [Dev Channel]: https://www.chromium.org/getting-involved/dev-channel + // [this article]: https://www.howtogeek.com/202825/what%E2%80%99s-the-difference-between-chromium-and-chrome/ + // [This article]: https://chromium.googlesource.com/chromium/src/+/lkgr/docs/chromium_browser_vs_google_chrome.md + Launch(options ...BrowserTypeLaunchOptions) (Browser, error) + + // Returns the persistent browser context instance. + // Launches browser that uses persistent storage located at “[object Object]” and returns the only context. Closing + // this context will automatically close the browser. + // + // userDataDir: Path to a User Data Directory, which stores browser session data like cookies and local storage. Pass an empty + // string to create a temporary directory. + // + // More details for + // [Chromium](https://chromium.googlesource.com/chromium/src/+/master/docs/user_data_dir.md#introduction) and + // [Firefox](https://wiki.mozilla.org/Firefox/CommandLineOptions#User_profile). Chromium's user data directory is the + // **parent** directory of the "Profile Path" seen at `chrome://version`. + // + // Note that browsers do not allow launching multiple instances with the same User Data Directory. + LaunchPersistentContext(userDataDir string, options ...BrowserTypeLaunchPersistentContextOptions) (BrowserContext, error) + + // Returns browser name. For example: `chromium`, `webkit` or `firefox`. + Name() string +} + +// The `CDPSession` instances are used to talk raw Chrome Devtools Protocol: +// - protocol methods can be called with `session.send` method. +// - protocol events can be subscribed to with `session.on` method. +// +// Useful links: +// - Documentation on DevTools Protocol can be found here: +// [DevTools Protocol Viewer]. +// - Getting Started with DevTools Protocol: +// https://github.com/aslushnikov/getting-started-with-cdp/blob/master/README.md +// +// [DevTools Protocol Viewer]: https://chromedevtools.github.io/devtools-protocol/ +type CDPSession interface { + EventEmitter + // Detaches the CDPSession from the target. Once detached, the CDPSession object won't emit any events and can't be + // used to send messages. + Detach() error + + // + // 1. method: Protocol method name. + // 2. params: Optional method parameters. + Send(method string, params map[string]interface{}) (interface{}, error) +} + +// Accurately simulating time-dependent behavior is essential for verifying the correctness of applications. Learn +// more about [clock emulation]. +// Note that clock is installed for the entire [BrowserContext], so the time in all the pages and iframes is +// controlled by the same clock. +// +// [clock emulation]: https://playwright.dev/docs/clock +type Clock interface { + // Advance the clock by jumping forward in time. Only fires due timers at most once. This is equivalent to user + // closing the laptop lid for a while and reopening it later, after given time. + // + // ticks: Time may be the number of milliseconds to advance the clock by or a human-readable string. 
Valid string formats are + // "08" for eight seconds, "01:00" for one minute and "02:34:10" for two hours, 34 minutes and ten seconds. + FastForward(ticks interface{}) error + + // Install fake implementations for the following time-related functions: + // - `Date` + // - `setTimeout` + // - `clearTimeout` + // - `setInterval` + // - `clearInterval` + // - `requestAnimationFrame` + // - `cancelAnimationFrame` + // - `requestIdleCallback` + // - `cancelIdleCallback` + // - `performance` + // Fake timers are used to manually control the flow of time in tests. They allow you to advance time, fire timers, + // and control the behavior of time-dependent functions. See [Clock.RunFor] and [Clock.FastForward] for more + // information. + Install(options ...ClockInstallOptions) error + + // Advance the clock, firing all the time-related callbacks. + // + // ticks: Time may be the number of milliseconds to advance the clock by or a human-readable string. Valid string formats are + // "08" for eight seconds, "01:00" for one minute and "02:34:10" for two hours, 34 minutes and ten seconds. + RunFor(ticks interface{}) error + + // Advance the clock by jumping forward in time and pause the time. Once this method is called, no timers are fired + // unless [Clock.RunFor], [Clock.FastForward], [Clock.PauseAt] or [Clock.Resume] is called. + // Only fires due timers at most once. This is equivalent to user closing the laptop lid for a while and reopening it + // at the specified time and pausing. + // + // time: Time to pause at. + PauseAt(time interface{}) error + + // Resumes timers. Once this method is called, time resumes flowing, timers are fired as usual. + Resume() error + + // Makes `Date.now` and `new Date()` return fixed fake time at all times, keeps all the timers running. + // Use this method for simple scenarios where you only need to test with a predefined time. For more advanced + // scenarios, use [Clock.Install] instead. Read docs on [clock emulation] to learn more. + // + // time: Time to be set. + // + // [clock emulation]: https://playwright.dev/docs/clock + SetFixedTime(time interface{}) error + + // Sets system time, but does not trigger any timers. Use this to test how the web page reacts to a time shift, for + // example switching from summer to winter time, or changing time zones. + // + // time: Time to be set. + SetSystemTime(time interface{}) error +} + +// [ConsoleMessage] objects are dispatched by page via the [Page.OnConsole] event. For each console message logged in +// the page there will be corresponding event in the Playwright context. +type ConsoleMessage interface { + // List of arguments passed to a `console` function call. See also [Page.OnConsole]. + Args() []JSHandle + + Location() *ConsoleMessageLocation + + // The page that produced this console message, if any. + Page() Page + + // The text of the console message. + Text() string + + // The text of the console message. + String() string + + // One of the following values: `log`, `debug`, `info`, `error`, `warning`, `dir`, `dirxml`, `table`, + // `trace`, `clear`, `startGroup`, `startGroupCollapsed`, `endGroup`, `assert`, `profile`, + // `profileEnd`, `count`, `timeEnd`. + Type() string +} + +// [Dialog] objects are dispatched by page via the [Page.OnDialog] event. +// An example of using `Dialog` class: +// **NOTE** Dialogs are dismissed automatically, unless there is a [Page.OnDialog] listener. 
When listener is present, +// it **must** either [Dialog.Accept] or [Dialog.Dismiss] the dialog - otherwise the page will +// [freeze] waiting for the dialog, +// and actions like click will never finish. +// +// [freeze]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/EventLoop#never_blocking +type Dialog interface { + // Returns when the dialog has been accepted. + Accept(promptText ...string) error + + // If dialog is prompt, returns default prompt value. Otherwise, returns empty string. + DefaultValue() string + + // Returns when the dialog has been dismissed. + Dismiss() error + + // A message displayed in the dialog. + Message() string + + // The page that initiated this dialog, if available. + Page() Page + + // Returns dialog's type, can be one of `alert`, `beforeunload`, `confirm` or `prompt`. + Type() string +} + +// [Download] objects are dispatched by page via the [Page.OnDownload] event. +// All the downloaded files belonging to the browser context are deleted when the browser context is closed. +// Download event is emitted once the download starts. Download path becomes available once download completes. +type Download interface { + // Cancels a download. Will not fail if the download is already finished or canceled. Upon successful cancellations, + // `download.failure()` would resolve to `canceled`. + Cancel() error + + // Deletes the downloaded file. Will wait for the download to finish if necessary. + Delete() error + + // Returns download error if any. Will wait for the download to finish if necessary. + Failure() error + + // Get the page that the download belongs to. + Page() Page + + // Returns path to the downloaded file for a successful download, or throws for a failed/canceled download. The method + // will wait for the download to finish if necessary. The method throws when connected remotely. + // Note that the download's file name is a random GUID, use [Download.SuggestedFilename] to get suggested file name. + Path() (string, error) + + // Copy the download to a user-specified path. It is safe to call this method while the download is still in progress. + // Will wait for the download to finish if necessary. + // + // path: Path where the download should be copied. + SaveAs(path string) error + + // Returns suggested filename for this download. It is typically computed by the browser from the + // [`Content-Disposition`] response + // header or the `download` attribute. See the spec on [whatwg]. + // Different browsers can use different logic for computing it. + // + // [`Content-Disposition`]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition + // [whatwg]: https://html.spec.whatwg.org/#downloading-resources + SuggestedFilename() string + + // Returns downloaded url. + URL() string + + String() string +} + +// ElementHandle represents an in-page DOM element. ElementHandles can be created with the [Page.QuerySelector] +// +// method. +// **NOTE** The use of ElementHandle is discouraged, use [Locator] objects and web-first assertions instead. +// ElementHandle prevents DOM element from garbage collection unless the handle is disposed with [JSHandle.Dispose]. +// ElementHandles are auto-disposed when their origin frame gets navigated. +// ElementHandle instances can be used as an argument in [Page.EvalOnSelector] and [Page.Evaluate] methods. +// The difference between the [Locator] and ElementHandle is that the ElementHandle points to a particular element, +// while [Locator] captures the logic of how to retrieve an element. 
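+//
+// A hedged illustration of that difference (assumes a live page; the
+// "text=Submit" selector is illustrative):
+//
+//	handle, _ := page.QuerySelector("text=Submit")
+//	_ = handle.Hover()
+//	_ = handle.Click()
+//
+// and the locator-based equivalent:
+//
+//	locator := page.Locator("text=Submit")
+//	_ = locator.Hover()
+//	_ = locator.Click()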
+// In the example above, handle points to a particular DOM element on the page. If that element changes text or is used by
+// React to render an entirely different component, handle is still pointing to that very DOM element. This can lead
+// to unexpected behaviors.
+// With the locator, every time the `element` is used, the up-to-date DOM element is located in the page using the
+// selector. So in the snippet above, the underlying DOM element is going to be located twice.
+type ElementHandle interface {
+	JSHandle
+	// This method returns the bounding box of the element, or `null` if the element is not visible. The bounding box is
+	// calculated relative to the main frame viewport - which is usually the same as the browser window.
+	// Scrolling affects the returned bounding box, similarly to
+	// [Element.GetBoundingClientRect].
+	// That means `x` and/or `y` may be negative.
+	// Elements from child frames return the bounding box relative to the main frame, unlike the
+	// [Element.GetBoundingClientRect].
+	// Assuming the page is static, it is safe to use bounding box coordinates to perform input. For example, the
+	// following snippet should click the center of the element.
+	//
+	// [Element.GetBoundingClientRect]: https://developer.mozilla.org/en-US/docs/Web/API/Element/getBoundingClientRect
+	BoundingBox() (*Rect, error)
+
+	// This method checks the element by performing the following steps:
+	// 1. Ensure that element is a checkbox or a radio input. If not, this method throws. If the element is already
+	// checked, this method returns immediately.
+	// 2. Wait for [actionability] checks on the element, unless the “force” option is set.
+	// 3. Scroll the element into view if needed.
+	// 4. Use [Page.Mouse] to click in the center of the element.
+	// 5. Ensure that the element is now checked. If not, this method throws.
+	// If the element is detached from the DOM at any moment during the action, this method throws.
+	// When all steps combined have not finished during the specified “timeout”, this method throws a
+	// [TimeoutError]. Passing zero timeout disables this.
+	//
+	// Deprecated: Use locator-based [Locator.Check] instead. Read more about [locators].
+	//
+	// [actionability]: https://playwright.dev/docs/actionability
+	// [locators]: https://playwright.dev/docs/locators
+	Check(options ...ElementHandleCheckOptions) error
+
+	// This method clicks the element by performing the following steps:
+	// 1. Wait for [actionability] checks on the element, unless the “force” option is set.
+	// 2. Scroll the element into view if needed.
+	// 3. Use [Page.Mouse] to click in the center of the element, or the specified “position”.
+	// 4. Wait for initiated navigations to either succeed or fail, unless the “noWaitAfter” option is set.
+	// If the element is detached from the DOM at any moment during the action, this method throws.
+	// When all steps combined have not finished during the specified “timeout”, this method throws a
+	// [TimeoutError]. Passing zero timeout disables this.
+	//
+	// Deprecated: Use locator-based [Locator.Click] instead. Read more about [locators].
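+	//
+	// A hedged sketch of the recommended locator-based replacement (the
+	// "#submit" selector is illustrative):
+	//
+	//	if err := page.Locator("#submit").Click(); err != nil {
+	//		log.Printf("click failed: %v", err)
+	//	}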
+	//
+	// [actionability]: https://playwright.dev/docs/actionability
+	// [locators]: https://playwright.dev/docs/locators
+	Click(options ...ElementHandleClickOptions) error
+
+	// Returns the content frame for element handles referencing iframe nodes, or `null` otherwise.
+	ContentFrame() (Frame, error)
+
+	// This method double clicks the element by performing the following steps:
+	// 1. Wait for [actionability] checks on the element, unless the “force” option is set.
+	// 2. Scroll the element into view if needed.
+	// 3. Use [Page.Mouse] to double click in the center of the element, or the specified “position”.
+	// If the element is detached from the DOM at any moment during the action, this method throws.
+	// When all steps combined have not finished during the specified “timeout”, this method throws a
+	// [TimeoutError]. Passing zero timeout disables this.
+	// **NOTE** `elementHandle.dblclick()` dispatches two `click` events and a single `dblclick` event.
+	//
+	// Deprecated: Use locator-based [Locator.Dblclick] instead. Read more about [locators].
+	//
+	// [actionability]: https://playwright.dev/docs/actionability
+	// [locators]: https://playwright.dev/docs/locators
+	Dblclick(options ...ElementHandleDblclickOptions) error
+
+	// The snippet below dispatches the `click` event on the element. Regardless of the visibility state of the element,
+	// `click` is dispatched. This is equivalent to calling
+	// [element.Click()].
+	//
+	// Deprecated: Use locator-based [Locator.DispatchEvent] instead. Read more about [locators].
+	//
+	// 1. typ: DOM event type: `"click"`, `"dragstart"`, etc.
+	// 2. eventInit: Optional event-specific initialization properties.
+	//
+	// [element.Click()]: https://developer.mozilla.org/en-US/docs/Web/API/HTMLElement/click
+	// [DeviceMotionEvent]: https://developer.mozilla.org/en-US/docs/Web/API/DeviceMotionEvent/DeviceMotionEvent
+	// [DeviceOrientationEvent]: https://developer.mozilla.org/en-US/docs/Web/API/DeviceOrientationEvent/DeviceOrientationEvent
+	// [DragEvent]: https://developer.mozilla.org/en-US/docs/Web/API/DragEvent/DragEvent
+	// [Event]: https://developer.mozilla.org/en-US/docs/Web/API/Event/Event
+	// [FocusEvent]: https://developer.mozilla.org/en-US/docs/Web/API/FocusEvent/FocusEvent
+	// [KeyboardEvent]: https://developer.mozilla.org/en-US/docs/Web/API/KeyboardEvent/KeyboardEvent
+	// [MouseEvent]: https://developer.mozilla.org/en-US/docs/Web/API/MouseEvent/MouseEvent
+	// [PointerEvent]: https://developer.mozilla.org/en-US/docs/Web/API/PointerEvent/PointerEvent
+	// [TouchEvent]: https://developer.mozilla.org/en-US/docs/Web/API/TouchEvent/TouchEvent
+	// [WheelEvent]: https://developer.mozilla.org/en-US/docs/Web/API/WheelEvent/WheelEvent
+	// [locators]: https://playwright.dev/docs/locators
+	DispatchEvent(typ string, eventInit ...interface{}) error
+
+	// Returns the return value of “expression”.
+	// The method finds an element matching the specified selector in the `ElementHandle`'s subtree and passes it as a
+	// first argument to “expression”. If no elements match the selector, the method throws an error.
+	// If “expression” returns a [Promise], then [ElementHandle.EvalOnSelector] would wait for the promise to resolve
+	// and return its value.
+	//
+	// Deprecated: This method does not wait for the element to pass actionability checks and therefore can lead to flaky tests. Use [Locator.Evaluate], other [Locator] helper methods or web-first assertions instead.
+	//
+	// 1. selector: A selector to query for.
+	// 2. expression: JavaScript expression to be evaluated in the browser context. If the expression evaluates to a function, the
+	// function is automatically invoked.
+	// 3. arg: Optional argument to pass to “expression”.
+	EvalOnSelector(selector string, expression string, arg ...interface{}) (interface{}, error)
+
+	// Returns the return value of “expression”.
+	// The method finds all elements matching the specified selector in the `ElementHandle`'s subtree and passes an array
+	// of matched elements as a first argument to “expression”.
+	// If “expression” returns a [Promise], then [ElementHandle.EvalOnSelectorAll] would wait for the promise to
+	// resolve and return its value.
+	//
+	// Deprecated: In most cases, [Locator.EvaluateAll], other [Locator] helper methods and web-first assertions do a better job.
+	//
+	// 1. selector: A selector to query for.
+	// 2. expression: JavaScript expression to be evaluated in the browser context. If the expression evaluates to a function, the
+	// function is automatically invoked.
+	// 3. arg: Optional argument to pass to “expression”.
+	EvalOnSelectorAll(selector string, expression string, arg ...interface{}) (interface{}, error)
+
+	// This method waits for [actionability] checks, focuses the element, fills it and triggers an
+	// `input` event after filling. Note that you can pass an empty string to clear the input field.
+	// If the target element is not an ``, `