summaryrefslogtreecommitdiff
path: root/vendor/github.com/testcontainers/testcontainers-go
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com/testcontainers/testcontainers-go')
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/.gitignore22
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/.golangci.yml86
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/.mockery.yaml11
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/CONTRIBUTING.md13
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/LICENSE21
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/Makefile93
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/Pipfile16
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock622
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/README.md21
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/RELEASING.md201
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/cleanup.go123
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/commons-test.mk65
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/config.go29
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/container.go560
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/docker.go1803
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/docker_auth.go282
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/docker_client.go143
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/docker_mounts.go140
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/exec/processor.go129
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/file.go143
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/generate.go3
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/generic.go119
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/image.go18
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go185
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/internal/core/bootstrap.go106
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/internal/core/client.go50
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_host.go329
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_rootless.go150
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_socket.go49
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/internal/core/images.go132
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/internal/core/labels.go73
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/internal/core/network/network.go52
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/internal/version.go4
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/lifecycle.go671
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/log/logger.go73
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/logconsumer.go36
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/logger_option.go45
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml147
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/mounts.go126
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/network.go60
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/options.go336
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/parallel.go110
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/port_forwarding.go427
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/provider.go155
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/reaper.go580
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/requirements.txt5
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/runtime.txt1
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/testcontainers.go54
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/testing.go173
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/wait/all.go82
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/wait/errors.go13
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/wait/errors_windows.go9
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/wait/exec.go107
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/wait/exit.go89
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/wait/file.go112
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/wait/health.go92
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go245
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/wait/http.go338
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/wait/log.go214
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/wait/nop.go81
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/wait/sql.go118
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/wait/tls.go167
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/wait/wait.go65
-rw-r--r--vendor/github.com/testcontainers/testcontainers-go/wait/walk.go74
64 files changed, 10598 insertions, 0 deletions
diff --git a/vendor/github.com/testcontainers/testcontainers-go/.gitignore b/vendor/github.com/testcontainers/testcontainers-go/.gitignore
new file mode 100644
index 0000000..be65781
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/.gitignore
@@ -0,0 +1,22 @@
+# Generated by golang tooling
+debug.test
+vendor
+
+# Generated docs
+site/
+.direnv/
+src/mkdocs-codeinclude-plugin
+src/pip-delete-this-directory.txt
+.idea/
+.build/
+.DS_Store
+
+TEST-*.xml
+
+**/go.work
+
+# VS Code settings
+.vscode
+
+# Environment variables
+.env
diff --git a/vendor/github.com/testcontainers/testcontainers-go/.golangci.yml b/vendor/github.com/testcontainers/testcontainers-go/.golangci.yml
new file mode 100644
index 0000000..7db1f4d
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/.golangci.yml
@@ -0,0 +1,86 @@
+linters:
+ enable:
+ - errcheck
+ - errorlint
+ - gci
+ - gocritic
+ - gofumpt
+ - misspell
+ - nolintlint
+ - nakedret
+ - perfsprint
+ - revive
+ - testifylint
+ - thelper
+ - usestdlibvars
+
+linters-settings:
+ errorlint:
+ # Check whether fmt.Errorf uses the %w verb for formatting errors.
+ # See the https://github.com/polyfloyd/go-errorlint for caveats.
+ errorf: true
+ # Permit more than 1 %w verb, valid per Go 1.20 (Requires errorf:true)
+ errorf-multi: true
+ # Check for plain type assertions and type switches.
+ asserts: true
+ # Check for plain error comparisons.
+ comparison: true
+ gci:
+ sections:
+ - standard
+ - default
+ - prefix(github.com/testcontainers)
+ nakedret:
+ max-func-lines: 0
+ revive:
+ rules:
+ - name: blank-imports
+ - name: context-as-argument
+ arguments:
+ - allowTypesBefore: "*testing.T"
+ - name: context-keys-type
+ - name: dot-imports
+ - name: early-return
+ arguments:
+ - "preserveScope"
+ - name: empty-block
+ - name: error-naming
+ disabled: true
+ - name: error-return
+ - name: error-strings
+ disabled: true
+ - name: errorf
+ - name: increment-decrement
+ - name: indent-error-flow
+ arguments:
+ - "preserveScope"
+ - name: range
+ - name: receiver-naming
+ - name: redefines-builtin-id
+ disabled: true
+ - name: superfluous-else
+ arguments:
+ - "preserveScope"
+ - name: time-naming
+ - name: unexported-return
+ disabled: true
+ - name: unreachable-code
+ - name: unused-parameter
+ - name: use-any
+ - name: var-declaration
+ - name: var-naming
+ arguments:
+ - ["ID"] # AllowList
+ - ["VM"] # DenyList
+ - - upperCaseConst: true # Extra parameter (upperCaseConst|skipPackageNameChecks)
+ testifylint:
+ disable:
+ - float-compare
+ - go-require
+ enable-all: true
+output:
+ formats:
+ - format: colored-line-number
+ path-prefix: "."
+run:
+ timeout: 5m
diff --git a/vendor/github.com/testcontainers/testcontainers-go/.mockery.yaml b/vendor/github.com/testcontainers/testcontainers-go/.mockery.yaml
new file mode 100644
index 0000000..2f96829
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/.mockery.yaml
@@ -0,0 +1,11 @@
+quiet: True
+disable-version-string: True
+with-expecter: True
+mockname: "mock{{.InterfaceName}}"
+filename: "{{ .InterfaceName | lower }}_mock_test.go"
+outpkg: "{{.PackageName}}_test"
+dir: "{{.InterfaceDir}}"
+packages:
+ github.com/testcontainers/testcontainers-go/wait:
+ interfaces:
+ StrategyTarget:
diff --git a/vendor/github.com/testcontainers/testcontainers-go/CONTRIBUTING.md b/vendor/github.com/testcontainers/testcontainers-go/CONTRIBUTING.md
new file mode 100644
index 0000000..c8194c2
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/CONTRIBUTING.md
@@ -0,0 +1,13 @@
+# Contributing
+
+Please see the [main contributing guidelines](./docs/contributing.md).
+
+There are additional docs describing [contributing documentation changes](./docs/contributing_docs.md).
+
+### GitHub Sponsorship
+
+Testcontainers is [in the GitHub Sponsors program](https://github.com/sponsors/testcontainers)!
+
+This repository is supported by our sponsors, meaning that issues are eligible to have a 'bounty' attached to them by sponsors.
+
+Please see [the bounty policy page](https://golang.testcontainers.org/bounty) if you are interested, either as a sponsor or as a contributor.
diff --git a/vendor/github.com/testcontainers/testcontainers-go/LICENSE b/vendor/github.com/testcontainers/testcontainers-go/LICENSE
new file mode 100644
index 0000000..607a9c3
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017-2019 Gianluca Arbezzano
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/testcontainers/testcontainers-go/Makefile b/vendor/github.com/testcontainers/testcontainers-go/Makefile
new file mode 100644
index 0000000..8edc9d4
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/Makefile
@@ -0,0 +1,93 @@
+include ./commons-test.mk
+
+.PHONY: lint-all
+lint-all:
+ $(MAKE) lint
+ $(MAKE) -C modulegen lint
+ $(MAKE) -C examples lint-examples
+ $(MAKE) -C modules lint-modules
+
+.PHONY: test-all
+test-all: tools test-tools test-unit
+
+.PHONY: test-examples
+test-examples:
+ @echo "Running example tests..."
+ $(MAKE) -C examples test
+
+.PHONY: tidy-all
+tidy-all:
+ $(MAKE) tidy
+ $(MAKE) -C examples tidy-examples
+ $(MAKE) -C modules tidy-modules
+
+## --------------------------------------
+
+DOCS_CONTAINER=mkdocs-container
+DOCS_IMAGE=python:3.8
+
+.PHONY: clean-docs
+clean-docs:
+ @echo "Destroying docs"
+ docker rm -f $(DOCS_CONTAINER) || true
+
+.PHONY: serve-docs
+serve-docs:
+ docker run --rm --name $(DOCS_CONTAINER) -it -p 8000:8000 \
+ -v $(PWD):/testcontainers-go \
+ -w /testcontainers-go \
+ $(DOCS_IMAGE) bash -c "pip install -Ur requirements.txt && mkdocs serve -f mkdocs.yml -a 0.0.0.0:8000"
+
+## --------------------------------------
+
+# Compose tests: Make goals to test the compose module against the latest versions of the compose and compose-go repositories.
+#
+# The following goals are available:
+#
+# - compose-clean: Clean the .build directory, and clean the go.mod and go.sum files in the testcontainers-go compose module.
+# - compose-clone: Clone the compose and compose-go repositories into the .build directory.
+# - compose-replace: Replace the docker/compose/v2 dependency in the testcontainers-go compose module with the local copy.
+# - compose-spec-replace: Replace the compose-spec/compose-go/v2 dependency in the testcontainers-go compose module with the local copy.
+# - compose-tidy: Run "go mod tidy" in the testcontainers-go compose module.
+# - compose-test-all-latest: Test the testcontainers-go compose module against the latest versions of the compose and compose-go repositories.
+# - compose-test-latest: Test the testcontainers-go compose module against the latest version of the compose repository, using current version of the compose-spec repository.
+# - compose-test-spec-latest: Test the testcontainers-go compose module against the latest version of the compose-spec repository, using current version of the compose repository.
+
+.PHONY: compose-clean
+compose-clean:
+ rm -rf .build
+ cd modules/compose && git checkout -- go.mod go.sum
+
+.PHONY: compose-clone
+compose-clone: compose-clean
+ mkdir .build
+ git clone https://github.com/compose-spec/compose-go.git .build/compose-go & \
+ git clone https://github.com/docker/compose.git .build/compose
+ wait
+
+.PHONY: compose-replace
+compose-replace:
+ cd modules/compose && echo "replace github.com/docker/compose/v2 => ../../.build/compose" >> go.mod
+
+.PHONY: compose-spec-replace
+compose-spec-replace:
+ cd modules/compose && echo "replace github.com/compose-spec/compose-go/v2 => ../../.build/compose-go" >> go.mod
+
+.PHONY: compose-tidy
+compose-tidy:
+ cd modules/compose && go mod tidy
+
+# The following three goals are used in the GitHub Actions workflow to test the compose module against the latest versions of the compose and compose-spec repositories.
+# Please update the 'docker-projects-latest' workflow if you are making any changes to these goals.
+
+.PHONY: compose-test-all-latest
+compose-test-all-latest: compose-clone compose-replace compose-spec-replace compose-tidy
+ make -C modules/compose test-compose
+
+.PHONY: compose-test-latest
+compose-test-latest: compose-clone compose-replace compose-tidy
+ make -C modules/compose test-compose
+
+.PHONY: compose-test-spec-latest
+compose-test-spec-latest: compose-clone compose-spec-replace compose-tidy
+ make -C modules/compose test-compose
diff --git a/vendor/github.com/testcontainers/testcontainers-go/Pipfile b/vendor/github.com/testcontainers/testcontainers-go/Pipfile
new file mode 100644
index 0000000..2648278
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/Pipfile
@@ -0,0 +1,16 @@
+[[source]]
+name = "pypi"
+url = "https://pypi.org/simple"
+verify_ssl = true
+
+[dev-packages]
+
+[packages]
+mkdocs = "==1.5.3"
+mkdocs-codeinclude-plugin = "==0.2.1"
+mkdocs-include-markdown-plugin = "==6.2.2"
+mkdocs-material = "==9.5.18"
+mkdocs-markdownextradata-plugin = "==0.2.6"
+
+[requires]
+python_version = "3.8"
diff --git a/vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock b/vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock
new file mode 100644
index 0000000..d08964a
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock
@@ -0,0 +1,622 @@
+{
+ "_meta": {
+ "hash": {
+ "sha256": "0411eac13d1b06b42671b8a654fb269eb0c329d9a3d41f669ccf7b653ef8ad32"
+ },
+ "pipfile-spec": 6,
+ "requires": {
+ "python_version": "3.8"
+ },
+ "sources": [
+ {
+ "name": "pypi",
+ "url": "https://pypi.org/simple",
+ "verify_ssl": true
+ }
+ ]
+ },
+ "default": {
+ "babel": {
+ "hashes": [
+ "sha256:6919867db036398ba21eb5c7a0f6b28ab8cbc3ae7a73a44ebe34ae74a4e7d363",
+ "sha256:efb1a25b7118e67ce3a259bed20545c29cb68be8ad2c784c83689981b7a57287"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==2.14.0"
+ },
+ "bracex": {
+ "hashes": [
+ "sha256:0725da5045e8d37ea9592ab3614d8b561e22c3c5fde3964699be672e072ab611",
+ "sha256:d2fcf4b606a82ac325471affe1706dd9bbaa3536c91ef86a31f6b766f3dad1d0"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==2.5"
+ },
+ "certifi": {
+ "hashes": [
+ "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b",
+ "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.6'",
+ "version": "==2024.7.4"
+ },
+ "charset-normalizer": {
+ "hashes": [
+ "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027",
+ "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087",
+ "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786",
+ "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8",
+ "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09",
+ "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185",
+ "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574",
+ "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e",
+ "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519",
+ "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898",
+ "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269",
+ "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3",
+ "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f",
+ "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6",
+ "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8",
+ "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a",
+ "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73",
+ "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc",
+ "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714",
+ "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2",
+ "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc",
+ "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce",
+ "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d",
+ "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e",
+ "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6",
+ "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269",
+ "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96",
+ "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d",
+ "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a",
+ "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4",
+ "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77",
+ "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d",
+ "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0",
+ "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed",
+ "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068",
+ "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac",
+ "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25",
+ "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8",
+ "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab",
+ "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26",
+ "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2",
+ "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db",
+ "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f",
+ "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5",
+ "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99",
+ "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c",
+ "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d",
+ "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811",
+ "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa",
+ "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a",
+ "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03",
+ "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b",
+ "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04",
+ "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c",
+ "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001",
+ "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458",
+ "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389",
+ "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99",
+ "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985",
+ "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537",
+ "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238",
+ "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f",
+ "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d",
+ "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796",
+ "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a",
+ "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143",
+ "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8",
+ "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c",
+ "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5",
+ "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5",
+ "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711",
+ "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4",
+ "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6",
+ "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c",
+ "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7",
+ "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4",
+ "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b",
+ "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae",
+ "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12",
+ "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c",
+ "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae",
+ "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8",
+ "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887",
+ "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b",
+ "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4",
+ "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f",
+ "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5",
+ "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33",
+ "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519",
+ "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"
+ ],
+ "markers": "python_full_version >= '3.7.0'",
+ "version": "==3.3.2"
+ },
+ "click": {
+ "hashes": [
+ "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28",
+ "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==8.1.7"
+ },
+ "colorama": {
+ "hashes": [
+ "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44",
+ "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6'",
+ "version": "==0.4.6"
+ },
+ "ghp-import": {
+ "hashes": [
+ "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619",
+ "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"
+ ],
+ "version": "==2.1.0"
+ },
+ "idna": {
+ "hashes": [
+ "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc",
+ "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"
+ ],
+ "markers": "python_version >= '3.5'",
+ "version": "==3.7"
+ },
+ "importlib-metadata": {
+ "hashes": [
+ "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1",
+ "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5"
+ ],
+ "markers": "python_version < '3.10'",
+ "version": "==8.4.0"
+ },
+ "jinja2": {
+ "hashes": [
+ "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb",
+ "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.7'",
+ "version": "==3.1.5"
+ },
+ "markdown": {
+ "hashes": [
+ "sha256:2ae2471477cfd02dbbf038d5d9bc226d40def84b4fe2986e49b59b6b472bbed2",
+ "sha256:7eb6df5690b81a1d7942992c97fad2938e956e79df20cbc6186e9c3a77b1c803"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==3.7"
+ },
+ "markupsafe": {
+ "hashes": [
+ "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf",
+ "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff",
+ "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f",
+ "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3",
+ "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532",
+ "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f",
+ "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617",
+ "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df",
+ "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4",
+ "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906",
+ "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f",
+ "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4",
+ "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8",
+ "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371",
+ "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2",
+ "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465",
+ "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52",
+ "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6",
+ "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169",
+ "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad",
+ "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2",
+ "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0",
+ "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029",
+ "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f",
+ "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a",
+ "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced",
+ "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5",
+ "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c",
+ "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf",
+ "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9",
+ "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb",
+ "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad",
+ "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3",
+ "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1",
+ "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46",
+ "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc",
+ "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a",
+ "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee",
+ "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900",
+ "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5",
+ "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea",
+ "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f",
+ "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5",
+ "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e",
+ "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a",
+ "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f",
+ "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50",
+ "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a",
+ "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b",
+ "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4",
+ "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff",
+ "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2",
+ "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46",
+ "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b",
+ "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf",
+ "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5",
+ "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5",
+ "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab",
+ "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd",
+ "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==2.1.5"
+ },
+ "mergedeep": {
+ "hashes": [
+ "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8",
+ "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==1.3.4"
+ },
+ "mkdocs": {
+ "hashes": [
+ "sha256:3b3a78e736b31158d64dbb2f8ba29bd46a379d0c6e324c2246c3bc3d2189cfc1",
+ "sha256:eb7c99214dcb945313ba30426c2451b735992c73c2e10838f76d09e39ff4d0e2"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.7'",
+ "version": "==1.5.3"
+ },
+ "mkdocs-codeinclude-plugin": {
+ "hashes": [
+ "sha256:172a917c9b257fa62850b669336151f85d3cd40312b2b52520cbcceab557ea6c",
+ "sha256:305387f67a885f0e36ec1cf977324fe1fe50d31301147194b63631d0864601b1"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.7'",
+ "version": "==0.2.1"
+ },
+ "mkdocs-include-markdown-plugin": {
+ "hashes": [
+ "sha256:d293950f6499d2944291ca7b9bc4a60e652bbfd3e3a42b564f6cceee268694e7",
+ "sha256:f2bd5026650492a581d2fd44be6c22f90391910d76582b96a34c264f2d17875d"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.8'",
+ "version": "==6.2.2"
+ },
+ "mkdocs-markdownextradata-plugin": {
+ "hashes": [
+ "sha256:34dd40870781784c75809596b2d8d879da783815b075336d541de1f150c94242",
+ "sha256:4aed9b43b8bec65b02598387426ca4809099ea5f5aa78bf114f3296fd46686b5"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.6'",
+ "version": "==0.2.6"
+ },
+ "mkdocs-material": {
+ "hashes": [
+ "sha256:1e0e27fc9fe239f9064318acf548771a4629d5fd5dfd45444fd80a953fe21eb4",
+ "sha256:a43f470947053fa2405c33995f282d24992c752a50114f23f30da9d8d0c57e62"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.8'",
+ "version": "==9.5.18"
+ },
+ "mkdocs-material-extensions": {
+ "hashes": [
+ "sha256:10c9511cea88f568257f960358a467d12b970e1f7b2c0e5fb2bb48cab1928443",
+ "sha256:adff8b62700b25cb77b53358dad940f3ef973dd6db797907c49e3c2ef3ab4e31"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==1.3.1"
+ },
+ "packaging": {
+ "hashes": [
+ "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002",
+ "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==24.1"
+ },
+ "paginate": {
+ "hashes": [
+ "sha256:5e6007b6a9398177a7e1648d04fdd9f8c9766a1a945bceac82f1929e8c78af2d"
+ ],
+ "version": "==0.5.6"
+ },
+ "pathspec": {
+ "hashes": [
+ "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08",
+ "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==0.12.1"
+ },
+ "platformdirs": {
+ "hashes": [
+ "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee",
+ "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==4.2.2"
+ },
+ "pygments": {
+ "hashes": [
+ "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c",
+ "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==2.17.2"
+ },
+ "pymdown-extensions": {
+ "hashes": [
+ "sha256:3ab1db5c9e21728dabf75192d71471f8e50f216627e9a1fa9535ecb0231b9940",
+ "sha256:f938326115884f48c6059c67377c46cf631c733ef3629b6eed1349989d1b30cb"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==10.8.1"
+ },
+ "python-dateutil": {
+ "hashes": [
+ "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3",
+ "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
+ "version": "==2.9.0.post0"
+ },
+ "pytz": {
+ "hashes": [
+ "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812",
+ "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"
+ ],
+ "markers": "python_version < '3.9'",
+ "version": "==2024.1"
+ },
+ "pyyaml": {
+ "hashes": [
+ "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff",
+ "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48",
+ "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086",
+ "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e",
+ "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133",
+ "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5",
+ "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484",
+ "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee",
+ "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5",
+ "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68",
+ "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a",
+ "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf",
+ "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99",
+ "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8",
+ "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85",
+ "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19",
+ "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc",
+ "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a",
+ "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1",
+ "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317",
+ "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c",
+ "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631",
+ "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d",
+ "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652",
+ "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5",
+ "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e",
+ "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b",
+ "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8",
+ "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476",
+ "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706",
+ "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563",
+ "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237",
+ "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b",
+ "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083",
+ "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180",
+ "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425",
+ "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e",
+ "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f",
+ "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725",
+ "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183",
+ "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab",
+ "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774",
+ "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725",
+ "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e",
+ "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5",
+ "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d",
+ "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290",
+ "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44",
+ "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed",
+ "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4",
+ "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba",
+ "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12",
+ "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==6.0.2"
+ },
+ "pyyaml-env-tag": {
+ "hashes": [
+ "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb",
+ "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==0.1"
+ },
+ "regex": {
+ "hashes": [
+ "sha256:05d9b6578a22db7dedb4df81451f360395828b04f4513980b6bd7a1412c679cc",
+ "sha256:08a1749f04fee2811c7617fdd46d2e46d09106fa8f475c884b65c01326eb15c5",
+ "sha256:0940038bec2fe9e26b203d636c44d31dd8766abc1fe66262da6484bd82461ccf",
+ "sha256:0a2a512d623f1f2d01d881513af9fc6a7c46e5cfffb7dc50c38ce959f9246c94",
+ "sha256:0a54a047b607fd2d2d52a05e6ad294602f1e0dec2291152b745870afc47c1397",
+ "sha256:0dd3f69098511e71880fb00f5815db9ed0ef62c05775395968299cb400aeab82",
+ "sha256:1031a5e7b048ee371ab3653aad3030ecfad6ee9ecdc85f0242c57751a05b0ac4",
+ "sha256:108e2dcf0b53a7c4ab8986842a8edcb8ab2e59919a74ff51c296772e8e74d0ae",
+ "sha256:144a1fc54765f5c5c36d6d4b073299832aa1ec6a746a6452c3ee7b46b3d3b11d",
+ "sha256:19d6c11bf35a6ad077eb23852827f91c804eeb71ecb85db4ee1386825b9dc4db",
+ "sha256:1f687a28640f763f23f8a9801fe9e1b37338bb1ca5d564ddd41619458f1f22d1",
+ "sha256:224803b74aab56aa7be313f92a8d9911dcade37e5f167db62a738d0c85fdac4b",
+ "sha256:23a412b7b1a7063f81a742463f38821097b6a37ce1e5b89dd8e871d14dbfd86b",
+ "sha256:25f87ae6b96374db20f180eab083aafe419b194e96e4f282c40191e71980c666",
+ "sha256:2630ca4e152c221072fd4a56d4622b5ada876f668ecd24d5ab62544ae6793ed6",
+ "sha256:28e1f28d07220c0f3da0e8fcd5a115bbb53f8b55cecf9bec0c946eb9a059a94c",
+ "sha256:2b51739ddfd013c6f657b55a508de8b9ea78b56d22b236052c3a85a675102dc6",
+ "sha256:2cc1b87bba1dd1a898e664a31012725e48af826bf3971e786c53e32e02adae6c",
+ "sha256:2fef0b38c34ae675fcbb1b5db760d40c3fc3612cfa186e9e50df5782cac02bcd",
+ "sha256:36f392dc7763fe7924575475736bddf9ab9f7a66b920932d0ea50c2ded2f5636",
+ "sha256:374f690e1dd0dbdcddea4a5c9bdd97632cf656c69113f7cd6a361f2a67221cb6",
+ "sha256:3986217ec830c2109875be740531feb8ddafe0dfa49767cdcd072ed7e8927962",
+ "sha256:39fb166d2196413bead229cd64a2ffd6ec78ebab83fff7d2701103cf9f4dfd26",
+ "sha256:4290035b169578ffbbfa50d904d26bec16a94526071ebec3dadbebf67a26b25e",
+ "sha256:43548ad74ea50456e1c68d3c67fff3de64c6edb85bcd511d1136f9b5376fc9d1",
+ "sha256:44a22ae1cfd82e4ffa2066eb3390777dc79468f866f0625261a93e44cdf6482b",
+ "sha256:457c2cd5a646dd4ed536c92b535d73548fb8e216ebee602aa9f48e068fc393f3",
+ "sha256:459226445c7d7454981c4c0ce0ad1a72e1e751c3e417f305722bbcee6697e06a",
+ "sha256:47af45b6153522733aa6e92543938e97a70ce0900649ba626cf5aad290b737b6",
+ "sha256:499334ad139557de97cbc4347ee921c0e2b5e9c0f009859e74f3f77918339257",
+ "sha256:57ba112e5530530fd175ed550373eb263db4ca98b5f00694d73b18b9a02e7185",
+ "sha256:5ce479ecc068bc2a74cb98dd8dba99e070d1b2f4a8371a7dfe631f85db70fe6e",
+ "sha256:5dbc1bcc7413eebe5f18196e22804a3be1bfdfc7e2afd415e12c068624d48247",
+ "sha256:6277d426e2f31bdbacb377d17a7475e32b2d7d1f02faaecc48d8e370c6a3ff31",
+ "sha256:66372c2a01782c5fe8e04bff4a2a0121a9897e19223d9eab30c54c50b2ebeb7f",
+ "sha256:670fa596984b08a4a769491cbdf22350431970d0112e03d7e4eeaecaafcd0fec",
+ "sha256:6f435946b7bf7a1b438b4e6b149b947c837cb23c704e780c19ba3e6855dbbdd3",
+ "sha256:7413167c507a768eafb5424413c5b2f515c606be5bb4ef8c5dee43925aa5718b",
+ "sha256:7c3d389e8d76a49923683123730c33e9553063d9041658f23897f0b396b2386f",
+ "sha256:7d77b6f63f806578c604dca209280e4c54f0fa9a8128bb8d2cc5fb6f99da4150",
+ "sha256:7e76b9cfbf5ced1aca15a0e5b6f229344d9b3123439ffce552b11faab0114a02",
+ "sha256:7f3502f03b4da52bbe8ba962621daa846f38489cae5c4a7b5d738f15f6443d17",
+ "sha256:7fe9739a686dc44733d52d6e4f7b9c77b285e49edf8570754b322bca6b85b4cc",
+ "sha256:83ab366777ea45d58f72593adf35d36ca911ea8bd838483c1823b883a121b0e4",
+ "sha256:84077821c85f222362b72fdc44f7a3a13587a013a45cf14534df1cbbdc9a6796",
+ "sha256:8bb381f777351bd534462f63e1c6afb10a7caa9fa2a421ae22c26e796fe31b1f",
+ "sha256:92da587eee39a52c91aebea8b850e4e4f095fe5928d415cb7ed656b3460ae79a",
+ "sha256:9301cc6db4d83d2c0719f7fcda37229691745168bf6ae849bea2e85fc769175d",
+ "sha256:965fd0cf4694d76f6564896b422724ec7b959ef927a7cb187fc6b3f4e4f59833",
+ "sha256:99d6a550425cc51c656331af0e2b1651e90eaaa23fb4acde577cf15068e2e20f",
+ "sha256:99ef6289b62042500d581170d06e17f5353b111a15aa6b25b05b91c6886df8fc",
+ "sha256:a1409c4eccb6981c7baabc8888d3550df518add6e06fe74fa1d9312c1838652d",
+ "sha256:a74fcf77d979364f9b69fcf8200849ca29a374973dc193a7317698aa37d8b01c",
+ "sha256:aaa179975a64790c1f2701ac562b5eeb733946eeb036b5bcca05c8d928a62f10",
+ "sha256:ac69b394764bb857429b031d29d9604842bc4cbfd964d764b1af1868eeebc4f0",
+ "sha256:b45d4503de8f4f3dc02f1d28a9b039e5504a02cc18906cfe744c11def942e9eb",
+ "sha256:b7d893c8cf0e2429b823ef1a1d360a25950ed11f0e2a9df2b5198821832e1947",
+ "sha256:b8eb28995771c087a73338f695a08c9abfdf723d185e57b97f6175c5051ff1ae",
+ "sha256:b91d529b47798c016d4b4c1d06cc826ac40d196da54f0de3c519f5a297c5076a",
+ "sha256:bc365ce25f6c7c5ed70e4bc674f9137f52b7dd6a125037f9132a7be52b8a252f",
+ "sha256:bf29304a8011feb58913c382902fde3395957a47645bf848eea695839aa101b7",
+ "sha256:c06bf3f38f0707592898428636cbb75d0a846651b053a1cf748763e3063a6925",
+ "sha256:c77d10ec3c1cf328b2f501ca32583625987ea0f23a0c2a49b37a39ee5c4c4630",
+ "sha256:cd196d056b40af073d95a2879678585f0b74ad35190fac04ca67954c582c6b61",
+ "sha256:d7a353ebfa7154c871a35caca7bfd8f9e18666829a1dc187115b80e35a29393e",
+ "sha256:d84308f097d7a513359757c69707ad339da799e53b7393819ec2ea36bc4beb58",
+ "sha256:dd7ef715ccb8040954d44cfeff17e6b8e9f79c8019daae2fd30a8806ef5435c0",
+ "sha256:e672cf9caaf669053121f1766d659a8813bd547edef6e009205378faf45c67b8",
+ "sha256:ecc6148228c9ae25ce403eade13a0961de1cb016bdb35c6eafd8e7b87ad028b1",
+ "sha256:f1c5742c31ba7d72f2dedf7968998730664b45e38827637e0f04a2ac7de2f5f1",
+ "sha256:f1d6e4b7b2ae3a6a9df53efbf199e4bfcff0959dbdb5fd9ced34d4407348e39a",
+ "sha256:f2fc053228a6bd3a17a9b0a3f15c3ab3cf95727b00557e92e1cfe094b88cc662",
+ "sha256:f57515750d07e14743db55d59759893fdb21d2668f39e549a7d6cad5d70f9fea",
+ "sha256:f85151ec5a232335f1be022b09fbbe459042ea1951d8a48fef251223fc67eee1",
+ "sha256:fb0315a2b26fde4005a7c401707c5352df274460f2f85b209cf6024271373013",
+ "sha256:fc0916c4295c64d6890a46e02d4482bb5ccf33bf1a824c0eaa9e83b148291f90",
+ "sha256:fd24fd140b69f0b0bcc9165c397e9b2e89ecbeda83303abf2a072609f60239e2",
+ "sha256:fdae0120cddc839eb8e3c15faa8ad541cc6d906d3eb24d82fb041cfe2807bc1e",
+ "sha256:fe00f4fe11c8a521b173e6324d862ee7ee3412bf7107570c9b564fe1119b56fb"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==2024.4.28"
+ },
+ "requests": {
+ "hashes": [
+ "sha256:f2c3881dddb70d056c5bd7600a4fae312b2a300e39be6a118d30b90bd27262b5",
+ "sha256:fa5490319474c82ef1d2c9bc459d3652e3ae4ef4c4ebdd18a21145a47ca4b6b8"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.8'",
+ "version": "==2.32.0"
+ },
+ "six": {
+ "hashes": [
+ "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926",
+ "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
+ "version": "==1.16.0"
+ },
+ "urllib3": {
+ "hashes": [
+ "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472",
+ "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.8'",
+ "version": "==2.2.2"
+ },
+ "watchdog": {
+ "hashes": [
+ "sha256:0b4359067d30d5b864e09c8597b112fe0a0a59321a0f331498b013fb097406b4",
+ "sha256:0d8a7e523ef03757a5aa29f591437d64d0d894635f8a50f370fe37f913ce4e19",
+ "sha256:0e83619a2d5d436a7e58a1aea957a3c1ccbf9782c43c0b4fed80580e5e4acd1a",
+ "sha256:10b6683df70d340ac3279eff0b2766813f00f35a1d37515d2c99959ada8f05fa",
+ "sha256:132937547a716027bd5714383dfc40dc66c26769f1ce8a72a859d6a48f371f3a",
+ "sha256:1cdcfd8142f604630deef34722d695fb455d04ab7cfe9963055df1fc69e6727a",
+ "sha256:2d468028a77b42cc685ed694a7a550a8d1771bb05193ba7b24006b8241a571a1",
+ "sha256:32be97f3b75693a93c683787a87a0dc8db98bb84701539954eef991fb35f5fbc",
+ "sha256:770eef5372f146997638d737c9a3c597a3b41037cfbc5c41538fc27c09c3a3f9",
+ "sha256:7c7d4bf585ad501c5f6c980e7be9c4f15604c7cc150e942d82083b31a7548930",
+ "sha256:88456d65f207b39f1981bf772e473799fcdc10801062c36fd5ad9f9d1d463a73",
+ "sha256:914285126ad0b6eb2258bbbcb7b288d9dfd655ae88fa28945be05a7b475a800b",
+ "sha256:936acba76d636f70db8f3c66e76aa6cb5136a936fc2a5088b9ce1c7a3508fc83",
+ "sha256:980b71510f59c884d684b3663d46e7a14b457c9611c481e5cef08f4dd022eed7",
+ "sha256:984306dc4720da5498b16fc037b36ac443816125a3705dfde4fd90652d8028ef",
+ "sha256:a2cffa171445b0efa0726c561eca9a27d00a1f2b83846dbd5a4f639c4f8ca8e1",
+ "sha256:aa160781cafff2719b663c8a506156e9289d111d80f3387cf3af49cedee1f040",
+ "sha256:b2c45f6e1e57ebb4687690c05bc3a2c1fb6ab260550c4290b8abb1335e0fd08b",
+ "sha256:b4dfbb6c49221be4535623ea4474a4d6ee0a9cef4a80b20c28db4d858b64e270",
+ "sha256:baececaa8edff42cd16558a639a9b0ddf425f93d892e8392a56bf904f5eff22c",
+ "sha256:bcfd02377be80ef3b6bc4ce481ef3959640458d6feaae0bd43dd90a43da90a7d",
+ "sha256:c0b14488bd336c5b1845cee83d3e631a1f8b4e9c5091ec539406e4a324f882d8",
+ "sha256:c100d09ac72a8a08ddbf0629ddfa0b8ee41740f9051429baa8e31bb903ad7508",
+ "sha256:c344453ef3bf875a535b0488e3ad28e341adbd5a9ffb0f7d62cefacc8824ef2b",
+ "sha256:c50f148b31b03fbadd6d0b5980e38b558046b127dc483e5e4505fcef250f9503",
+ "sha256:c82253cfc9be68e3e49282831afad2c1f6593af80c0daf1287f6a92657986757",
+ "sha256:cd67c7df93eb58f360c43802acc945fa8da70c675b6fa37a241e17ca698ca49b",
+ "sha256:d7ab624ff2f663f98cd03c8b7eedc09375a911794dfea6bf2a359fcc266bff29",
+ "sha256:e252f8ca942a870f38cf785aef420285431311652d871409a64e2a0a52a2174c",
+ "sha256:ede7f010f2239b97cc79e6cb3c249e72962404ae3865860855d5cbe708b0fd22",
+ "sha256:eeea812f38536a0aa859972d50c76e37f4456474b02bd93674d1947cf1e39578",
+ "sha256:f15edcae3830ff20e55d1f4e743e92970c847bcddc8b7509bcd172aa04de506e",
+ "sha256:f5315a8c8dd6dd9425b974515081fc0aadca1d1d61e078d2246509fd756141ee",
+ "sha256:f6ee8dedd255087bc7fe82adf046f0b75479b989185fb0bdf9a98b612170eac7",
+ "sha256:f7c739888c20f99824f7aa9d31ac8a97353e22d0c0e54703a547a218f6637eb3"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==4.0.2"
+ },
+ "wcmatch": {
+ "hashes": [
+ "sha256:567d66b11ad74384954c8af86f607857c3bdf93682349ad32066231abd556c92",
+ "sha256:af25922e2b6dbd1550fa37a4c8de7dd558d6c1bb330c641de9b907b9776cb3c4"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==9.0"
+ },
+ "zipp": {
+ "hashes": [
+ "sha256:9960cd8967c8f85a56f920d5d507274e74f9ff813a0ab8889a5b5be2daf44064",
+ "sha256:c22b14cc4763c5a5b04134207736c107db42e9d3ef2d9779d465f5f1bcba572b"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==3.20.1"
+ }
+ },
+ "develop": {}
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/README.md b/vendor/github.com/testcontainers/testcontainers-go/README.md
new file mode 100644
index 0000000..ea21c63
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/README.md
@@ -0,0 +1,21 @@
+# Testcontainers
+
+[![Main pipeline](https://github.com/testcontainers/testcontainers-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/testcontainers/testcontainers-go/actions/workflows/ci.yml)
+[![GoDoc Reference](https://pkg.go.dev/badge/github.com/testcontainers/testcontainers-go.svg)](https://pkg.go.dev/github.com/testcontainers/testcontainers-go)
+[![Go Report Card](https://goreportcard.com/badge/github.com/testcontainers/testcontainers-go)](https://goreportcard.com/report/github.com/testcontainers/testcontainers-go)
+[![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=testcontainers_testcontainers-go&metric=alert_status)](https://sonarcloud.io/summary/new_code?id=testcontainers_testcontainers-go)
+[![License](https://img.shields.io/badge/license-MIT-blue)](https://github.com/testcontainers/testcontainers-go/blob/main/LICENSE)
+
+[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://github.com/codespaces/new?hide_repo_select=true&ref=main&repo=141451032&machine=standardLinux32gb&devcontainer_path=.devcontainer%2Fdevcontainer.json&location=EastUs)
+
+[![Join our Slack](https://img.shields.io/badge/Slack-4A154B?logo=slack)](https://testcontainers.slack.com/)
+
+_Testcontainers for Go_ is a Go package that makes it simple to create and clean up container-based dependencies for
+automated integration/smoke tests. The clean, easy-to-use API enables developers to programmatically define containers
+that should be run as part of a test and clean up those resources when the test is done.
+
+You can find more information about _Testcontainers for Go_ at [golang.testcontainers.org](https://golang.testcontainers.org), which is rendered from the [./docs](./docs) directory.
+
+## Using _Testcontainers for Go_
+
+Please visit [the quickstart guide](https://golang.testcontainers.org/quickstart) to understand how to add the dependency to your Go project.
diff --git a/vendor/github.com/testcontainers/testcontainers-go/RELEASING.md b/vendor/github.com/testcontainers/testcontainers-go/RELEASING.md
new file mode 100644
index 0000000..31a9954
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/RELEASING.md
@@ -0,0 +1,201 @@
+# Releasing Testcontainers for Go
+
+In order to create a release, we have added a shell script that performs all the tasks for you, allowing a dry-run mode for checking it before creating the release. We are going to explain how to use it in this document.
+
+## Prerequisites
+
+First, it's really important to check that the [version.go](./internal/version.go) file is up-to-date, containing the right version you want to create. That file will be used by the automation to perform the release.
+Once the version file is correct in the repository:
+
+Second, check that the git remote for the `origin` is pointing to `github.com/testcontainers/testcontainers-go`. You can check it by running:
+
+```shell
+git remote -v
+```
+
+## Prepare the release
+
+Once the remote is properly set, please follow these steps:
+
+- Run the [pre-release.sh](./scripts/pre-release.sh) shell script to run it in dry-run mode.
+- You can use the `DRY_RUN` variable to enable or disable the dry-run mode. By default, it's enabled.
+- To prepare for a release, updating the _Testcontainers for Go_ dependency for all the modules and examples, without performing any Git operation:
+
+ DRY_RUN="false" ./scripts/pre-release.sh
+
+- The script will update the [mkdocs.yml](./mkdocs.yml) file, updating the `latest_version` field to the current version.
+- The script will update the `go.mod` files for each Go modules and example modules under the examples and modules directories, updating the version of the testcontainers-go dependency to the recently created tag.
+- The script will modify the docs for each Go module **that was not yet released**, updating the version of _Testcontainers for Go_ where it was added to the recently created tag.
+
+An example execution, with dry-run mode enabled:
+
+```shell
+sed "s/latest_version: .*/latest_version: v0.20.1/g" /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/mkdocs.yml > /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/mkdocs.yml.tmp
+mv /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/mkdocs.yml.tmp /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/mkdocs.yml
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" bigtable/go.mod > bigtable/go.mod.tmp
+mv bigtable/go.mod.tmp bigtable/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" cockroachdb/go.mod > cockroachdb/go.mod.tmp
+mv cockroachdb/go.mod.tmp cockroachdb/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" consul/go.mod > consul/go.mod.tmp
+mv consul/go.mod.tmp consul/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" datastore/go.mod > datastore/go.mod.tmp
+mv datastore/go.mod.tmp datastore/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" firestore/go.mod > firestore/go.mod.tmp
+mv firestore/go.mod.tmp firestore/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" mongodb/go.mod > mongodb/go.mod.tmp
+mv mongodb/go.mod.tmp mongodb/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" nginx/go.mod > nginx/go.mod.tmp
+mv nginx/go.mod.tmp nginx/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" pubsub/go.mod > pubsub/go.mod.tmp
+mv pubsub/go.mod.tmp pubsub/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" spanner/go.mod > spanner/go.mod.tmp
+mv spanner/go.mod.tmp spanner/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" toxiproxy/go.mod > toxiproxy/go.mod.tmp
+mv toxiproxy/go.mod.tmp toxiproxy/go.mod
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" compose/go.mod > compose/go.mod.tmp
+mv compose/go.mod.tmp compose/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" couchbase/go.mod > couchbase/go.mod.tmp
+mv couchbase/go.mod.tmp couchbase/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" localstack/go.mod > localstack/go.mod.tmp
+mv localstack/go.mod.tmp localstack/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" mysql/go.mod > mysql/go.mod.tmp
+mv mysql/go.mod.tmp mysql/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" neo4j/go.mod > neo4j/go.mod.tmp
+mv neo4j/go.mod.tmp neo4j/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" postgres/go.mod > postgres/go.mod.tmp
+mv postgres/go.mod.tmp postgres/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" pulsar/go.mod > pulsar/go.mod.tmp
+mv pulsar/go.mod.tmp pulsar/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" redis/go.mod > redis/go.mod.tmp
+mv redis/go.mod.tmp redis/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" redpanda/go.mod > redpanda/go.mod.tmp
+mv redpanda/go.mod.tmp redpanda/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" vault/go.mod > vault/go.mod.tmp
+mv vault/go.mod.tmp vault/go.mod
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+sed "s/Not available until the next release of testcontainers-go <a href=\"https:\/\/github.com\/testcontainers\/testcontainers-go\"><span class=\"tc-version\">:material-tag: main<\/span><\/a>/Since testcontainers-go <a href=\"https:\/\/github.com\/testcontainers\/testcontainers-go\/releases\/tag\/v0.20.1\"><span class=\"tc-version\">:material-tag: v0.20.1<\/span><\/a>/g" couchbase.md > couchbase.md.tmp
+mv couchbase.md.tmp couchbase.md
+sed "s/Not available until the next release of testcontainers-go <a href=\"https:\/\/github.com\/testcontainers\/testcontainers-go\"><span class=\"tc-version\">:material-tag: main<\/span><\/a>/Since testcontainers-go <a href=\"https:\/\/github.com\/testcontainers\/testcontainers-go\/releases\/tag\/v0.20.1\"><span class=\"tc-version\">:material-tag: v0.20.1<\/span><\/a>/g" localstack.md > localstack.md.tmp
+mv localstack.md.tmp localstack.md
+sed "s/Not available until the next release of testcontainers-go <a href=\"https:\/\/github.com\/testcontainers\/testcontainers-go\"><span class=\"tc-version\">:material-tag: main<\/span><\/a>/Since testcontainers-go <a href=\"https:\/\/github.com\/testcontainers\/testcontainers-go\/releases\/tag\/v0.20.1\"><span class=\"tc-version\">:material-tag: v0.20.1<\/span><\/a>/g" mysql.md > mysql.md.tmp
+mv mysql.md.tmp mysql.md
+sed "s/Not available until the next release of testcontainers-go <a href=\"https:\/\/github.com\/testcontainers\/testcontainers-go\"><span class=\"tc-version\">:material-tag: main<\/span><\/a>/Since testcontainers-go <a href=\"https:\/\/github.com\/testcontainers\/testcontainers-go\/releases\/tag\/v0.20.1\"><span class=\"tc-version\">:material-tag: v0.20.1<\/span><\/a>/g" neo4j.md > neo4j.md.tmp
+mv neo4j.md.tmp neo4j.md
+sed "s/Not available until the next release of testcontainers-go <a href=\"https:\/\/github.com\/testcontainers\/testcontainers-go\"><span class=\"tc-version\">:material-tag: main<\/span><\/a>/Since testcontainers-go <a href=\"https:\/\/github.com\/testcontainers\/testcontainers-go\/releases\/tag\/v0.20.1\"><span class=\"tc-version\">:material-tag: v0.20.1<\/span><\/a>/g" postgres.md > postgres.md.tmp
+mv postgres.md.tmp postgres.md
+sed "s/Not available until the next release of testcontainers-go <a href=\"https:\/\/github.com\/testcontainers\/testcontainers-go\"><span class=\"tc-version\">:material-tag: main<\/span><\/a>/Since testcontainers-go <a href=\"https:\/\/github.com\/testcontainers\/testcontainers-go\/releases\/tag\/v0.20.1\"><span class=\"tc-version\">:material-tag: v0.20.1<\/span><\/a>/g" pulsar.md > pulsar.md.tmp
+mv pulsar.md.tmp pulsar.md
+sed "s/Not available until the next release of testcontainers-go <a href=\"https:\/\/github.com\/testcontainers\/testcontainers-go\"><span class=\"tc-version\">:material-tag: main<\/span><\/a>/Since testcontainers-go <a href=\"https:\/\/github.com\/testcontainers\/testcontainers-go\/releases\/tag\/v0.20.1\"><span class=\"tc-version\">:material-tag: v0.20.1<\/span><\/a>/g" redis.md > redis.md.tmp
+mv redis.md.tmp redis.md
+sed "s/Not available until the next release of testcontainers-go <a href=\"https:\/\/github.com\/testcontainers\/testcontainers-go\"><span class=\"tc-version\">:material-tag: main<\/span><\/a>/Since testcontainers-go <a href=\"https:\/\/github.com\/testcontainers\/testcontainers-go\/releases\/tag\/v0.20.1\"><span class=\"tc-version\">:material-tag: v0.20.1<\/span><\/a>/g" redpanda.md > redpanda.md.tmp
+mv redpanda.md.tmp redpanda.md
+sed "s/Not available until the next release of testcontainers-go <a href=\"https:\/\/github.com\/testcontainers\/testcontainers-go\"><span class=\"tc-version\">:material-tag: main<\/span><\/a>/Since testcontainers-go <a href=\"https:\/\/github.com\/testcontainers\/testcontainers-go\/releases\/tag\/v0.20.1\"><span class=\"tc-version\">:material-tag: v0.20.1<\/span><\/a>/g" vault.md > vault.md.tmp
+mv vault.md.tmp vault.md
+```
+
+## Performing a release
+
+Once you are satisfied with the modified files in the git state:
+
+- Run the [release.sh](./scripts/release.sh) shell script to create the release in dry-run mode.
+- You can use the `DRY_RUN` variable to enable or disable the dry-run mode. By default, it's enabled.
+
+ DRY_RUN="false" ./scripts/release.sh
+
+- You can define the bump type, using the `BUMP_TYPE` environment variable. The default value is `minor`, but you can also use `major` or `patch` (the script will fail if the value is not one of these three):
+
+ BUMP_TYPE="major" ./scripts/release.sh
+
+- The script will commit the current state of the git repository, if the `DRY_RUN` variable is set to `false`. The modified files are the ones modified by the `pre-release.sh` script.
+- The script will create a git tag with the current value of the [version.go](./internal/version.go) file, starting with `v`: e.g. `v0.18.0`, for the following Go modules:
+ - the root module, representing the Testcontainers for Go library.
+ - all the Go modules living in both the `examples` and `modules` directory. The git tag value for these Go modules will be created using this name convention:
+
+ "${directory}/${module_name}/${version}", e.g. "examples/mysql/v0.18.0", "modules/compose/v0.18.0"
+
+- The script will update the [version.go](./internal/version.go) file, setting the next development version to the value defined in the `BUMP_TYPE` environment variable. For example, if the current version is `v0.18.0`, the script will update the [version.go](./internal/version.go) file with the next development version `v0.19.0`.
+- The script will create a commit in the **main** branch if the `DRY_RUN` variable is set to `false`.
+- The script will push the main branch including the tags to the upstream repository, https://github.com/testcontainers/testcontainers-go, if the `DRY_RUN` variable is set to `false`.
+- Finally, the script will trigger the Golang proxy to update the modules in https://proxy.golang.org/, if the `DRY_RUN` variable is set to `false`.
+
+An example execution, with dry-run mode enabled:
+
+```
+$ ./scripts/release.sh
+Current version: v0.20.1
+git add /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/internal/version.go
+git add /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/mkdocs.yml
+git add examples/**/go.*
+git add modules/**/go.*
+git commit -m chore: use new version (v0.20.1) in modules and examples
+git tag v0.20.1
+git tag examples/bigtable/v0.20.1
+git tag examples/datastore/v0.20.1
+git tag examples/firestore/v0.20.1
+git tag examples/mongodb/v0.20.1
+git tag examples/nginx/v0.20.1
+git tag examples/pubsub/v0.20.1
+git tag examples/spanner/v0.20.1
+git tag examples/toxiproxy/v0.20.1
+git tag modules/cockroachdb/v0.20.1
+git tag modules/compose/v0.20.1
+git tag modules/couchbase/v0.20.1
+git tag modules/localstack/v0.20.1
+git tag modules/mysql/v0.20.1
+git tag modules/neo4j/v0.20.1
+git tag modules/postgres/v0.20.1
+git tag modules/pulsar/v0.20.1
+git tag modules/redis/v0.20.1
+git tag modules/redpanda/v0.20.1
+git tag modules/vault/v0.20.1
+WARNING: The requested image's platform (linux/amd64) does not match the detected host platform (linux/arm64/v8) and no specific platform was requested
+Producing a minor bump of the version, from 0.20.1 to 0.21.0
+sed "s/const Version = ".*"/const Version = "0.21.0"/g" /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/internal/version.go > /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/internal/version.go.tmp
+mv /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/internal/version.go.tmp /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/internal/version.go
+git add /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/internal/version.go
+git commit -m chore: prepare for next minor development cycle (0.21.0)
+git push origin main --tags
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/bigtable/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/datastore/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/firestore/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/mongodb/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/nginx/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/pubsub/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/spanner/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/toxiproxy/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/cockroachdb/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/compose/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/couchbase/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/localstack/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/mysql/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/neo4j/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/postgres/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/pulsar/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/redis/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/redpanda/@v/v0.20.1.info
+curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/vault/@v/v0.20.1.info
+```
+
+Right after that, you have to:
+- Verify that the commits are in the upstream repository, otherwise, update it with the current state of the main branch.
diff --git a/vendor/github.com/testcontainers/testcontainers-go/cleanup.go b/vendor/github.com/testcontainers/testcontainers-go/cleanup.go
new file mode 100644
index 0000000..bd93713
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/cleanup.go
@@ -0,0 +1,123 @@
+package testcontainers
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+ "time"
+)
+
+// TerminateOptions is a type that holds the options for terminating a container.
+type TerminateOptions struct {
+ ctx context.Context
+ stopTimeout *time.Duration
+ volumes []string
+}
+
+// TerminateOption is a type that represents an option for terminating a container.
+type TerminateOption func(*TerminateOptions)
+
+// NewTerminateOptions returns a fully initialised TerminateOptions.
+// Defaults: StopTimeout: 10 seconds.
+func NewTerminateOptions(ctx context.Context, opts ...TerminateOption) *TerminateOptions {
+ timeout := time.Second * 10
+ options := &TerminateOptions{
+ stopTimeout: &timeout,
+ ctx: ctx,
+ }
+ for _, opt := range opts {
+ opt(options)
+ }
+ return options
+}
+
+// Context returns the context to use during a Terminate.
+func (o *TerminateOptions) Context() context.Context {
+ return o.ctx
+}
+
+// StopTimeout returns the stop timeout to use during a Terminate.
+func (o *TerminateOptions) StopTimeout() *time.Duration {
+ return o.stopTimeout
+}
+
+// Cleanup performs any clean up needed
+func (o *TerminateOptions) Cleanup() error {
+	// TODO: simplify this when we perform the client refactor.
+ if len(o.volumes) == 0 {
+ return nil
+ }
+ client, err := NewDockerClientWithOpts(o.ctx)
+ if err != nil {
+ return fmt.Errorf("docker client: %w", err)
+ }
+ defer client.Close()
+ // Best effort to remove all volumes.
+ var errs []error
+ for _, volume := range o.volumes {
+ if errRemove := client.VolumeRemove(o.ctx, volume, true); errRemove != nil {
+ errs = append(errs, fmt.Errorf("volume remove %q: %w", volume, errRemove))
+ }
+ }
+ return errors.Join(errs...)
+}
+
+// StopContext returns a TerminateOption that sets the context.
+// Default: context.Background().
+func StopContext(ctx context.Context) TerminateOption {
+ return func(c *TerminateOptions) {
+ c.ctx = ctx
+ }
+}
+
+// StopTimeout returns a TerminateOption that sets the timeout.
+// Default: See [Container.Stop].
+func StopTimeout(timeout time.Duration) TerminateOption {
+ return func(c *TerminateOptions) {
+ c.stopTimeout = &timeout
+ }
+}
+
+// RemoveVolumes returns a TerminateOption that sets additional volumes to remove.
+// This is useful when the container creates named volumes that should be removed
+// which are not removed by default.
+// Default: nil.
+func RemoveVolumes(volumes ...string) TerminateOption {
+ return func(c *TerminateOptions) {
+ c.volumes = volumes
+ }
+}
+
+// TerminateContainer calls [Container.Terminate] on the container if it is not nil.
+//
+// This should be called as a defer directly after [GenericContainer](...)
+// or a modules Run(...) to ensure the container is terminated when the
+// function ends.
+func TerminateContainer(container Container, options ...TerminateOption) error {
+ if isNil(container) {
+ return nil
+ }
+
+ err := container.Terminate(context.Background(), options...)
+ if !isCleanupSafe(err) {
+ return fmt.Errorf("terminate: %w", err)
+ }
+
+ return nil
+}
+
+// isNil returns true if val is nil or a nil instance false otherwise.
+func isNil(val any) bool {
+ if val == nil {
+ return true
+ }
+
+ valueOf := reflect.ValueOf(val)
+ switch valueOf.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice:
+ return valueOf.IsNil()
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/commons-test.mk b/vendor/github.com/testcontainers/testcontainers-go/commons-test.mk
new file mode 100644
index 0000000..a7a214d
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/commons-test.mk
@@ -0,0 +1,65 @@
+ROOT_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
+GOBIN= $(GOPATH)/bin
+
+define go_install
+ go install $(1)
+endef
+
+$(GOBIN)/golangci-lint:
+ $(call go_install,github.com/golangci/golangci-lint/cmd/golangci-lint@v1.63.4)
+
+$(GOBIN)/gotestsum:
+ $(call go_install,gotest.tools/gotestsum@latest)
+
+$(GOBIN)/mockery:
+ $(call go_install,github.com/vektra/mockery/v2@v2.45)
+
+.PHONY: install
+install: $(GOBIN)/golangci-lint $(GOBIN)/gotestsum $(GOBIN)/mockery
+
+.PHONY: clean
+clean:
+ rm $(GOBIN)/golangci-lint
+ rm $(GOBIN)/gotestsum
+ rm $(GOBIN)/mockery
+
+.PHONY: dependencies-scan
+dependencies-scan:
+ @echo ">> Scanning dependencies in $(CURDIR)..."
+ go list -json -m all | docker run --rm -i sonatypecommunity/nancy:latest sleuth --skip-update-check
+
+.PHONY: lint
+lint: $(GOBIN)/golangci-lint
+ golangci-lint run --verbose -c $(ROOT_DIR)/.golangci.yml --fix
+
+.PHONY: generate
+generate: $(GOBIN)/mockery
+ go generate ./...
+
+.PHONY: test-%
+test-%: $(GOBIN)/gotestsum
+ @echo "Running $* tests..."
+ gotestsum \
+ --format short-verbose \
+ --rerun-fails=5 \
+ --packages="./..." \
+ --junitfile TEST-unit.xml \
+ -- \
+ -v \
+ -coverprofile=coverage.out \
+ -timeout=30m \
+ -race
+
+.PHONY: tools
+tools:
+ go mod download
+
+.PHONY: test-tools
+test-tools: $(GOBIN)/gotestsum
+
+.PHONY: tidy
+tidy:
+ go mod tidy
+
+.PHONY: pre-commit
+pre-commit: generate tidy lint
diff --git a/vendor/github.com/testcontainers/testcontainers-go/config.go b/vendor/github.com/testcontainers/testcontainers-go/config.go
new file mode 100644
index 0000000..91a3331
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/config.go
@@ -0,0 +1,29 @@
+package testcontainers
+
+import (
+ "github.com/testcontainers/testcontainers-go/internal/config"
+)
+
+// TestcontainersConfig represents the configuration for Testcontainers
+type TestcontainersConfig struct {
+ Host string `properties:"docker.host,default="` // Deprecated: use Config.Host instead
+ TLSVerify int `properties:"docker.tls.verify,default=0"` // Deprecated: use Config.TLSVerify instead
+ CertPath string `properties:"docker.cert.path,default="` // Deprecated: use Config.CertPath instead
+ RyukDisabled bool `properties:"ryuk.disabled,default=false"` // Deprecated: use Config.RyukDisabled instead
+ RyukPrivileged bool `properties:"ryuk.container.privileged,default=false"` // Deprecated: use Config.RyukPrivileged instead
+ Config config.Config
+}
+
+// ReadConfig reads from testcontainers properties file, storing the result in a singleton instance
+// of the TestcontainersConfig struct
+func ReadConfig() TestcontainersConfig {
+ cfg := config.Read()
+ return TestcontainersConfig{
+ Host: cfg.Host,
+ TLSVerify: cfg.TLSVerify,
+ CertPath: cfg.CertPath,
+ RyukDisabled: cfg.RyukDisabled,
+ RyukPrivileged: cfg.RyukPrivileged,
+ Config: cfg,
+ }
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/container.go b/vendor/github.com/testcontainers/testcontainers-go/container.go
new file mode 100644
index 0000000..f1d2a78
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/container.go
@@ -0,0 +1,560 @@
+package testcontainers
+
+import (
+ "archive/tar"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/cpuguy83/dockercfg"
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/registry"
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/go-connections/nat"
+ "github.com/google/uuid"
+ "github.com/moby/patternmatcher/ignorefile"
+
+ tcexec "github.com/testcontainers/testcontainers-go/exec"
+ "github.com/testcontainers/testcontainers-go/internal/core"
+ "github.com/testcontainers/testcontainers-go/log"
+ "github.com/testcontainers/testcontainers-go/wait"
+)
+
+// DeprecatedContainer shows methods that were supported before, but are now deprecated
+// Deprecated: Use Container
+type DeprecatedContainer interface {
+ GetHostEndpoint(ctx context.Context, port string) (string, string, error)
+ GetIPAddress(ctx context.Context) (string, error)
+ LivenessCheckPorts(ctx context.Context) (nat.PortSet, error)
+ Terminate(ctx context.Context) error
+}
+
+// Container allows getting info about and controlling a single container instance
+type Container interface {
+ GetContainerID() string // get the container id from the provider
+ Endpoint(context.Context, string) (string, error) // get proto://ip:port string for the lowest exposed port
+ PortEndpoint(ctx context.Context, port nat.Port, proto string) (string, error) // get proto://ip:port string for the given exposed port
+ Host(context.Context) (string, error) // get host where the container port is exposed
+ Inspect(context.Context) (*container.InspectResponse, error) // get container info
+ MappedPort(context.Context, nat.Port) (nat.Port, error) // get externally mapped port for a container port
+ Ports(context.Context) (nat.PortMap, error) // Deprecated: Use c.Inspect(ctx).NetworkSettings.Ports instead
+ SessionID() string // get session id
+ IsRunning() bool // IsRunning returns true if the container is running, false otherwise.
+ Start(context.Context) error // start the container
+ Stop(context.Context, *time.Duration) error // stop the container
+
+ // Terminate stops and removes the container and its image if it was built and not flagged as kept.
+ Terminate(ctx context.Context, opts ...TerminateOption) error
+
+ Logs(context.Context) (io.ReadCloser, error) // Get logs of the container
+ FollowOutput(LogConsumer) // Deprecated: it will be removed in the next major release
+ StartLogProducer(context.Context, ...LogProductionOption) error // Deprecated: Use the ContainerRequest instead
+ StopLogProducer() error // Deprecated: it will be removed in the next major release
+ Name(context.Context) (string, error) // Deprecated: Use c.Inspect(ctx).Name instead
+ State(context.Context) (*container.State, error) // returns container's running state
+ Networks(context.Context) ([]string, error) // get container networks
+ NetworkAliases(context.Context) (map[string][]string, error) // get container network aliases for a network
+ Exec(ctx context.Context, cmd []string, options ...tcexec.ProcessOption) (int, io.Reader, error)
+ ContainerIP(context.Context) (string, error) // get container ip
+ ContainerIPs(context.Context) ([]string, error) // get all container IPs
+ CopyToContainer(ctx context.Context, fileContent []byte, containerFilePath string, fileMode int64) error
+ CopyDirToContainer(ctx context.Context, hostDirPath string, containerParentPath string, fileMode int64) error
+ CopyFileToContainer(ctx context.Context, hostFilePath string, containerFilePath string, fileMode int64) error
+ CopyFileFromContainer(ctx context.Context, filePath string) (io.ReadCloser, error)
+ GetLogProductionErrorChannel() <-chan error
+}
+
+// ImageBuildInfo defines what is needed to build an image
+type ImageBuildInfo interface {
+ BuildOptions() (types.ImageBuildOptions, error) // converts the ImageBuildInfo to a types.ImageBuildOptions
+	GetContext() (io.Reader, error)                  // the build context as a reader
+ GetDockerfile() string // the relative path to the Dockerfile, including the file itself
+ GetRepo() string // get repo label for image
+ GetTag() string // get tag label for image
+ BuildLogWriter() io.Writer // for output of build log, use io.Discard to disable the output
+ ShouldBuildImage() bool // return true if the image needs to be built
+ GetBuildArgs() map[string]*string // return the environment args used to build the Dockerfile
+ GetAuthConfigs() map[string]registry.AuthConfig // Deprecated. Testcontainers will detect registry credentials automatically. Return the auth configs to be able to pull from an authenticated docker registry
+}
+
+// FromDockerfile represents the parameters needed to build an image from a Dockerfile
+// rather than using a pre-built one
+type FromDockerfile struct {
+ Context string // the path to the context of the docker build
+ ContextArchive io.ReadSeeker // the tar archive file to send to docker that contains the build context
+ Dockerfile string // the path from the context to the Dockerfile for the image, defaults to "Dockerfile"
+ Repo string // the repo label for image, defaults to UUID
+ Tag string // the tag label for image, defaults to UUID
+ BuildArgs map[string]*string // enable user to pass build args to docker daemon
+ PrintBuildLog bool // Deprecated: Use BuildLogWriter instead
+ BuildLogWriter io.Writer // for output of build log, defaults to io.Discard
+ AuthConfigs map[string]registry.AuthConfig // Deprecated. Testcontainers will detect registry credentials automatically. Enable auth configs to be able to pull from an authenticated docker registry
+ // KeepImage describes whether DockerContainer.Terminate should not delete the
+ // container image. Useful for images that are built from a Dockerfile and take a
+	// long time to build. Keeping the image also allows Docker to reuse it.
+ KeepImage bool
+ // BuildOptionsModifier Modifier for the build options before image build. Use it for
+ // advanced configurations while building the image. Please consider that the modifier
+ // is called after the default build options are set.
+ BuildOptionsModifier func(*types.ImageBuildOptions)
+}
+
+type ContainerFile struct {
+ HostFilePath string // If Reader is present, HostFilePath is ignored
+ Reader io.Reader // If Reader is present, HostFilePath is ignored
+ ContainerFilePath string
+ FileMode int64
+}
+
+// validate validates the ContainerFile
+func (c *ContainerFile) validate() error {
+ if c.HostFilePath == "" && c.Reader == nil {
+ return errors.New("either HostFilePath or Reader must be specified")
+ }
+
+ if c.ContainerFilePath == "" {
+ return errors.New("ContainerFilePath must be specified")
+ }
+
+ return nil
+}
+
+// ContainerRequest represents the parameters used to get a running container
+type ContainerRequest struct {
+ FromDockerfile
+ HostAccessPorts []int
+ Image string
+ ImageSubstitutors []ImageSubstitutor
+ Entrypoint []string
+ Env map[string]string
+ ExposedPorts []string // allow specifying protocol info
+ Cmd []string
+ Labels map[string]string
+ Mounts ContainerMounts
+ Tmpfs map[string]string
+ RegistryCred string // Deprecated: Testcontainers will detect registry credentials automatically
+ WaitingFor wait.Strategy
+ Name string // for specifying container name
+ Hostname string
+ WorkingDir string // specify the working directory of the container
+ ExtraHosts []string // Deprecated: Use HostConfigModifier instead
+ Privileged bool // For starting privileged container
+ Networks []string // for specifying network names
+ NetworkAliases map[string][]string // for specifying network aliases
+ NetworkMode container.NetworkMode // Deprecated: Use HostConfigModifier instead
+ Resources container.Resources // Deprecated: Use HostConfigModifier instead
+ Files []ContainerFile // files which will be copied when container starts
+ User string // for specifying uid:gid
+ SkipReaper bool // Deprecated: The reaper is globally controlled by the .testcontainers.properties file or the TESTCONTAINERS_RYUK_DISABLED environment variable
+ ReaperImage string // Deprecated: use WithImageName ContainerOption instead. Alternative reaper image
+ ReaperOptions []ContainerOption // Deprecated: the reaper is configured at the properties level, for an entire test session
+ AutoRemove bool // Deprecated: Use HostConfigModifier instead. If set to true, the container will be removed from the host when stopped
+ AlwaysPullImage bool // Always pull image
+ ImagePlatform string // ImagePlatform describes the platform which the image runs on.
+ Binds []string // Deprecated: Use HostConfigModifier instead
+ ShmSize int64 // Amount of memory shared with the host (in bytes)
+ CapAdd []string // Deprecated: Use HostConfigModifier instead. Add Linux capabilities
+ CapDrop []string // Deprecated: Use HostConfigModifier instead. Drop Linux capabilities
+ ConfigModifier func(*container.Config) // Modifier for the config before container creation
+ HostConfigModifier func(*container.HostConfig) // Modifier for the host config before container creation
+ EndpointSettingsModifier func(map[string]*network.EndpointSettings) // Modifier for the network settings before container creation
+ LifecycleHooks []ContainerLifecycleHooks // define hooks to be executed during container lifecycle
+ LogConsumerCfg *LogConsumerConfig // define the configuration for the log producer and its log consumers to follow the logs
+}
+
+// sessionID returns the session ID for the container request.
+func (c *ContainerRequest) sessionID() string {
+ if sessionID := c.Labels[core.LabelSessionID]; sessionID != "" {
+ return sessionID
+ }
+
+ return core.SessionID()
+}
+
+// containerOptions functional options for a container
+type containerOptions struct {
+ ImageName string
+ RegistryCredentials string // Deprecated: Testcontainers will detect registry credentials automatically
+}
+
+// Deprecated: it will be removed in the next major release
+// functional option for setting the reaper image
+type ContainerOption func(*containerOptions)
+
+// Deprecated: it will be removed in the next major release
+// WithImageName sets the reaper image name
+func WithImageName(imageName string) ContainerOption {
+ return func(o *containerOptions) {
+ o.ImageName = imageName
+ }
+}
+
+// Deprecated: Testcontainers will detect registry credentials automatically, and it will be removed in the next major release
+// WithRegistryCredentials sets the reaper registry credentials
+func WithRegistryCredentials(registryCredentials string) ContainerOption {
+ return func(o *containerOptions) {
+ o.RegistryCredentials = registryCredentials
+ }
+}
+
+// Validate ensures that the ContainerRequest does not have invalid parameters configured to it
+// ex. make sure you are not specifying both an image as well as a context
+func (c *ContainerRequest) Validate() error {
+ validationMethods := []func() error{
+ c.validateContextAndImage,
+ c.validateContextOrImageIsSpecified,
+ c.validateMounts,
+ }
+
+ var err error
+ for _, validationMethod := range validationMethods {
+ err = validationMethod()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// GetContext retrieve the build context for the request
+// Must be closed when no longer needed.
+func (c *ContainerRequest) GetContext() (io.Reader, error) {
+ includes := []string{"."}
+
+ if c.ContextArchive != nil {
+ return c.ContextArchive, nil
+ }
+
+ // always pass context as absolute path
+ abs, err := filepath.Abs(c.Context)
+ if err != nil {
+ return nil, fmt.Errorf("error getting absolute path: %w", err)
+ }
+ c.Context = abs
+
+ dockerIgnoreExists, excluded, err := parseDockerIgnore(abs)
+ if err != nil {
+ return nil, err
+ }
+
+ if dockerIgnoreExists {
+ // only add .dockerignore if it exists
+ includes = append(includes, ".dockerignore")
+ }
+
+ includes = append(includes, c.GetDockerfile())
+
+ buildContext, err := archive.TarWithOptions(
+ c.Context,
+ &archive.TarOptions{ExcludePatterns: excluded, IncludeFiles: includes},
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return buildContext, nil
+}
+
+// parseDockerIgnore returns if the file exists, the excluded files and an error if any
+func parseDockerIgnore(targetDir string) (bool, []string, error) {
+ // based on https://github.com/docker/cli/blob/master/cli/command/image/build/dockerignore.go#L14
+ fileLocation := filepath.Join(targetDir, ".dockerignore")
+ var excluded []string
+ exists := false
+ if f, openErr := os.Open(fileLocation); openErr == nil {
+ defer f.Close()
+
+ exists = true
+
+ var err error
+ excluded, err = ignorefile.ReadAll(f)
+ if err != nil {
+ return true, excluded, fmt.Errorf("error reading .dockerignore: %w", err)
+ }
+ }
+ return exists, excluded, nil
+}
+
+// GetBuildArgs returns the env args to be used when creating from Dockerfile
+func (c *ContainerRequest) GetBuildArgs() map[string]*string {
+ return c.FromDockerfile.BuildArgs
+}
+
+// GetDockerfile returns the Dockerfile from the ContainerRequest, defaults to "Dockerfile".
+// Sets FromDockerfile.Dockerfile to the default if blank.
+func (c *ContainerRequest) GetDockerfile() string {
+ if c.FromDockerfile.Dockerfile == "" {
+ c.FromDockerfile.Dockerfile = "Dockerfile"
+ }
+
+ return c.FromDockerfile.Dockerfile
+}
+
+// GetRepo returns the Repo label for image from the ContainerRequest, defaults to UUID.
+// Sets FromDockerfile.Repo to the default value if blank.
+func (c *ContainerRequest) GetRepo() string {
+ if c.FromDockerfile.Repo == "" {
+ c.FromDockerfile.Repo = uuid.NewString()
+ }
+
+ return strings.ToLower(c.FromDockerfile.Repo)
+}
+
+// GetTag returns the Tag label for image from the ContainerRequest, defaults to UUID.
+// Sets FromDockerfile.Tag to the default value if blank.
+func (c *ContainerRequest) GetTag() string {
+ if c.FromDockerfile.Tag == "" {
+ c.FromDockerfile.Tag = uuid.NewString()
+ }
+
+ return strings.ToLower(c.FromDockerfile.Tag)
+}
+
+// Deprecated: Testcontainers will detect registry credentials automatically, and it will be removed in the next major release.
+// GetAuthConfigs returns the auth configs to be able to pull from an authenticated docker registry.
+// Panics if an error occurs.
+func (c *ContainerRequest) GetAuthConfigs() map[string]registry.AuthConfig {
+ auth, err := getAuthConfigsFromDockerfile(c)
+ if err != nil {
+ panic(fmt.Sprintf("failed to get auth configs from Dockerfile: %v", err))
+ }
+ return auth
+}
+
+// dockerFileImages returns the images from the request Dockerfile.
+func (c *ContainerRequest) dockerFileImages() ([]string, error) {
+ if c.ContextArchive == nil {
+ // Source is a directory, we can read the Dockerfile directly.
+ images, err := core.ExtractImagesFromDockerfile(filepath.Join(c.Context, c.GetDockerfile()), c.GetBuildArgs())
+ if err != nil {
+ return nil, fmt.Errorf("extract images from Dockerfile: %w", err)
+ }
+
+ return images, nil
+ }
+
+ // Source is an archive, we need to read it to get the Dockerfile.
+ dockerFile := c.GetDockerfile()
+ tr := tar.NewReader(c.FromDockerfile.ContextArchive)
+
+ for {
+ hdr, err := tr.Next()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ return nil, fmt.Errorf("Dockerfile %q not found in context archive", dockerFile)
+ }
+
+ return nil, fmt.Errorf("reading tar archive: %w", err)
+ }
+
+ if hdr.Name != dockerFile {
+ continue
+ }
+
+ images, err := core.ExtractImagesFromReader(tr, c.GetBuildArgs())
+ if err != nil {
+ return nil, fmt.Errorf("extract images from Dockerfile: %w", err)
+ }
+
+ // Reset the archive to the beginning.
+ if _, err := c.ContextArchive.Seek(0, io.SeekStart); err != nil {
+ return nil, fmt.Errorf("seek context archive to start: %w", err)
+ }
+
+ return images, nil
+ }
+}
+
+// getAuthConfigsFromDockerfile returns the auth configs to be able to pull from an authenticated docker registry
+func getAuthConfigsFromDockerfile(c *ContainerRequest) (map[string]registry.AuthConfig, error) {
+ images, err := c.dockerFileImages()
+ if err != nil {
+ return nil, fmt.Errorf("docker file images: %w", err)
+ }
+
+ // Get the auth configs once for all images as it can be a time-consuming operation.
+ configs, err := getDockerAuthConfigs()
+ if err != nil {
+ return nil, err
+ }
+
+ authConfigs := map[string]registry.AuthConfig{}
+ for _, image := range images {
+ registry, authConfig, err := dockerImageAuth(context.Background(), image, configs)
+ if err != nil {
+ if !errors.Is(err, dockercfg.ErrCredentialsNotFound) {
+ return nil, fmt.Errorf("docker image auth %q: %w", image, err)
+ }
+
+ // Credentials not found no config to add.
+ continue
+ }
+
+ authConfigs[registry] = authConfig
+ }
+
+ return authConfigs, nil
+}
+
+func (c *ContainerRequest) ShouldBuildImage() bool {
+ return c.FromDockerfile.Context != "" || c.FromDockerfile.ContextArchive != nil
+}
+
+func (c *ContainerRequest) ShouldKeepBuiltImage() bool {
+ return c.FromDockerfile.KeepImage
+}
+
+// BuildLogWriter returns the io.Writer for output of log when building a Docker image from
+// a Dockerfile. It returns the BuildLogWriter from the ContainerRequest, defaults to io.Discard.
+// For backward compatibility, if BuildLogWriter is default and PrintBuildLog is true,
+// the function returns os.Stderr.
+func (c *ContainerRequest) BuildLogWriter() io.Writer {
+ if c.FromDockerfile.BuildLogWriter != nil {
+ return c.FromDockerfile.BuildLogWriter
+ }
+ if c.FromDockerfile.PrintBuildLog {
+ c.FromDockerfile.BuildLogWriter = os.Stderr
+ } else {
+ c.FromDockerfile.BuildLogWriter = io.Discard
+ }
+ return c.FromDockerfile.BuildLogWriter
+}
+
+// BuildOptions returns the image build options when building a Docker image from a Dockerfile.
+// It will apply some defaults and finally call the BuildOptionsModifier from the FromDockerfile struct,
+// if set.
+func (c *ContainerRequest) BuildOptions() (types.ImageBuildOptions, error) {
+ buildOptions := types.ImageBuildOptions{
+ Remove: true,
+ ForceRemove: true,
+ }
+
+ if c.FromDockerfile.BuildOptionsModifier != nil {
+ c.FromDockerfile.BuildOptionsModifier(&buildOptions)
+ }
+
+ // apply mandatory values after the modifier
+ buildOptions.BuildArgs = c.GetBuildArgs()
+ buildOptions.Dockerfile = c.GetDockerfile()
+
+ // Make sure the auth configs from the Dockerfile are set right after the user-defined build options.
+ authsFromDockerfile, err := getAuthConfigsFromDockerfile(c)
+ if err != nil {
+ return types.ImageBuildOptions{}, fmt.Errorf("auth configs from Dockerfile: %w", err)
+ }
+
+ if buildOptions.AuthConfigs == nil {
+ buildOptions.AuthConfigs = map[string]registry.AuthConfig{}
+ }
+
+ for registry, authConfig := range authsFromDockerfile {
+ buildOptions.AuthConfigs[registry] = authConfig
+ }
+
+ // make sure the first tag is the one defined in the ContainerRequest
+ tag := fmt.Sprintf("%s:%s", c.GetRepo(), c.GetTag())
+
+ // apply substitutors to the built image
+ for _, is := range c.ImageSubstitutors {
+ modifiedTag, err := is.Substitute(tag)
+ if err != nil {
+ return types.ImageBuildOptions{}, fmt.Errorf("failed to substitute image %s with %s: %w", tag, is.Description(), err)
+ }
+
+ if modifiedTag != tag {
+ log.Printf("✍🏼 Replacing image with %s. From: %s to %s\n", is.Description(), tag, modifiedTag)
+ tag = modifiedTag
+ }
+ }
+
+ if len(buildOptions.Tags) > 0 {
+ // prepend the tag
+ buildOptions.Tags = append([]string{tag}, buildOptions.Tags...)
+ } else {
+ buildOptions.Tags = []string{tag}
+ }
+
+ if !c.ShouldKeepBuiltImage() {
+ dst := GenericLabels()
+ if err = core.MergeCustomLabels(dst, c.Labels); err != nil {
+ return types.ImageBuildOptions{}, err
+ }
+ if err = core.MergeCustomLabels(dst, buildOptions.Labels); err != nil {
+ return types.ImageBuildOptions{}, err
+ }
+ buildOptions.Labels = dst
+ }
+
+ // Do this as late as possible to ensure we don't leak the context on error/panic.
+ buildContext, err := c.GetContext()
+ if err != nil {
+ return types.ImageBuildOptions{}, err
+ }
+
+ buildOptions.Context = buildContext
+
+ return buildOptions, nil
+}
+
+func (c *ContainerRequest) validateContextAndImage() error {
+ if c.FromDockerfile.Context != "" && c.Image != "" {
+ return errors.New("you cannot specify both an Image and Context in a ContainerRequest")
+ }
+
+ return nil
+}
+
+func (c *ContainerRequest) validateContextOrImageIsSpecified() error {
+ if c.FromDockerfile.Context == "" && c.FromDockerfile.ContextArchive == nil && c.Image == "" {
+ return errors.New("you must specify either a build context or an image")
+ }
+
+ return nil
+}
+
+// validateMounts ensures that the mounts do not have duplicate targets.
+// It will check the Mounts and HostConfigModifier.Binds fields.
+func (c *ContainerRequest) validateMounts() error {
+ targets := make(map[string]bool, len(c.Mounts))
+
+ for idx := range c.Mounts {
+ m := c.Mounts[idx]
+ targetPath := m.Target.Target()
+ if targets[targetPath] {
+ return fmt.Errorf("%w: %s", ErrDuplicateMountTarget, targetPath)
+ }
+ targets[targetPath] = true
+ }
+
+ if c.HostConfigModifier == nil {
+ return nil
+ }
+
+ hostConfig := container.HostConfig{}
+
+ c.HostConfigModifier(&hostConfig)
+
+ if len(hostConfig.Binds) > 0 {
+ for _, bind := range hostConfig.Binds {
+ parts := strings.Split(bind, ":")
+ if len(parts) != 2 && len(parts) != 3 {
+ return fmt.Errorf("%w: %s", ErrInvalidBindMount, bind)
+ }
+ targetPath := parts[1]
+ if targets[targetPath] {
+ return fmt.Errorf("%w: %s", ErrDuplicateMountTarget, targetPath)
+ }
+ targets[targetPath] = true
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/docker.go b/vendor/github.com/testcontainers/testcontainers-go/docker.go
new file mode 100644
index 0000000..774a364
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/docker.go
@@ -0,0 +1,1803 @@
+package testcontainers
+
+import (
+ "archive/tar"
+ "bufio"
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "net"
+ "net/url"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sync"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+ "github.com/containerd/platforms"
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/client"
+ "github.com/docker/docker/errdefs"
+ "github.com/docker/docker/pkg/jsonmessage"
+ "github.com/docker/docker/pkg/stdcopy"
+ "github.com/docker/go-connections/nat"
+ "github.com/moby/term"
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+
+ tcexec "github.com/testcontainers/testcontainers-go/exec"
+ "github.com/testcontainers/testcontainers-go/internal/config"
+ "github.com/testcontainers/testcontainers-go/internal/core"
+ "github.com/testcontainers/testcontainers-go/log"
+ "github.com/testcontainers/testcontainers-go/wait"
+)
+
+// Implement interfaces
+var _ Container = (*DockerContainer)(nil)
+
+const (
+ Bridge = "bridge" // Bridge network name (as well as driver)
+ Podman = "podman" // Podman's default network name (its "bridge" equivalent)
+ ReaperDefault = "reaper_default" // Default network name when bridge is not available
+ packagePath = "github.com/testcontainers/testcontainers-go" // module path, used for labels/identification
+)
+
+var (
+ // createContainerFailDueToNameConflictRegex is a regular expression that matches the container is already in use error.
+ createContainerFailDueToNameConflictRegex = regexp.MustCompile("Conflict. The container name .* is already in use by container .*")
+
+ // minLogProductionTimeout is the minimum log production timeout.
+ // Values passed via WithLogProductionTimeout are clamped to this floor.
+ minLogProductionTimeout = time.Duration(5 * time.Second)
+
+ // maxLogProductionTimeout is the maximum log production timeout.
+ // Values passed via WithLogProductionTimeout are clamped to this ceiling.
+ maxLogProductionTimeout = time.Duration(60 * time.Second)
+
+ // errLogProductionStop is the cause for stopping log production.
+ errLogProductionStop = errors.New("log production stopped")
+)
+
+// DockerContainer represents a container started using Docker
+type DockerContainer struct {
+ // Container ID from Docker
+ ID string
+ WaitingFor wait.Strategy
+ Image string
+ exposedPorts []string // a reference to the container's requested exposed ports. It allows checking they are ready before any wait strategy
+
+ isRunning bool
+ imageWasBuilt bool
+ // keepBuiltImage makes Terminate not remove the image if imageWasBuilt.
+ keepBuiltImage bool
+ provider *DockerProvider
+ sessionID string
+ terminationSignal chan bool
+ consumers []LogConsumer
+
+ // TODO: Remove locking and wait group once the deprecated StartLogProducer and
+ // StopLogProducer have been removed and hence logging can only be started and
+ // stopped once.
+
+ // logProductionCancel is used to signal the log production to stop.
+ logProductionCancel context.CancelCauseFunc
+ logProductionCtx context.Context
+
+ // logProductionTimeout bounds each log-reading loop; clamped to
+ // [minLogProductionTimeout, maxLogProductionTimeout] in startLogProduction.
+ logProductionTimeout *time.Duration
+ logger log.Logger
+ lifecycleHooks []ContainerLifecycleHooks
+
+ healthStatus string // container health status, will default to healthStatusNone if no healthcheck is present
+}
+
+// SetLogger sets the logger for the container
+func (c *DockerContainer) SetLogger(logger log.Logger) {
+ c.logger = logger
+}
+
+// SetProvider sets the provider for the container
+func (c *DockerContainer) SetProvider(provider *DockerProvider) {
+ c.provider = provider
+}
+
+// SetTerminationSignal sets the termination signal for the container.
+// The channel is signalled on Terminate to release the reaper connection.
+func (c *DockerContainer) SetTerminationSignal(signal chan bool) {
+ c.terminationSignal = signal
+}
+
+// GetContainerID returns the Docker-assigned container ID.
+func (c *DockerContainer) GetContainerID() string {
+ return c.ID
+}
+
+// IsRunning reports the locally-tracked running state; it does not query
+// the Docker daemon.
+func (c *DockerContainer) IsRunning() bool {
+ return c.isRunning
+}
+
+// Endpoint gets proto://host:port string for the lowest numbered exposed port
+// Will returns just host:port if proto is ""
+func (c *DockerContainer) Endpoint(ctx context.Context, proto string) (string, error) {
+ inspect, err := c.Inspect(ctx)
+ if err != nil {
+ return "", err
+ }
+
+ // Get lowest numbered bound port.
+ var lowestPort nat.Port
+ for port := range inspect.NetworkSettings.Ports {
+ if lowestPort == "" || port.Int() < lowestPort.Int() {
+ lowestPort = port
+ }
+ }
+
+ return c.PortEndpoint(ctx, lowestPort, proto)
+}
+
+// PortEndpoint gets proto://host:port string for the given exposed port
+// Will returns just host:port if proto is ""
+func (c *DockerContainer) PortEndpoint(ctx context.Context, port nat.Port, proto string) (string, error) {
+ host, err := c.Host(ctx)
+ if err != nil {
+ return "", err
+ }
+
+ // Resolve the externally mapped (host) port for the container port.
+ outerPort, err := c.MappedPort(ctx, port)
+ if err != nil {
+ return "", err
+ }
+
+ protoFull := ""
+ if proto != "" {
+ protoFull = proto + "://"
+ }
+
+ return fmt.Sprintf("%s%s:%s", protoFull, host, outerPort.Port()), nil
+}
+
+// Host gets host (ip or name) of the docker daemon where the container port is exposed
+// Warning: this is based on your Docker host setting. Will fail if using an SSH tunnel
+// You can use the "TESTCONTAINERS_HOST_OVERRIDE" env variable to set this yourself
+func (c *DockerContainer) Host(ctx context.Context) (string, error) {
+ host, err := c.provider.DaemonHost(ctx)
+ if err != nil {
+ return "", err
+ }
+ return host, nil
+}
+
+// Inspect gets the raw container info by querying the Docker daemon; the
+// result is not cached.
+func (c *DockerContainer) Inspect(ctx context.Context) (*container.InspectResponse, error) {
+ jsonRaw, err := c.inspectRawContainer(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ return jsonRaw, nil
+}
+
+// MappedPort gets externally mapped port for a container port.
+// In host network mode there is no mapping, so the requested port is
+// returned unchanged. Returns a not-found error if the port is not bound.
+func (c *DockerContainer) MappedPort(ctx context.Context, port nat.Port) (nat.Port, error) {
+ inspect, err := c.Inspect(ctx)
+ if err != nil {
+ return "", fmt.Errorf("inspect: %w", err)
+ }
+ if inspect.ContainerJSONBase.HostConfig.NetworkMode == "host" {
+ return port, nil
+ }
+
+ ports := inspect.NetworkSettings.Ports
+
+ for k, p := range ports {
+ if k.Port() != port.Port() {
+ continue
+ }
+ // An empty proto on the requested port matches any protocol.
+ if port.Proto() != "" && k.Proto() != port.Proto() {
+ continue
+ }
+ if len(p) == 0 {
+ continue
+ }
+ // Use the first host binding for this container port.
+ return nat.NewPort(k.Proto(), p[0].HostPort)
+ }
+
+ return "", errdefs.NotFound(fmt.Errorf("port %q not found", port))
+}
+
+// Deprecated: use c.Inspect(ctx).NetworkSettings.Ports instead.
+// Ports gets the exposed ports for the container.
+func (c *DockerContainer) Ports(ctx context.Context) (nat.PortMap, error) {
+ inspect, err := c.Inspect(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return inspect.NetworkSettings.Ports, nil
+}
+
+// SessionID gets the current session id
+func (c *DockerContainer) SessionID() string {
+ return c.sessionID
+}
+
+// Start will start an already created container.
+//
+// Hooks run in order: PreStarts (startingHook), PostStarts (startedHook),
+// then PostReadies (readiedHook) after isRunning is set.
+func (c *DockerContainer) Start(ctx context.Context) error {
+ err := c.startingHook(ctx)
+ if err != nil {
+ return fmt.Errorf("starting hook: %w", err)
+ }
+
+ if err := c.provider.client.ContainerStart(ctx, c.ID, container.StartOptions{}); err != nil {
+ return fmt.Errorf("container start: %w", err)
+ }
+ // Release idle client connections once Start returns.
+ defer c.provider.Close()
+
+ err = c.startedHook(ctx)
+ if err != nil {
+ return fmt.Errorf("started hook: %w", err)
+ }
+
+ c.isRunning = true
+
+ err = c.readiedHook(ctx)
+ if err != nil {
+ return fmt.Errorf("readied hook: %w", err)
+ }
+
+ return nil
+}
+
+// Stop stops the container.
+//
+// In case the container fails to stop gracefully within a time frame specified
+// by the timeout argument, it is forcefully terminated (killed).
+//
+// If the timeout is nil, the container's StopTimeout value is used, if set,
+// otherwise the engine default. A negative timeout value can be specified,
+// meaning no timeout, i.e. no forceful termination is performed.
+//
+// All hooks are called in the following order:
+// - [ContainerLifecycleHooks.PreStops]
+// - [ContainerLifecycleHooks.PostStops]
+//
+// If the container is already stopped, the method is a no-op.
+func (c *DockerContainer) Stop(ctx context.Context, timeout *time.Duration) error {
+ // Note we can't check isRunning here because we allow external creation
+ // without exposing the ability to fully initialize the container state.
+ // See: https://github.com/testcontainers/testcontainers-go/issues/2667
+ // TODO: Add a check for isRunning when the above issue is resolved.
+ err := c.stoppingHook(ctx)
+ if err != nil {
+ return fmt.Errorf("stopping hook: %w", err)
+ }
+
+ var options container.StopOptions
+
+ if timeout != nil {
+ // The engine API takes whole seconds; sub-second durations truncate.
+ timeoutSeconds := int(timeout.Seconds())
+ options.Timeout = &timeoutSeconds
+ }
+
+ if err := c.provider.client.ContainerStop(ctx, c.ID, options); err != nil {
+ return fmt.Errorf("container stop: %w", err)
+ }
+
+ defer c.provider.Close()
+
+ c.isRunning = false
+
+ err = c.stoppedHook(ctx)
+ if err != nil {
+ return fmt.Errorf("stopped hook: %w", err)
+ }
+
+ return nil
+}
+
+// Terminate calls stops and then removes the container including its volumes.
+// If its image was built it and all child images are also removed unless
+// the [FromDockerfile.KeepImage] on the [ContainerRequest] was set to true.
+//
+// The following hooks are called in order:
+// - [ContainerLifecycleHooks.PreTerminates]
+// - [ContainerLifecycleHooks.PostTerminates]
+//
+// Default: timeout is 10 seconds.
+func (c *DockerContainer) Terminate(ctx context.Context, opts ...TerminateOption) error {
+ options := NewTerminateOptions(ctx, opts...)
+ // Stop errors considered safe for cleanup (e.g. already removed) are
+ // tolerated so termination can proceed.
+ err := c.Stop(options.Context(), options.StopTimeout())
+ if err != nil && !isCleanupSafe(err) {
+ return fmt.Errorf("stop: %w", err)
+ }
+
+ select {
+ // Close reaper connection if it was attached.
+ case c.terminationSignal <- true:
+ default:
+ }
+
+ // NOTE(review): closes the raw client rather than provider.Close(),
+ // which nil-checks the client — confirm this asymmetry is intended.
+ defer c.provider.client.Close()
+
+ // TODO: Handle errors from ContainerRemove more correctly, e.g. should we
+ // run the terminated hook?
+ errs := []error{
+ c.terminatingHook(ctx),
+ c.provider.client.ContainerRemove(ctx, c.GetContainerID(), container.RemoveOptions{
+ RemoveVolumes: true,
+ Force: true,
+ }),
+ c.terminatedHook(ctx),
+ }
+
+ if c.imageWasBuilt && !c.keepBuiltImage {
+ _, err := c.provider.client.ImageRemove(ctx, c.Image, image.RemoveOptions{
+ Force: true,
+ PruneChildren: true,
+ })
+ errs = append(errs, err)
+ }
+
+ c.sessionID = ""
+ c.isRunning = false
+
+ if err = options.Cleanup(); err != nil {
+ errs = append(errs, err)
+ }
+
+ return errors.Join(errs...)
+}
+
+// inspectRawContainer fetches the container's raw inspect data from the
+// daemon, closing idle client connections afterwards.
+func (c *DockerContainer) inspectRawContainer(ctx context.Context) (*container.InspectResponse, error) {
+ defer c.provider.Close()
+ inspect, err := c.provider.client.ContainerInspect(ctx, c.ID)
+ if err != nil {
+ return nil, err
+ }
+
+ return &inspect, nil
+}
+
+// Logs will fetch both STDOUT and STDERR from the current container. Returns a
+// ReadCloser and leaves it up to the caller to extract what it wants.
+//
+// The returned pipe carries the log lines with the 8-byte Docker stream
+// multiplexing header stripped from the start of each line.
+func (c *DockerContainer) Logs(ctx context.Context) (io.ReadCloser, error) {
+ const streamHeaderSize = 8
+
+ options := container.LogsOptions{
+ ShowStdout: true,
+ ShowStderr: true,
+ }
+
+ rc, err := c.provider.client.ContainerLogs(ctx, c.ID, options)
+ if err != nil {
+ return nil, err
+ }
+ defer c.provider.Close()
+
+ pr, pw := io.Pipe()
+ r := bufio.NewReader(rc)
+
+ go func() {
+ lineStarted := true
+ // NOTE(review): the `err :=` below shadows the outer err (nil at
+ // this point), so this condition is effectively `for true`; the
+ // loop only exits via the returns in its body.
+ for err == nil {
+ line, isPrefix, err := r.ReadLine()
+
+ // Only strip the stream header at the start of a logical line;
+ // isPrefix means ReadLine returned a partial (over-long) line.
+ if lineStarted && len(line) >= streamHeaderSize {
+ line = line[streamHeaderSize:] // trim stream header
+ lineStarted = false
+ }
+ if !isPrefix {
+ lineStarted = true
+ }
+
+ _, errW := pw.Write(line)
+ if errW != nil {
+ return
+ }
+
+ // ReadLine drops the newline; restore it at end of line.
+ if !isPrefix {
+ _, errW := pw.Write([]byte("\n"))
+ if errW != nil {
+ return
+ }
+ }
+
+ if err != nil {
+ _ = pw.CloseWithError(err)
+ return
+ }
+ }
+ }()
+
+ return pr, nil
+}
+
+// Deprecated: use the ContainerRequest.LogConsumerConfig field instead.
+func (c *DockerContainer) FollowOutput(consumer LogConsumer) {
+ c.followOutput(consumer)
+}
+
+// followOutput adds a LogConsumer to be sent logs from the container's
+// STDOUT and STDERR
+func (c *DockerContainer) followOutput(consumer LogConsumer) {
+ c.consumers = append(c.consumers, consumer)
+}
+
+// Deprecated: use c.Inspect(ctx).Name instead.
+// Name gets the name of the container.
+func (c *DockerContainer) Name(ctx context.Context) (string, error) {
+ inspect, err := c.Inspect(ctx)
+ if err != nil {
+ return "", err
+ }
+ return inspect.Name, nil
+}
+
+// State returns container's running state as reported by the daemon.
+func (c *DockerContainer) State(ctx context.Context) (*container.State, error) {
+ inspect, err := c.inspectRawContainer(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return inspect.State, nil
+}
+
+// Networks gets the names of the networks the container is attached to.
+// Order is unspecified (map iteration).
+func (c *DockerContainer) Networks(ctx context.Context) ([]string, error) {
+ inspect, err := c.Inspect(ctx)
+ if err != nil {
+ return []string{}, err
+ }
+
+ networks := inspect.NetworkSettings.Networks
+
+ n := []string{}
+
+ for k := range networks {
+ n = append(n, k)
+ }
+
+ return n, nil
+}
+
+// ContainerIP gets the IP address of the primary network within the container.
+// Falls back to the sole attached network's IP when the top-level IPAddress
+// is empty (e.g. on user-defined networks).
+func (c *DockerContainer) ContainerIP(ctx context.Context) (string, error) {
+ inspect, err := c.Inspect(ctx)
+ if err != nil {
+ return "", err
+ }
+
+ ip := inspect.NetworkSettings.IPAddress
+ if ip == "" {
+ // use IP from "Networks" if only single network defined
+ networks := inspect.NetworkSettings.Networks
+ if len(networks) == 1 {
+ for _, v := range networks {
+ ip = v.IPAddress
+ }
+ }
+ }
+
+ return ip, nil
+}
+
+// ContainerIPs gets the IP addresses of all the networks within the container.
+// Order is unspecified (map iteration).
+func (c *DockerContainer) ContainerIPs(ctx context.Context) ([]string, error) {
+ ips := make([]string, 0)
+
+ inspect, err := c.Inspect(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ networks := inspect.NetworkSettings.Networks
+ for _, nw := range networks {
+ ips = append(ips, nw.IPAddress)
+ }
+
+ return ips, nil
+}
+
+// NetworkAliases gets the aliases of the container for the networks it is attached to.
+// The result maps network name to that network's alias list.
+func (c *DockerContainer) NetworkAliases(ctx context.Context) (map[string][]string, error) {
+ inspect, err := c.Inspect(ctx)
+ if err != nil {
+ return map[string][]string{}, err
+ }
+
+ networks := inspect.NetworkSettings.Networks
+
+ a := map[string][]string{}
+
+ for k := range networks {
+ a[k] = networks[k].Aliases
+ }
+
+ return a, nil
+}
+
+// Exec executes a command in the current container.
+// It returns the exit status of the executed command, an [io.Reader] containing the combined
+// stdout and stderr, and any encountered error. Note that reading directly from the [io.Reader]
+// may result in unexpected bytes due to custom stream multiplexing headers.
+// Use [tcexec.Multiplexed] option to read the combined output without the multiplexing headers.
+// Alternatively, to separate the stdout and stderr from [io.Reader] and interpret these headers properly,
+// [github.com/docker/docker/pkg/stdcopy.StdCopy] from the Docker API should be used.
+func (c *DockerContainer) Exec(ctx context.Context, cmd []string, options ...tcexec.ProcessOption) (int, io.Reader, error) {
+ cli := c.provider.client
+
+ processOptions := tcexec.NewProcessOptions(cmd)
+
+ // processing all the options in a first loop because for the multiplexed option
+ // we first need to have a containerExecCreateResponse
+ for _, o := range options {
+ o.Apply(processOptions)
+ }
+
+ response, err := cli.ContainerExecCreate(ctx, c.ID, processOptions.ExecConfig)
+ if err != nil {
+ return 0, nil, fmt.Errorf("container exec create: %w", err)
+ }
+
+ hijack, err := cli.ContainerExecAttach(ctx, response.ID, container.ExecAttachOptions{})
+ if err != nil {
+ return 0, nil, fmt.Errorf("container exec attach: %w", err)
+ }
+
+ processOptions.Reader = hijack.Reader
+
+ // second loop to process the multiplexed option, as now we have a reader
+ // from the created exec response.
+ for _, o := range options {
+ o.Apply(processOptions)
+ }
+
+ // Poll the exec until it finishes; ctx cancellation surfaces through
+ // the ContainerExecInspect call.
+ var exitCode int
+ for {
+ execResp, err := cli.ContainerExecInspect(ctx, response.ID)
+ if err != nil {
+ return 0, nil, fmt.Errorf("container exec inspect: %w", err)
+ }
+
+ if !execResp.Running {
+ exitCode = execResp.ExitCode
+ break
+ }
+
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ return exitCode, processOptions.Reader, nil
+}
+
+// FileFromContainer is an io.ReadCloser over a single file extracted from a
+// container's TAR stream: reads come from the tar entry, Close closes the
+// underlying HTTP/TAR stream.
+type FileFromContainer struct {
+ underlying *io.ReadCloser
+ tarreader *tar.Reader
+}
+
+// Read reads from the current tar entry.
+func (fc *FileFromContainer) Read(b []byte) (int, error) {
+ return (*fc.tarreader).Read(b)
+}
+
+// Close closes the underlying stream the tar reader draws from.
+func (fc *FileFromContainer) Close() error {
+ return (*fc.underlying).Close()
+}
+
+// CopyFileFromContainer copies a single file out of the container and returns
+// a ReadCloser positioned at that file's content. The caller must Close it.
+func (c *DockerContainer) CopyFileFromContainer(ctx context.Context, filePath string) (io.ReadCloser, error) {
+ r, _, err := c.provider.client.CopyFromContainer(ctx, c.ID, filePath)
+ if err != nil {
+ return nil, err
+ }
+ defer c.provider.Close()
+
+ tarReader := tar.NewReader(r)
+
+ // if we got here we have exactly one file in the TAR-stream
+ // so we advance the index by one so the next call to Read will start reading it
+ _, err = tarReader.Next()
+ if err != nil {
+ return nil, err
+ }
+
+ ret := &FileFromContainer{
+ underlying: &r,
+ tarreader: tarReader,
+ }
+
+ return ret, nil
+}
+
+// CopyDirToContainer copies the contents of a directory to a parent path in the container. This parent path must exist in the container first
+// as we cannot create it
+func (c *DockerContainer) CopyDirToContainer(ctx context.Context, hostDirPath string, containerParentPath string, fileMode int64) error {
+ dir, err := isDir(hostDirPath)
+ if err != nil {
+ return err
+ }
+
+ if !dir {
+ // it's not a dir: let the consumer handle the error
+ return fmt.Errorf("path %s is not a directory", hostDirPath)
+ }
+
+ // Package the directory as a TAR archive with the requested file mode.
+ buff, err := tarDir(hostDirPath, fileMode)
+ if err != nil {
+ return err
+ }
+
+ // create the directory under its parent
+ parent := filepath.Dir(containerParentPath)
+
+ err = c.provider.client.CopyToContainer(ctx, c.ID, parent, buff, container.CopyToContainerOptions{})
+ if err != nil {
+ return err
+ }
+ defer c.provider.Close()
+
+ return nil
+}
+
+// CopyFileToContainer copies a host file into the container at
+// containerFilePath with the given mode. Directories are delegated to
+// CopyDirToContainer.
+func (c *DockerContainer) CopyFileToContainer(ctx context.Context, hostFilePath string, containerFilePath string, fileMode int64) error {
+ dir, err := isDir(hostFilePath)
+ if err != nil {
+ return err
+ }
+
+ if dir {
+ return c.CopyDirToContainer(ctx, hostFilePath, containerFilePath, fileMode)
+ }
+
+ f, err := os.Open(hostFilePath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ info, err := f.Stat()
+ if err != nil {
+ return err
+ }
+
+ // In Go 1.22 os.File is always an io.WriterTo. However, testcontainers
+ // currently allows Go 1.21, so we need to trick the compiler a little.
+ var file fs.File = f
+ return c.copyToContainer(ctx, func(tw io.Writer) error {
+ // Attempt optimized writeTo, implemented in linux
+ if wt, ok := file.(io.WriterTo); ok {
+ _, err := wt.WriteTo(tw)
+ return err
+ }
+ _, err := io.Copy(tw, f)
+ return err
+ }, info.Size(), containerFilePath, fileMode)
+}
+
+// CopyToContainer copies fileContent data to a file in container
+func (c *DockerContainer) CopyToContainer(ctx context.Context, fileContent []byte, containerFilePath string, fileMode int64) error {
+ return c.copyToContainer(ctx, func(tw io.Writer) error {
+ _, err := tw.Write(fileContent)
+ return err
+ }, int64(len(fileContent)), containerFilePath, fileMode)
+}
+
+// copyToContainer wraps the content produced by fileContent in a single-file
+// TAR archive rooted at "/" and ships it to the container.
+func (c *DockerContainer) copyToContainer(ctx context.Context, fileContent func(tw io.Writer) error, fileContentSize int64, containerFilePath string, fileMode int64) error {
+ buffer, err := tarFile(containerFilePath, fileContent, fileContentSize, fileMode)
+ if err != nil {
+ return err
+ }
+
+ err = c.provider.client.CopyToContainer(ctx, c.ID, "/", buffer, container.CopyToContainerOptions{})
+ if err != nil {
+ return err
+ }
+ defer c.provider.Close()
+
+ return nil
+}
+
+// logConsumerWriter is a writer that writes to a LogConsumer.
+type logConsumerWriter struct {
+ log Log // template Log; its Content is set per Write call
+ consumers []LogConsumer
+}
+
+// newLogConsumerWriter creates a new logConsumerWriter for logType that sends messages to all consumers.
+func newLogConsumerWriter(logType string, consumers []LogConsumer) *logConsumerWriter {
+ return &logConsumerWriter{
+ log: Log{LogType: logType},
+ consumers: consumers,
+ }
+}
+
+// Write writes the p content to all consumers.
+// Note: p is passed by reference in the Log; consumers must not retain it
+// past Accept, per the io.Writer contract on p.
+func (lw logConsumerWriter) Write(p []byte) (int, error) {
+ lw.log.Content = p
+ for _, consumer := range lw.consumers {
+ consumer.Accept(lw.log)
+ }
+ return len(p), nil
+}
+
+// LogProductionOption is a functional option applied when starting log production.
+type LogProductionOption func(*DockerContainer)
+
+// WithLogProductionTimeout is a functional option that sets the timeout for the log production.
+// If the timeout is lower than 5s or greater than 60s it will be set to 5s or 60s respectively.
+func WithLogProductionTimeout(timeout time.Duration) LogProductionOption {
+ return func(c *DockerContainer) {
+ c.logProductionTimeout = &timeout
+ }
+}
+
+// Deprecated: use the ContainerRequest.LogConsumerConfig field instead.
+func (c *DockerContainer) StartLogProducer(ctx context.Context, opts ...LogProductionOption) error {
+ return c.startLogProduction(ctx, opts...)
+}
+
+// startLogProduction will start a concurrent process that will continuously read logs
+// from the container and will send them to each added LogConsumer.
+//
+// Default log production timeout is 5s. It is used to set the context timeout
+// which means that each log-reading loop will last at up to the specified timeout.
+//
+// Use functional option WithLogProductionTimeout() to override default timeout. If it's
+// lower than 5s and greater than 60s it will be set to 5s or 60s respectively.
+func (c *DockerContainer) startLogProduction(ctx context.Context, opts ...LogProductionOption) error {
+ for _, opt := range opts {
+ opt(c)
+ }
+
+ // Validate the log production timeout.
+ switch {
+ case c.logProductionTimeout == nil:
+ c.logProductionTimeout = &minLogProductionTimeout
+ case *c.logProductionTimeout < minLogProductionTimeout:
+ c.logProductionTimeout = &minLogProductionTimeout
+ case *c.logProductionTimeout > maxLogProductionTimeout:
+ c.logProductionTimeout = &maxLogProductionTimeout
+ }
+
+ // Setup the log writers.
+ stdout := newLogConsumerWriter(StdoutLog, c.consumers)
+ stderr := newLogConsumerWriter(StderrLog, c.consumers)
+
+ // Setup the log production context which will be used to stop the log production.
+ c.logProductionCtx, c.logProductionCancel = context.WithCancelCause(ctx)
+
+ // We capture context cancel function to avoid data race with multiple
+ // calls to startLogProduction.
+ go func(cancel context.CancelCauseFunc) {
+ // Ensure the context is cancelled when log productions completes
+ // so that GetLogProductionErrorChannel functions correctly.
+ defer cancel(nil)
+
+ c.logProducer(stdout, stderr)
+ }(c.logProductionCancel)
+
+ return nil
+}
+
+// logProducer read logs from the container and writes them to stdout, stderr until either:
+// - logProductionCtx is done
+// - A fatal error occurs
+// - No more logs are available
+func (c *DockerContainer) logProducer(stdout, stderr io.Writer) {
+ // Clean up idle client connections.
+ defer c.provider.Close()
+
+ // Setup the log options, start from the beginning.
+ options := &container.LogsOptions{
+ ShowStdout: true,
+ ShowStderr: true,
+ Follow: true,
+ }
+
+ // Use a separate method so that timeout cancel function is
+ // called correctly.
+ for c.copyLogsTimeout(stdout, stderr, options) {
+ }
+}
+
+// copyLogsTimeout copies logs from the container to stdout and stderr with a timeout.
+// It returns true if the log production should be retried, false otherwise.
+func (c *DockerContainer) copyLogsTimeout(stdout, stderr io.Writer, options *container.LogsOptions) bool {
+ timeoutCtx, cancel := context.WithTimeout(c.logProductionCtx, *c.logProductionTimeout)
+ defer cancel()
+
+ err := c.copyLogs(timeoutCtx, stdout, stderr, *options)
+ switch {
+ case err == nil:
+ // No more logs available.
+ return false
+ case c.logProductionCtx.Err() != nil:
+ // Log production was stopped or caller context is done.
+ return false
+ case timeoutCtx.Err() != nil, errors.Is(err, net.ErrClosed):
+ // Timeout or client connection closed, retry.
+ default:
+ // Unexpected error, retry.
+ c.logger.Printf("Unexpected error reading logs: %v", err)
+ }
+
+ // Retry from the last log received.
+ // Since takes a Unix timestamp with nanosecond precision so no logs
+ // between the disconnect and the retry are dropped.
+ now := time.Now()
+ options.Since = fmt.Sprintf("%d.%09d", now.Unix(), int64(now.Nanosecond()))
+
+ return true
+}
+
+// copyLogs copies logs from the container to stdout and stderr,
+// demultiplexing the Docker log stream via stdcopy.
+func (c *DockerContainer) copyLogs(ctx context.Context, stdout, stderr io.Writer, options container.LogsOptions) error {
+ rc, err := c.provider.client.ContainerLogs(ctx, c.GetContainerID(), options)
+ if err != nil {
+ return fmt.Errorf("container logs: %w", err)
+ }
+ defer rc.Close()
+
+ if _, err = stdcopy.StdCopy(stdout, stderr, rc); err != nil {
+ return fmt.Errorf("stdcopy: %w", err)
+ }
+
+ return nil
+}
+
+// Deprecated: it will be removed in the next major release.
+func (c *DockerContainer) StopLogProducer() error {
+ return c.stopLogProduction()
+}
+
+// stopLogProduction will stop the concurrent process that is reading logs
+// and sending them to each added LogConsumer.
+// It is a no-op if log production was never started.
+func (c *DockerContainer) stopLogProduction() error {
+ if c.logProductionCancel == nil {
+ return nil
+ }
+
+ // Signal the log production to stop.
+ c.logProductionCancel(errLogProductionStop)
+
+ // Expected causes (our stop sentinel or parent-context completion) are
+ // not errors from the caller's perspective.
+ if err := context.Cause(c.logProductionCtx); err != nil {
+ switch {
+ case errors.Is(err, errLogProductionStop):
+ // Log production was stopped.
+ return nil
+ case errors.Is(err, context.DeadlineExceeded),
+ errors.Is(err, context.Canceled):
+ // Parent context is done.
+ return nil
+ default:
+ return err
+ }
+ }
+
+ return nil
+}
+
+// GetLogProductionErrorChannel exposes the only way for the consumer
+// to be able to listen to errors and react to them.
+// Returns nil if log production was never started. The channel delivers
+// the production context's cause once it completes, then closes.
+func (c *DockerContainer) GetLogProductionErrorChannel() <-chan error {
+ if c.logProductionCtx == nil {
+ return nil
+ }
+
+ errCh := make(chan error, 1)
+ go func(ctx context.Context) {
+ <-ctx.Done()
+ errCh <- context.Cause(ctx)
+ close(errCh)
+ }(c.logProductionCtx)
+
+ return errCh
+}
+
+// connectReaper connects the reaper to the container if it is needed.
+// No-op when Ryuk is disabled or when this container IS the reaper.
+func (c *DockerContainer) connectReaper(ctx context.Context) error {
+ if c.provider.config.RyukDisabled || isReaperImage(c.Image) {
+ // Reaper is disabled or we are the reaper container.
+ return nil
+ }
+
+ reaper, err := spawner.reaper(context.WithValue(ctx, core.DockerHostContextKey, c.provider.host), core.SessionID(), c.provider)
+ if err != nil {
+ return fmt.Errorf("reaper: %w", err)
+ }
+
+ // Keep the reaper connection open; it is released via terminationSignal.
+ if c.terminationSignal, err = reaper.Connect(); err != nil {
+ return fmt.Errorf("reaper connect: %w", err)
+ }
+
+ return nil
+}
+
+// cleanupTermSignal triggers the termination signal if it was created and an error occurred.
+func (c *DockerContainer) cleanupTermSignal(err error) {
+ if c.terminationSignal != nil && err != nil {
+ c.terminationSignal <- true
+ }
+}
+
+// DockerNetwork represents a network started using Docker
+type DockerNetwork struct {
+ ID string // Network ID from Docker
+ Driver string
+ Name string
+ provider *DockerProvider
+ terminationSignal chan bool // signalled on Remove to release the reaper connection
+}
+
+// Remove is used to remove the network. It is usually triggered by as defer function.
+func (n *DockerNetwork) Remove(ctx context.Context) error {
+ select {
+ // close reaper if it was created
+ case n.terminationSignal <- true:
+ default:
+ }
+
+ defer n.provider.Close()
+
+ return n.provider.client.NetworkRemove(ctx, n.ID)
+}
+
+// SetTerminationSignal sets the termination signal for the network.
+func (n *DockerNetwork) SetTerminationSignal(signal chan bool) {
+ n.terminationSignal = signal
+}
+
+// DockerProvider implements the ContainerProvider interface
+type DockerProvider struct {
+ *DockerProviderOptions
+ client client.APIClient
+ host string
+ hostCache string // cached daemon host, populated by DaemonHost
+ config config.Config
+ mtx sync.Mutex // contains a mutex: DockerProvider must not be copied
+}
+
+// Client gets the docker client used by the provider
+func (p *DockerProvider) Client() client.APIClient {
+ return p.client
+}
+
+// Close closes the docker client used by the provider.
+// Safe to call when no client was ever set.
+func (p *DockerProvider) Close() error {
+ if p.client == nil {
+ return nil
+ }
+
+ return p.client.Close()
+}
+
+// SetClient sets the docker client to be used by the provider
+func (p *DockerProvider) SetClient(c client.APIClient) {
+ p.client = c
+}
+
+var _ ContainerProvider = (*DockerProvider)(nil)
+
+// BuildImage will build and image from context and Dockerfile, then return the tag.
+// The build is retried with exponential backoff; permanent client errors and
+// bad build options abort the retries immediately.
+func (p *DockerProvider) BuildImage(ctx context.Context, img ImageBuildInfo) (string, error) {
+ var buildOptions types.ImageBuildOptions
+ resp, err := backoff.RetryNotifyWithData(
+ func() (types.ImageBuildResponse, error) {
+ var err error
+ // Re-create the options (and build context) on every attempt,
+ // since the context reader is consumed by each ImageBuild call.
+ buildOptions, err = img.BuildOptions()
+ if err != nil {
+ return types.ImageBuildResponse{}, backoff.Permanent(fmt.Errorf("build options: %w", err))
+ }
+ defer tryClose(buildOptions.Context) // release resources in any case
+
+ resp, err := p.client.ImageBuild(ctx, buildOptions.Context, buildOptions)
+ if err != nil {
+ if isPermanentClientError(err) {
+ return types.ImageBuildResponse{}, backoff.Permanent(fmt.Errorf("build image: %w", err))
+ }
+ return types.ImageBuildResponse{}, err
+ }
+ defer p.Close()
+
+ return resp, nil
+ },
+ backoff.WithContext(backoff.NewExponentialBackOff(), ctx),
+ func(err error, _ time.Duration) {
+ p.Logger.Printf("Failed to build image: %s, will retry", err)
+ },
+ )
+ if err != nil {
+ return "", err // Error is already wrapped.
+ }
+ defer resp.Body.Close()
+
+ output := img.BuildLogWriter()
+
+ // Always process the output, even if it is not printed
+ // to ensure that errors during the build process are
+ // correctly handled.
+ termFd, isTerm := term.GetFdInfo(output)
+ if err = jsonmessage.DisplayJSONMessagesStream(resp.Body, output, termFd, isTerm, nil); err != nil {
+ return "", fmt.Errorf("build image: %w", err)
+ }
+
+ // the first tag is the one we want
+ // NOTE(review): assumes BuildOptions always yields at least one tag —
+ // confirm against ImageBuildInfo implementations.
+ return buildOptions.Tags[0], nil
+}
+
+// CreateContainer fulfils a request for a container without starting it
+func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerRequest) (con Container, err error) {
+ // defer the close of the Docker client connection the soonest
+ defer p.Close()
+
+ var defaultNetwork string
+ defaultNetwork, err = p.ensureDefaultNetwork(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("ensure default network: %w", err)
+ }
+
+ // If default network is not bridge make sure it is attached to the request
+ // as container won't be attached to it automatically
+ // in case of Podman the bridge network is called 'podman' as 'bridge' would conflict
+ if defaultNetwork != p.defaultBridgeNetworkName {
+ isAttached := false
+ for _, net := range req.Networks {
+ if net == defaultNetwork {
+ isAttached = true
+ break
+ }
+ }
+
+ if !isAttached {
+ req.Networks = append(req.Networks, defaultNetwork)
+ }
+ }
+
+ imageName := req.Image
+
+ env := []string{}
+ for envKey, envVar := range req.Env {
+ env = append(env, envKey+"="+envVar)
+ }
+
+ if req.Labels == nil {
+ req.Labels = make(map[string]string)
+ }
+
+ if err = req.Validate(); err != nil {
+ return nil, err
+ }
+
+ // always append the hub substitutor after the user-defined ones
+ req.ImageSubstitutors = append(req.ImageSubstitutors, newPrependHubRegistry(p.config.HubImageNamePrefix))
+
+ var platform *specs.Platform
+
+ defaultHooks := []ContainerLifecycleHooks{
+ DefaultLoggingHook(p.Logger),
+ }
+
+ origLifecycleHooks := req.LifecycleHooks
+ req.LifecycleHooks = []ContainerLifecycleHooks{
+ combineContainerHooks(defaultHooks, req.LifecycleHooks),
+ }
+
+ if req.ShouldBuildImage() {
+ if err = req.buildingHook(ctx); err != nil {
+ return nil, err
+ }
+
+ imageName, err = p.BuildImage(ctx, &req)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Image = imageName
+ if err = req.builtHook(ctx); err != nil {
+ return nil, err
+ }
+ } else {
+ for _, is := range req.ImageSubstitutors {
+ modifiedTag, err := is.Substitute(imageName)
+ if err != nil {
+ return nil, fmt.Errorf("failed to substitute image %s with %s: %w", imageName, is.Description(), err)
+ }
+
+ if modifiedTag != imageName {
+ p.Logger.Printf("✍🏼 Replacing image with %s. From: %s to %s\n", is.Description(), imageName, modifiedTag)
+ imageName = modifiedTag
+ }
+ }
+
+ if req.ImagePlatform != "" {
+ p, err := platforms.Parse(req.ImagePlatform)
+ if err != nil {
+ return nil, fmt.Errorf("invalid platform %s: %w", req.ImagePlatform, err)
+ }
+ platform = &p
+ }
+
+ var shouldPullImage bool
+
+ if req.AlwaysPullImage {
+ shouldPullImage = true // If requested always attempt to pull image
+ } else {
+ img, err := p.client.ImageInspect(ctx, imageName)
+ if err != nil {
+ if !client.IsErrNotFound(err) {
+ return nil, err
+ }
+ shouldPullImage = true
+ }
+ if platform != nil && (img.Architecture != platform.Architecture || img.Os != platform.OS) {
+ shouldPullImage = true
+ }
+ }
+
+ if shouldPullImage {
+ pullOpt := image.PullOptions{
+ Platform: req.ImagePlatform, // may be empty
+ }
+ if err := p.attemptToPullImage(ctx, imageName, pullOpt); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if !isReaperImage(imageName) {
+ // Add the labels that identify this as a testcontainers container and
+ // allow the reaper to terminate it if requested.
+ AddGenericLabels(req.Labels)
+ }
+
+ dockerInput := &container.Config{
+ Entrypoint: req.Entrypoint,
+ Image: imageName,
+ Env: env,
+ Labels: req.Labels,
+ Cmd: req.Cmd,
+ Hostname: req.Hostname,
+ User: req.User,
+ WorkingDir: req.WorkingDir,
+ }
+
+ hostConfig := &container.HostConfig{
+ Privileged: req.Privileged,
+ ShmSize: req.ShmSize,
+ Tmpfs: req.Tmpfs,
+ }
+
+ networkingConfig := &network.NetworkingConfig{}
+
+ // default hooks include logger hook and pre-create hook
+ defaultHooks = append(defaultHooks,
+ defaultPreCreateHook(p, dockerInput, hostConfig, networkingConfig),
+ defaultCopyFileToContainerHook(req.Files),
+ defaultLogConsumersHook(req.LogConsumerCfg),
+ defaultReadinessHook(),
+ )
+
+ // in the case the container needs to access a local port
+ // we need to forward the local port to the container
+ if len(req.HostAccessPorts) > 0 {
+ // a container lifecycle hook will be added, which will expose the host ports to the container
+ // using a SSHD server running in a container. The SSHD server will be started and will
+ // forward the host ports to the container ports.
+ sshdForwardPortsHook, err := exposeHostPorts(ctx, &req, req.HostAccessPorts...)
+ if err != nil {
+ return nil, fmt.Errorf("expose host ports: %w", err)
+ }
+
+ defer func() {
+ if err != nil && con == nil {
+ // Container setup failed so ensure we clean up the sshd container too.
+ ctr := &DockerContainer{
+ provider: p,
+ logger: p.Logger,
+ lifecycleHooks: []ContainerLifecycleHooks{sshdForwardPortsHook},
+ }
+ err = errors.Join(ctr.terminatingHook(ctx))
+ }
+ }()
+
+ defaultHooks = append(defaultHooks, sshdForwardPortsHook)
+ }
+
+ // Combine with the original LifecycleHooks to avoid duplicate logging hooks.
+ req.LifecycleHooks = []ContainerLifecycleHooks{
+ combineContainerHooks(defaultHooks, origLifecycleHooks),
+ }
+
+ err = req.creatingHook(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := p.client.ContainerCreate(ctx, dockerInput, hostConfig, networkingConfig, platform, req.Name)
+ if err != nil {
+ return nil, fmt.Errorf("container create: %w", err)
+ }
+
+ // #248: If there is more than one network specified in the request attach newly created container to them one by one
+ if len(req.Networks) > 1 {
+ for _, n := range req.Networks[1:] {
+ nw, err := p.GetNetwork(ctx, NetworkRequest{
+ Name: n,
+ })
+ if err == nil {
+ endpointSetting := network.EndpointSettings{
+ Aliases: req.NetworkAliases[n],
+ }
+ err = p.client.NetworkConnect(ctx, nw.ID, resp.ID, &endpointSetting)
+ if err != nil {
+ return nil, fmt.Errorf("network connect: %w", err)
+ }
+ }
+ }
+ }
+
+ // This should match the fields set in ContainerFromDockerResponse.
+ ctr := &DockerContainer{
+ ID: resp.ID,
+ WaitingFor: req.WaitingFor,
+ Image: imageName,
+ imageWasBuilt: req.ShouldBuildImage(),
+ keepBuiltImage: req.ShouldKeepBuiltImage(),
+ sessionID: req.sessionID(),
+ exposedPorts: req.ExposedPorts,
+ provider: p,
+ logger: p.Logger,
+ lifecycleHooks: req.LifecycleHooks,
+ }
+
+ if err = ctr.connectReaper(ctx); err != nil {
+ return ctr, err // No wrap as it would stutter.
+ }
+
+ // Wrapped so the returned error is passed to the cleanup function.
+ defer func(ctr *DockerContainer) {
+ ctr.cleanupTermSignal(err)
+ }(ctr)
+
+ if err = ctr.createdHook(ctx); err != nil {
+ // Return the container to allow caller to clean up.
+ return ctr, fmt.Errorf("created hook: %w", err)
+ }
+
+ return ctr, nil
+}
+
+func (p *DockerProvider) findContainerByName(ctx context.Context, name string) (*container.Summary, error) {
+ if name == "" {
+ return nil, nil
+ }
+
+ // Note that, 'name' filter will use regex to find the containers
+ filter := filters.NewArgs(filters.Arg("name", fmt.Sprintf("^%s$", name)))
+ containers, err := p.client.ContainerList(ctx, container.ListOptions{Filters: filter})
+ if err != nil {
+ return nil, fmt.Errorf("container list: %w", err)
+ }
+ defer p.Close()
+
+ if len(containers) > 0 {
+ return &containers[0], nil
+ }
+ return nil, nil
+}
+
+func (p *DockerProvider) waitContainerCreation(ctx context.Context, name string) (*container.Summary, error) {
+ return backoff.RetryNotifyWithData(
+ func() (*container.Summary, error) {
+ c, err := p.findContainerByName(ctx, name)
+ if err != nil {
+ if !errdefs.IsNotFound(err) && isPermanentClientError(err) {
+ return nil, backoff.Permanent(err)
+ }
+ return nil, err
+ }
+
+ if c == nil {
+ return nil, errdefs.NotFound(fmt.Errorf("container %s not found", name))
+ }
+ return c, nil
+ },
+ backoff.WithContext(backoff.NewExponentialBackOff(), ctx),
+ func(err error, duration time.Duration) {
+ if errdefs.IsNotFound(err) {
+ return
+ }
+ p.Logger.Printf("Waiting for container. Got an error: %v; Retrying in %d seconds", err, duration/time.Second)
+ },
+ )
+}
+
+func (p *DockerProvider) ReuseOrCreateContainer(ctx context.Context, req ContainerRequest) (con Container, err error) {
+ c, err := p.findContainerByName(ctx, req.Name)
+ if err != nil {
+ return nil, err
+ }
+ if c == nil {
+ createdContainer, err := p.CreateContainer(ctx, req)
+ if err == nil {
+ return createdContainer, nil
+ }
+ if !createContainerFailDueToNameConflictRegex.MatchString(err.Error()) {
+ return nil, err
+ }
+ c, err = p.waitContainerCreation(ctx, req.Name)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ sessionID := req.sessionID()
+
+ var termSignal chan bool
+ if !p.config.RyukDisabled {
+ r, err := spawner.reaper(context.WithValue(ctx, core.DockerHostContextKey, p.host), sessionID, p)
+ if err != nil {
+ return nil, fmt.Errorf("reaper: %w", err)
+ }
+
+ termSignal, err := r.Connect()
+ if err != nil {
+ return nil, fmt.Errorf("reaper connect: %w", err)
+ }
+
+ // Cleanup on error.
+ defer func() {
+ if err != nil {
+ termSignal <- true
+ }
+ }()
+ }
+
+ // default hooks include logger hook and pre-create hook
+ defaultHooks := []ContainerLifecycleHooks{
+ DefaultLoggingHook(p.Logger),
+ defaultReadinessHook(),
+ defaultLogConsumersHook(req.LogConsumerCfg),
+ }
+
+ dc := &DockerContainer{
+ ID: c.ID,
+ WaitingFor: req.WaitingFor,
+ Image: c.Image,
+ sessionID: sessionID,
+ exposedPorts: req.ExposedPorts,
+ provider: p,
+ terminationSignal: termSignal,
+ logger: p.Logger,
+ lifecycleHooks: []ContainerLifecycleHooks{combineContainerHooks(defaultHooks, req.LifecycleHooks)},
+ }
+
+ err = dc.startedHook(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ dc.isRunning = true
+
+ err = dc.readiedHook(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ return dc, nil
+}
+
// attemptToPullImage tries to pull the image while respecting the ctx cancellations.
// Besides, if the image cannot be pulled due to ErrorNotFound then no need to retry but terminate immediately.
func (p *DockerProvider) attemptToPullImage(ctx context.Context, tag string, pullOpt image.PullOptions) error {
	registry, imageAuth, err := DockerImageAuth(ctx, tag)
	if err != nil {
		// Auth lookup failure is not fatal: public images can still be pulled
		// with empty credentials, so only log and continue.
		p.Logger.Printf("Failed to get image auth for %s. Setting empty credentials for the image: %s. Error is: %s", registry, tag, err)
	} else {
		// see https://github.com/docker/docs/blob/e8e1204f914767128814dca0ea008644709c117f/engine/api/sdk/examples.md?plain=1#L649-L657
		encodedJSON, err := json.Marshal(imageAuth)
		if err != nil {
			p.Logger.Printf("Failed to marshal image auth. Setting empty credentials for the image: %s. Error is: %s", tag, err)
		} else {
			pullOpt.RegistryAuth = base64.URLEncoding.EncodeToString(encodedJSON)
		}
	}

	var pull io.ReadCloser
	err = backoff.RetryNotify(
		func() error {
			pull, err = p.client.ImagePull(ctx, tag, pullOpt)
			if err != nil {
				// Permanent client errors (not found, unauthorized, ...) will
				// not succeed on retry, so abort the backoff loop immediately.
				if isPermanentClientError(err) {
					return backoff.Permanent(err)
				}
				return err
			}
			defer p.Close()

			return nil
		},
		backoff.WithContext(backoff.NewExponentialBackOff(), ctx),
		func(err error, _ time.Duration) {
			p.Logger.Printf("Failed to pull image: %s, will retry", err)
		},
	)
	if err != nil {
		return err
	}
	defer pull.Close()

	// download of docker image finishes at EOF of the pull request
	_, err = io.ReadAll(pull)
	return err
}
+
+// Health measure the healthiness of the provider. Right now we leverage the
+// docker-client Info endpoint to see if the daemon is reachable.
+func (p *DockerProvider) Health(ctx context.Context) error {
+ _, err := p.client.Info(ctx)
+ defer p.Close()
+
+ return err
+}
+
+// RunContainer takes a RequestContainer as input and it runs a container via the docker sdk
+func (p *DockerProvider) RunContainer(ctx context.Context, req ContainerRequest) (Container, error) {
+ c, err := p.CreateContainer(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := c.Start(ctx); err != nil {
+ return c, fmt.Errorf("%w: could not start container", err)
+ }
+
+ return c, nil
+}
+
+// Config provides the TestcontainersConfig read from $HOME/.testcontainers.properties or
+// the environment variables
+func (p *DockerProvider) Config() TestcontainersConfig {
+ return TestcontainersConfig{
+ Host: p.config.Host,
+ TLSVerify: p.config.TLSVerify,
+ CertPath: p.config.CertPath,
+ RyukDisabled: p.config.RyukDisabled,
+ RyukPrivileged: p.config.RyukPrivileged,
+ Config: p.config,
+ }
+}
+
// DaemonHost gets the host or ip of the Docker daemon where ports are exposed on
// Warning: this is based on your Docker host setting. Will fail if using an SSH tunnel
// You can use the "TESTCONTAINERS_HOST_OVERRIDE" env variable to set this yourself
func (p *DockerProvider) DaemonHost(ctx context.Context) (string, error) {
	// Serialise access to the cached host value.
	p.mtx.Lock()
	defer p.mtx.Unlock()

	return p.daemonHostLocked(ctx)
}
+
// daemonHostLocked resolves the daemon host and caches it in p.hostCache.
// Callers must hold p.mtx.
func (p *DockerProvider) daemonHostLocked(ctx context.Context) (string, error) {
	if p.hostCache != "" {
		return p.hostCache, nil
	}

	// An explicit override always wins.
	host, exists := os.LookupEnv("TESTCONTAINERS_HOST_OVERRIDE")
	if exists {
		p.hostCache = host
		return p.hostCache, nil
	}

	// infer from Docker host
	daemonURL, err := url.Parse(p.client.DaemonHost())
	if err != nil {
		return "", err
	}
	defer p.Close()

	switch daemonURL.Scheme {
	case "http", "https", "tcp":
		// Remote daemon: ports are exposed on the daemon's own host.
		p.hostCache = daemonURL.Hostname()
	case "unix", "npipe":
		if core.InAContainer() {
			// Running inside a container: reach the daemon through the
			// default network's gateway, falling back to the OS default
			// gateway and finally to localhost.
			defaultNetwork, err := p.ensureDefaultNetworkLocked(ctx)
			if err != nil {
				return "", fmt.Errorf("ensure default network: %w", err)
			}
			ip, err := p.getGatewayIP(ctx, defaultNetwork)
			if err != nil {
				ip, err = core.DefaultGatewayIP()
				if err != nil {
					ip = "localhost"
				}
			}
			p.hostCache = ip
		} else {
			p.hostCache = "localhost"
		}
	default:
		return "", errors.New("could not determine host through env or docker host")
	}

	return p.hostCache, nil
}
+
+// Deprecated: use network.New instead
+// CreateNetwork returns the object representing a new network identified by its name
+func (p *DockerProvider) CreateNetwork(ctx context.Context, req NetworkRequest) (net Network, err error) {
+ // defer the close of the Docker client connection the soonest
+ defer p.Close()
+
+ if _, err = p.ensureDefaultNetwork(ctx); err != nil {
+ return nil, fmt.Errorf("ensure default network: %w", err)
+ }
+
+ if req.Labels == nil {
+ req.Labels = make(map[string]string)
+ }
+
+ nc := network.CreateOptions{
+ Driver: req.Driver,
+ Internal: req.Internal,
+ EnableIPv6: req.EnableIPv6,
+ Attachable: req.Attachable,
+ Labels: req.Labels,
+ IPAM: req.IPAM,
+ }
+
+ sessionID := req.sessionID()
+
+ var termSignal chan bool
+ if !p.config.RyukDisabled {
+ r, err := spawner.reaper(context.WithValue(ctx, core.DockerHostContextKey, p.host), sessionID, p)
+ if err != nil {
+ return nil, fmt.Errorf("reaper: %w", err)
+ }
+
+ termSignal, err := r.Connect()
+ if err != nil {
+ return nil, fmt.Errorf("reaper connect: %w", err)
+ }
+
+ // Cleanup on error.
+ defer func() {
+ if err != nil {
+ termSignal <- true
+ }
+ }()
+ }
+
+ // add the labels that the reaper will use to terminate the network to the request
+ core.AddDefaultLabels(sessionID, req.Labels)
+
+ response, err := p.client.NetworkCreate(ctx, req.Name, nc)
+ if err != nil {
+ return &DockerNetwork{}, fmt.Errorf("create network: %w", err)
+ }
+
+ n := &DockerNetwork{
+ ID: response.ID,
+ Driver: req.Driver,
+ Name: req.Name,
+ terminationSignal: termSignal,
+ provider: p,
+ }
+
+ return n, nil
+}
+
+// GetNetwork returns the object representing the network identified by its name
+func (p *DockerProvider) GetNetwork(ctx context.Context, req NetworkRequest) (network.Inspect, error) {
+ networkResource, err := p.client.NetworkInspect(ctx, req.Name, network.InspectOptions{
+ Verbose: true,
+ })
+ if err != nil {
+ return network.Inspect{}, err
+ }
+
+ return networkResource, err
+}
+
+func (p *DockerProvider) GetGatewayIP(ctx context.Context) (string, error) {
+ // Use a default network as defined in the DockerProvider
+ defaultNetwork, err := p.ensureDefaultNetwork(ctx)
+ if err != nil {
+ return "", fmt.Errorf("ensure default network: %w", err)
+ }
+ return p.getGatewayIP(ctx, defaultNetwork)
+}
+
+func (p *DockerProvider) getGatewayIP(ctx context.Context, defaultNetwork string) (string, error) {
+ nw, err := p.GetNetwork(ctx, NetworkRequest{Name: defaultNetwork})
+ if err != nil {
+ return "", err
+ }
+
+ var ip string
+ for _, cfg := range nw.IPAM.Config {
+ if cfg.Gateway != "" {
+ ip = cfg.Gateway
+ break
+ }
+ }
+ if ip == "" {
+ return "", errors.New("Failed to get gateway IP from network settings")
+ }
+
+ return ip, nil
+}
+
// ensureDefaultNetwork ensures that defaultNetwork is set and creates
// it if it does not exist, returning its value.
// It is safe to call this method concurrently.
func (p *DockerProvider) ensureDefaultNetwork(ctx context.Context) (string, error) {
	// Take the provider lock; the real work happens in the *Locked variant.
	p.mtx.Lock()
	defer p.mtx.Unlock()
	return p.ensureDefaultNetworkLocked(ctx)
}
+
// ensureDefaultNetworkLocked resolves (or creates) the default network and
// caches it in p.defaultNetwork. Callers must hold p.mtx.
func (p *DockerProvider) ensureDefaultNetworkLocked(ctx context.Context) (string, error) {
	if p.defaultNetwork != "" {
		// Already set.
		return p.defaultNetwork, nil
	}

	networkResources, err := p.client.NetworkList(ctx, network.ListOptions{})
	if err != nil {
		return "", fmt.Errorf("network list: %w", err)
	}

	// TODO: remove once we have docker context support via #2810
	// Prefer the default bridge network if it exists.
	// This makes the results stable as network list order is not guaranteed.
	for _, net := range networkResources {
		switch net.Name {
		case p.defaultBridgeNetworkName:
			// Bridge wins outright: return immediately.
			p.defaultNetwork = p.defaultBridgeNetworkName
			return p.defaultNetwork, nil
		case ReaperDefault:
			// Remember the reaper network but keep scanning for the bridge.
			p.defaultNetwork = ReaperDefault
		}
	}

	if p.defaultNetwork != "" {
		return p.defaultNetwork, nil
	}

	// Create a bridge network for the container communications.
	_, err = p.client.NetworkCreate(ctx, ReaperDefault, network.CreateOptions{
		Driver:     Bridge,
		Attachable: true,
		Labels:     GenericLabels(),
	})
	// If the network already exists, we can ignore the error as that can
	// happen if we are running multiple tests in parallel and we only
	// need to ensure that the network exists.
	if err != nil && !errdefs.IsConflict(err) {
		return "", fmt.Errorf("network create: %w", err)
	}

	p.defaultNetwork = ReaperDefault

	return p.defaultNetwork, nil
}
+
// ContainerFromType builds a Docker container struct from the response of the Docker API
//
// The returned container is wired to the reaper; on a failure after that
// point the container is returned together with a non-nil error so the
// caller can clean it up.
func (p *DockerProvider) ContainerFromType(ctx context.Context, response container.Summary) (ctr *DockerContainer, err error) {
	exposedPorts := make([]string, len(response.Ports))
	for i, port := range response.Ports {
		exposedPorts[i] = fmt.Sprintf("%d/%s", port.PublicPort, port.Type)
	}

	// This should match the fields set in CreateContainer.
	ctr = &DockerContainer{
		ID:            response.ID,
		Image:         response.Image,
		imageWasBuilt: false,
		sessionID:     response.Labels[core.LabelSessionID],
		isRunning:     response.State == "running",
		exposedPorts:  exposedPorts,
		provider:      p,
		logger:        p.Logger,
		lifecycleHooks: []ContainerLifecycleHooks{
			DefaultLoggingHook(p.Logger),
		},
	}

	if err = ctr.connectReaper(ctx); err != nil {
		return nil, err
	}

	// Wrapped so the returned error is passed to the cleanup function.
	defer func(ctr *DockerContainer) {
		ctr.cleanupTermSignal(err)
	}(ctr)

	// populate the raw representation of the container
	jsonRaw, err := ctr.inspectRawContainer(ctx)
	if err != nil {
		// Return the container to allow caller to clean up.
		return ctr, fmt.Errorf("inspect raw container: %w", err)
	}

	// the health status of the container, if any
	if health := jsonRaw.State.Health; health != nil {
		ctr.healthStatus = health.Status
	}

	return ctr, nil
}
+
+// ListImages list images from the provider. If an image has multiple Tags, each tag is reported
+// individually with the same ID and same labels
+func (p *DockerProvider) ListImages(ctx context.Context) ([]ImageInfo, error) {
+ images := []ImageInfo{}
+
+ imageList, err := p.client.ImageList(ctx, image.ListOptions{})
+ if err != nil {
+ return images, fmt.Errorf("listing images %w", err)
+ }
+
+ for _, img := range imageList {
+ for _, tag := range img.RepoTags {
+ images = append(images, ImageInfo{ID: img.ID, Name: tag})
+ }
+ }
+
+ return images, nil
+}
+
+// SaveImages exports a list of images as an uncompressed tar
+func (p *DockerProvider) SaveImages(ctx context.Context, output string, images ...string) error {
+ outputFile, err := os.Create(output)
+ if err != nil {
+ return fmt.Errorf("opening output file %w", err)
+ }
+ defer func() {
+ _ = outputFile.Close()
+ }()
+
+ imageReader, err := p.client.ImageSave(ctx, images)
+ if err != nil {
+ return fmt.Errorf("saving images %w", err)
+ }
+ defer func() {
+ _ = imageReader.Close()
+ }()
+
+ // Attempt optimized readFrom, implemented in linux
+ _, err = outputFile.ReadFrom(imageReader)
+ if err != nil {
+ return fmt.Errorf("writing images to output %w", err)
+ }
+
+ return nil
+}
+
// PullImage pulls image from registry
//
// It delegates to attemptToPullImage with default pull options, so the
// pull is retried with backoff and honours ctx cancellation.
func (p *DockerProvider) PullImage(ctx context.Context, img string) error {
	return p.attemptToPullImage(ctx, img, image.PullOptions{})
}
+
+var permanentClientErrors = []func(error) bool{
+ errdefs.IsNotFound,
+ errdefs.IsInvalidParameter,
+ errdefs.IsUnauthorized,
+ errdefs.IsForbidden,
+ errdefs.IsNotImplemented,
+ errdefs.IsSystem,
+}
+
+func isPermanentClientError(err error) bool {
+ for _, isErrFn := range permanentClientErrors {
+ if isErrFn(err) {
+ return true
+ }
+ }
+ return false
+}
+
+func tryClose(r io.Reader) {
+ rc, ok := r.(io.Closer)
+ if ok {
+ _ = rc.Close()
+ }
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/docker_auth.go b/vendor/github.com/testcontainers/testcontainers-go/docker_auth.go
new file mode 100644
index 0000000..58b3ef2
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/docker_auth.go
@@ -0,0 +1,282 @@
+package testcontainers
+
+import (
+ "context"
+ "crypto/md5"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/url"
+ "os"
+ "sync"
+
+ "github.com/cpuguy83/dockercfg"
+ "github.com/docker/docker/api/types/registry"
+
+ "github.com/testcontainers/testcontainers-go/internal/core"
+)
+
// defaultRegistryFn is variable overwritten in tests to check for behaviour with different default values.
// In production it always points at defaultRegistry below.
var defaultRegistryFn = defaultRegistry

// getRegistryCredentials is a variable overwritten in tests to mock the dockercfg.GetRegistryCredentials function.
var getRegistryCredentials = dockercfg.GetRegistryCredentials
+
+// DockerImageAuth returns the auth config for the given Docker image, extracting first its Docker registry.
+// Finally, it will use the credential helpers to extract the information from the docker config file
+// for that registry, if it exists.
+func DockerImageAuth(ctx context.Context, image string) (string, registry.AuthConfig, error) {
+ configs, err := getDockerAuthConfigs()
+ if err != nil {
+ reg := core.ExtractRegistry(image, defaultRegistryFn(ctx))
+ return reg, registry.AuthConfig{}, err
+ }
+
+ return dockerImageAuth(ctx, image, configs)
+}
+
+// dockerImageAuth returns the auth config for the given Docker image.
+func dockerImageAuth(ctx context.Context, image string, configs map[string]registry.AuthConfig) (string, registry.AuthConfig, error) {
+ defaultRegistry := defaultRegistryFn(ctx)
+ reg := core.ExtractRegistry(image, defaultRegistry)
+
+ if cfg, ok := getRegistryAuth(reg, configs); ok {
+ return reg, cfg, nil
+ }
+
+ return reg, registry.AuthConfig{}, dockercfg.ErrCredentialsNotFound
+}
+
+func getRegistryAuth(reg string, cfgs map[string]registry.AuthConfig) (registry.AuthConfig, bool) {
+ if cfg, ok := cfgs[reg]; ok {
+ return cfg, true
+ }
+
+ // fallback match using authentication key host
+ for k, cfg := range cfgs {
+ keyURL, err := url.Parse(k)
+ if err != nil {
+ continue
+ }
+
+ host := keyURL.Host
+ if keyURL.Scheme == "" {
+ // url.Parse: The url may be relative (a path, without a host) [...]
+ host = keyURL.Path
+ }
+
+ if host == reg {
+ return cfg, true
+ }
+ }
+
+ return registry.AuthConfig{}, false
+}
+
+// defaultRegistry returns the default registry to use when pulling images
+// It will use the docker daemon to get the default registry, returning "https://index.docker.io/v1/" if
+// it fails to get the information from the daemon
+func defaultRegistry(ctx context.Context) string {
+ client, err := NewDockerClientWithOpts(ctx)
+ if err != nil {
+ return core.IndexDockerIO
+ }
+ defer client.Close()
+
+ info, err := client.Info(ctx)
+ if err != nil {
+ return core.IndexDockerIO
+ }
+
+ return info.IndexServerAddress
+}
+
// authConfigResult is a result looking up auth details for key.
type authConfigResult struct {
	key string              // registry key the config belongs to
	cfg registry.AuthConfig // resolved auth config
	err error               // non-nil if the lookup failed
}

// credentialsCache is a cache for registry credentials.
// It is safe for concurrent use: entries is guarded by mtx.
type credentialsCache struct {
	entries map[string]credentials // keyed by "<configKey>:<hostname>"
	mtx     sync.RWMutex
}

// credentials represents the username and password for a registry.
// If username is empty, password holds an identity token instead.
type credentials struct {
	username string
	password string
}

// creds is the process-wide credentials cache.
var creds = &credentialsCache{entries: map[string]credentials{}}
+
+// AuthConfig updates the details in authConfig for the given hostname
+// as determined by the details in configKey.
+func (c *credentialsCache) AuthConfig(hostname, configKey string, authConfig *registry.AuthConfig) error {
+ u, p, err := creds.get(hostname, configKey)
+ if err != nil {
+ return err
+ }
+
+ if u != "" {
+ authConfig.Username = u
+ authConfig.Password = p
+ } else {
+ authConfig.IdentityToken = p
+ }
+
+ return nil
+}
+
// get returns the username and password for the given hostname
// as determined by the details in configPath.
// If the username is empty, the password is an identity token.
func (c *credentialsCache) get(hostname, configKey string) (string, string, error) {
	key := configKey + ":" + hostname

	// Fast path: read-lock lookup of a cached entry.
	c.mtx.RLock()
	entry, ok := c.entries[key]
	c.mtx.RUnlock()

	if ok {
		return entry.username, entry.password, nil
	}

	// No entry found, request and cache.
	// Concurrent callers may both miss and fetch; the last write wins, which
	// is assumed harmless as the helper returns equivalent values.
	user, password, err := getRegistryCredentials(hostname)
	if err != nil {
		return "", "", fmt.Errorf("getting credentials for %s: %w", hostname, err)
	}

	c.mtx.Lock()
	c.entries[key] = credentials{username: user, password: password}
	c.mtx.Unlock()

	return user, password, nil
}
+
+// configKey returns a key to use for caching credentials based on
+// the contents of the currently active config.
+func configKey(cfg *dockercfg.Config) (string, error) {
+ h := md5.New()
+ if err := json.NewEncoder(h).Encode(cfg); err != nil {
+ return "", fmt.Errorf("encode config: %w", err)
+ }
+
+ return hex.EncodeToString(h.Sum(nil)), nil
+}
+
// getDockerAuthConfigs returns a map with the auth configs from the docker config file
// using the registry as the key
//
// Static auth entries and credential-helper entries are resolved concurrently,
// one goroutine per entry, and collected over the results channel. Any single
// failure causes the whole lookup to fail with the joined errors.
func getDockerAuthConfigs() (map[string]registry.AuthConfig, error) {
	cfg, err := getDockerConfig()
	if err != nil {
		// A missing config file simply means no credentials.
		if errors.Is(err, os.ErrNotExist) {
			return map[string]registry.AuthConfig{}, nil
		}

		return nil, err
	}

	// Fingerprint of the active config, used as part of the credentials cache key.
	key, err := configKey(cfg)
	if err != nil {
		return nil, err
	}

	// size covers both loops below, so a single wg.Add is sufficient.
	size := len(cfg.AuthConfigs) + len(cfg.CredentialHelpers)
	cfgs := make(map[string]registry.AuthConfig, size)
	results := make(chan authConfigResult, size)
	var wg sync.WaitGroup
	wg.Add(size)
	for k, v := range cfg.AuthConfigs {
		go func(k string, v dockercfg.AuthConfig) {
			defer wg.Done()

			ac := registry.AuthConfig{
				Auth:          v.Auth,
				Email:         v.Email,
				IdentityToken: v.IdentityToken,
				Password:      v.Password,
				RegistryToken: v.RegistryToken,
				ServerAddress: v.ServerAddress,
				Username:      v.Username,
			}

			switch {
			case ac.Username == "" && ac.Password == "":
				// Look up credentials from the credential store.
				if err := creds.AuthConfig(k, key, &ac); err != nil {
					results <- authConfigResult{err: err}
					return
				}
			case ac.Auth == "":
				// Create auth from the username and password encoding.
				ac.Auth = base64.StdEncoding.EncodeToString([]byte(ac.Username + ":" + ac.Password))
			}

			results <- authConfigResult{key: k, cfg: ac}
		}(k, v)
	}

	// In the case where the auth field in the .docker/conf.json is empty, and the user has
	// credential helpers registered the auth comes from there.
	for k := range cfg.CredentialHelpers {
		go func(k string) {
			defer wg.Done()

			var ac registry.AuthConfig
			if err := creds.AuthConfig(k, key, &ac); err != nil {
				results <- authConfigResult{err: err}
				return
			}

			results <- authConfigResult{key: k, cfg: ac}
		}(k)
	}

	// Close the results channel once every worker has reported.
	go func() {
		wg.Wait()
		close(results)
	}()

	var errs []error
	for result := range results {
		if result.err != nil {
			errs = append(errs, result.err)
			continue
		}

		cfgs[result.key] = result.cfg
	}

	if len(errs) > 0 {
		return nil, errors.Join(errs...)
	}

	return cfgs, nil
}
+
+// getDockerConfig returns the docker config file. It will internally check, in this particular order:
+// 1. the DOCKER_AUTH_CONFIG environment variable, unmarshalling it into a dockercfg.Config
+// 2. the DOCKER_CONFIG environment variable, as the path to the config file
+// 3. else it will load the default config file, which is ~/.docker/config.json
+func getDockerConfig() (*dockercfg.Config, error) {
+ if env := os.Getenv("DOCKER_AUTH_CONFIG"); env != "" {
+ var cfg dockercfg.Config
+ if err := json.Unmarshal([]byte(env), &cfg); err != nil {
+ return nil, fmt.Errorf("unmarshal DOCKER_AUTH_CONFIG: %w", err)
+ }
+
+ return &cfg, nil
+ }
+
+ cfg, err := dockercfg.LoadDefaultConfig()
+ if err != nil {
+ return nil, fmt.Errorf("load default config: %w", err)
+ }
+
+ return &cfg, nil
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/docker_client.go b/vendor/github.com/testcontainers/testcontainers-go/docker_client.go
new file mode 100644
index 0000000..dd6d90e
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/docker_client.go
@@ -0,0 +1,143 @@
+package testcontainers
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/events"
+ "github.com/docker/docker/api/types/registry"
+ "github.com/docker/docker/api/types/system"
+ "github.com/docker/docker/client"
+
+ "github.com/testcontainers/testcontainers-go/internal"
+ "github.com/testcontainers/testcontainers-go/internal/core"
+ "github.com/testcontainers/testcontainers-go/log"
+)
+
// DockerClient is a wrapper around the docker client that is used by testcontainers-go.
// It implements the SystemAPIClient interface in order to cache the docker info and reuse it.
type DockerClient struct {
	*client.Client // client is embedded into our own client
}

var (
	// dockerInfo stores the docker info to be reused in the Info method
	dockerInfo system.Info
	// dockerInfoSet records whether dockerInfo has been populated.
	dockerInfoSet bool
	// dockerInfoLock guards dockerInfo and dockerInfoSet.
	dockerInfoLock sync.Mutex
)

// implements SystemAPIClient interface
var _ client.SystemAPIClient = &DockerClient{}
+
// Events returns a channel to listen to events that happen to the docker daemon.
// It is a plain passthrough to the embedded client.
func (c *DockerClient) Events(ctx context.Context, options events.ListOptions) (<-chan events.Message, <-chan error) {
	return c.Client.Events(ctx, options)
}
+
// Info returns information about the docker server. The result of Info is cached
// and reused every time Info is called.
// It will also print out the docker server info, and the resolved Docker paths, to the default logger.
func (c *DockerClient) Info(ctx context.Context) (system.Info, error) {
	// The cache is package-level, so it is shared by all DockerClient values
	// and only populated once per process.
	dockerInfoLock.Lock()
	defer dockerInfoLock.Unlock()
	if dockerInfoSet {
		return dockerInfo, nil
	}

	info, err := c.Client.Info(ctx)
	if err != nil {
		return info, fmt.Errorf("failed to retrieve docker info: %w", err)
	}
	dockerInfo = info
	dockerInfoSet = true

	infoMessage := `%v - Connected to docker: 
  Server Version: %v
  API Version: %v
  Operating System: %v
  Total Memory: %v MB%s
  Testcontainers for Go Version: v%s
  Resolved Docker Host: %s
  Resolved Docker Socket Path: %s
  Test SessionID: %s
  Test ProcessID: %s
`
	infoLabels := ""
	if len(dockerInfo.Labels) > 0 {
		infoLabels = `
  Labels:`
		for _, lb := range dockerInfo.Labels {
			infoLabels += "\n    " + lb
		}
	}

	// Printed only on the first successful call, since later calls return the cache.
	log.Printf(infoMessage, packagePath,
		dockerInfo.ServerVersion,
		c.Client.ClientVersion(),
		dockerInfo.OperatingSystem, dockerInfo.MemTotal/1024/1024,
		infoLabels,
		internal.Version,
		core.MustExtractDockerHost(ctx),
		core.MustExtractDockerSocket(ctx),
		core.SessionID(),
		core.ProcessID(),
	)

	return dockerInfo, nil
}
+
// RegistryLogin logs into a Docker registry.
// It is a plain passthrough to the embedded client.
func (c *DockerClient) RegistryLogin(ctx context.Context, auth registry.AuthConfig) (registry.AuthenticateOKBody, error) {
	return c.Client.RegistryLogin(ctx, auth)
}
+
// DiskUsage returns the disk usage of all images.
// It is a plain passthrough to the embedded client.
func (c *DockerClient) DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) {
	return c.Client.DiskUsage(ctx, options)
}
+
// Ping pings the docker server.
// It is a plain passthrough to the embedded client.
func (c *DockerClient) Ping(ctx context.Context) (types.Ping, error) {
	return c.Client.Ping(ctx)
}
+
+// Deprecated: Use NewDockerClientWithOpts instead.
+func NewDockerClient() (*client.Client, error) {
+ cli, err := NewDockerClientWithOpts(context.Background())
+ if err != nil {
+ return nil, err
+ }
+
+ return cli.Client, nil
+}
+
// NewDockerClientWithOpts returns a DockerClient built from core.NewClient
// with the given options, falling back to an environment-configured client
// when the initial Info call fails.
func NewDockerClientWithOpts(ctx context.Context, opt ...client.Opt) (*DockerClient, error) {
	dockerClient, err := core.NewClient(ctx, opt...)
	if err != nil {
		return nil, err
	}

	tcClient := DockerClient{
		Client: dockerClient,
	}

	if _, err = tcClient.Info(ctx); err != nil {
		// Fallback to environment, including the original options
		if len(opt) == 0 {
			opt = []client.Opt{client.FromEnv, client.WithAPIVersionNegotiation()}
		}

		dockerClient, err := client.NewClientWithOpts(opt...)
		if err != nil {
			return nil, err
		}

		tcClient.Client = dockerClient
	}
	// NOTE(review): Close is deferred on the client that is returned; this
	// appears intentional (releasing idle connections while leaving the
	// client usable) — confirm against the docker client's Close semantics.
	defer tcClient.Close()

	return &tcClient, nil
}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/docker_mounts.go b/vendor/github.com/testcontainers/testcontainers-go/docker_mounts.go
new file mode 100644
index 0000000..7954b2b
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/docker_mounts.go
@@ -0,0 +1,140 @@
+package testcontainers
+
+import (
+ "github.com/docker/docker/api/types/mount"
+
+ "github.com/testcontainers/testcontainers-go/log"
+)
+
+// mountTypeMapping maps Testcontainers mount types to Docker API mount types.
+// Types absent from this map are unsupported and are skipped by mapToDockerMounts.
+var mountTypeMapping = map[MountType]mount.Type{
+ MountTypeBind: mount.TypeBind, // Deprecated, it will be removed in a future release
+ MountTypeVolume: mount.TypeVolume,
+ MountTypeTmpfs: mount.TypeTmpfs,
+ MountTypePipe: mount.TypeNamedPipe,
+}
+
+// Deprecated: use Files or HostConfigModifier in the ContainerRequest, or copy files container APIs to make containers portable across Docker environments
+// BindMounter can optionally be implemented by mount sources
+// to support advanced scenarios based on mount.BindOptions.
+type BindMounter interface {
+ GetBindOptions() *mount.BindOptions
+}
+
+// VolumeMounter can optionally be implemented by mount sources
+// to support advanced scenarios based on mount.VolumeOptions.
+// Implementations are detected via a type switch in mapToDockerMounts.
+type VolumeMounter interface {
+ GetVolumeOptions() *mount.VolumeOptions
+}
+
+// TmpfsMounter can optionally be implemented by mount sources
+// to support advanced scenarios based on mount.TmpfsOptions.
+// Implementations are detected via a type switch in mapToDockerMounts.
+type TmpfsMounter interface {
+ GetTmpfsOptions() *mount.TmpfsOptions
+}
+
+// DockerBindMountSource is a mount source backed by a path on the Docker host.
+//
+// Deprecated: use Files or HostConfigModifier in the ContainerRequest, or copy files container APIs to make containers portable across Docker environments
+type DockerBindMountSource struct {
+ *mount.BindOptions
+
+ // HostPath is the path mounted into the container
+ // the same host path might be mounted to multiple locations within a single container
+ HostPath string
+}
+
+// Source returns the host path to be mounted.
+//
+// Deprecated: use Files or HostConfigModifier in the ContainerRequest, or copy files container APIs to make containers portable across Docker environments
+func (s DockerBindMountSource) Source() string {
+ return s.HostPath
+}
+
+// Type reports this source as a bind mount.
+//
+// Deprecated: use Files or HostConfigModifier in the ContainerRequest, or copy files container APIs to make containers portable across Docker environments
+func (DockerBindMountSource) Type() MountType {
+ return MountTypeBind
+}
+
+// GetBindOptions implements BindMounter.
+//
+// Deprecated: use Files or HostConfigModifier in the ContainerRequest, or copy files container APIs to make containers portable across Docker environments
+func (s DockerBindMountSource) GetBindOptions() *mount.BindOptions {
+ return s.BindOptions
+}
+
+// DockerVolumeMountSource is a mount source backed by a named Docker volume.
+type DockerVolumeMountSource struct {
+ *mount.VolumeOptions
+
+ // Name refers to the name of the volume to be mounted
+ // the same volume might be mounted to multiple locations within a single container
+ Name string
+}
+
+// Source returns the name of the volume to be mounted.
+func (s DockerVolumeMountSource) Source() string {
+ return s.Name
+}
+
+// Type reports this source as a volume mount.
+func (DockerVolumeMountSource) Type() MountType {
+ return MountTypeVolume
+}
+
+// GetVolumeOptions implements VolumeMounter.
+func (s DockerVolumeMountSource) GetVolumeOptions() *mount.VolumeOptions {
+ return s.VolumeOptions
+}
+
+// DockerTmpfsMountSource is a mount source backed by a tmpfs filesystem,
+// combining the generic tmpfs source with Docker-specific tmpfs options.
+type DockerTmpfsMountSource struct {
+ GenericTmpfsMountSource
+ *mount.TmpfsOptions
+}
+
+// GetTmpfsOptions implements TmpfsMounter.
+func (s DockerTmpfsMountSource) GetTmpfsOptions() *mount.TmpfsOptions {
+ return s.TmpfsOptions
+}
+
+// PrepareMounts maps the given []ContainerMount to the corresponding
+// []mount.Mount for further processing.
+// A nil or empty receiver yields an empty (non-nil) slice.
+func (m ContainerMounts) PrepareMounts() []mount.Mount {
+ return mapToDockerMounts(m)
+}
+
+// mapToDockerMounts maps the given []ContainerMount to the corresponding
+// []mount.Mount for further processing. Sources whose mount type has no
+// Docker equivalent (see mountTypeMapping) are silently skipped; bind mount
+// sources are mapped but their bind options are dropped with a log message.
+func mapToDockerMounts(containerMounts ContainerMounts) []mount.Mount {
+ mounts := make([]mount.Mount, 0, len(containerMounts))
+
+ for idx := range containerMounts {
+ m := containerMounts[idx]
+
+ var mountType mount.Type
+ if mt, ok := mountTypeMapping[m.Source.Type()]; ok {
+ mountType = mt
+ } else {
+ // Unsupported mount type: skip this entry entirely.
+ continue
+ }
+
+ containerMount := mount.Mount{
+ Type: mountType,
+ Source: m.Source.Source(),
+ ReadOnly: m.ReadOnly,
+ Target: m.Target.Target(),
+ }
+
+ // Attach type-specific options when the source implements the
+ // corresponding optional interface.
+ switch typedMounter := m.Source.(type) {
+ case VolumeMounter:
+ containerMount.VolumeOptions = typedMounter.GetVolumeOptions()
+ case TmpfsMounter:
+ containerMount.TmpfsOptions = typedMounter.GetTmpfsOptions()
+ case BindMounter:
+ // Bind options are deprecated and intentionally not propagated.
+ log.Printf("Mount type %s is not supported by Testcontainers for Go", m.Source.Type())
+ default:
+ // The provided source type has no custom options
+ }
+
+ // Volumes get the library's generic labels so they can be identified later.
+ if mountType == mount.TypeVolume {
+ if containerMount.VolumeOptions == nil {
+ containerMount.VolumeOptions = &mount.VolumeOptions{
+ Labels: make(map[string]string),
+ }
+ }
+ AddGenericLabels(containerMount.VolumeOptions.Labels)
+ }
+
+ mounts = append(mounts, containerMount)
+ }
+
+ return mounts
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/exec/processor.go b/vendor/github.com/testcontainers/testcontainers-go/exec/processor.go
new file mode 100644
index 0000000..9c852fb
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/exec/processor.go
@@ -0,0 +1,129 @@
+package exec
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/pkg/stdcopy"
+)
+
+// ProcessOptions defines options applicable to the reader processor
+type ProcessOptions struct {
+ // ExecConfig is the configuration handed to the exec-creation API.
+ ExecConfig container.ExecOptions
+ // Reader is the exec output stream; options such as Multiplexed may
+ // replace it with a processed reader.
+ Reader io.Reader
+}
+
+// NewProcessOptions returns a new ProcessOptions instance
+// with the given command and default options:
+// - detach: false
+// - attach stdout: true
+// - attach stderr: true
+func NewProcessOptions(cmd []string) *ProcessOptions {
+ return &ProcessOptions{
+ ExecConfig: container.ExecOptions{
+ Cmd: cmd,
+ Detach: false,
+ AttachStdout: true,
+ AttachStderr: true,
+ },
+ }
+}
+
+// ProcessOption defines a common interface to modify the reader processor
+// These options can be passed to the Exec function in a variadic way to customize the returned Reader instance
+type ProcessOption interface {
+ Apply(opts *ProcessOptions)
+}
+
+// ProcessOptionFunc adapts a plain function to the ProcessOption interface.
+type ProcessOptionFunc func(opts *ProcessOptions)
+
+// Apply implements ProcessOption by invoking the function itself.
+func (fn ProcessOptionFunc) Apply(opts *ProcessOptions) {
+ fn(opts)
+}
+
+// WithUser sets the user the command is executed as.
+func WithUser(user string) ProcessOption {
+ return ProcessOptionFunc(func(opts *ProcessOptions) {
+ opts.ExecConfig.User = user
+ })
+}
+
+// WithWorkingDir sets the working directory for the executed command.
+func WithWorkingDir(workingDir string) ProcessOption {
+ return ProcessOptionFunc(func(opts *ProcessOptions) {
+ opts.ExecConfig.WorkingDir = workingDir
+ })
+}
+
+// WithEnv sets the environment variables for the executed command.
+func WithEnv(env []string) ProcessOption {
+ return ProcessOptionFunc(func(opts *ProcessOptions) {
+ opts.ExecConfig.Env = env
+ })
+}
+
+// safeBuffer is a goroutine safe buffer.
+type safeBuffer struct {
+ mtx sync.Mutex
+ buf bytes.Buffer
+ err error
+}
+
+// Error sets an error for the next read.
+// Once set it is sticky: every subsequent Read returns it.
+func (sb *safeBuffer) Error(err error) {
+ sb.mtx.Lock()
+ defer sb.mtx.Unlock()
+
+ sb.err = err
+}
+
+// Write writes p to the buffer.
+// It is safe for concurrent use by multiple goroutines.
+func (sb *safeBuffer) Write(p []byte) (n int, err error) {
+ sb.mtx.Lock()
+ defer sb.mtx.Unlock()
+
+ return sb.buf.Write(p)
+}
+
+// Read reads up to len(p) bytes into p from the buffer.
+// It is safe for concurrent use by multiple goroutines.
+// An error set via Error takes precedence over any buffered data.
+func (sb *safeBuffer) Read(p []byte) (n int, err error) {
+ sb.mtx.Lock()
+ defer sb.mtx.Unlock()
+
+ if sb.err != nil {
+ return 0, sb.err
+ }
+
+ return sb.buf.Read(p)
+}
+
+// Multiplexed returns a [ProcessOption] that configures the command execution
+// to combine stdout and stderr into a single stream without Docker's multiplexing headers.
+// Applying the option blocks until opts.Reader is fully drained, then replaces
+// it with an in-memory reader over stdout followed by stderr.
+func Multiplexed() ProcessOption {
+ return ProcessOptionFunc(func(opts *ProcessOptions) {
+ // returning fast to bypass those options with a nil reader,
+ // which could be the case when other options are used
+ // to configure the exec creation.
+ if opts.Reader == nil {
+ return
+ }
+
+ done := make(chan struct{})
+
+ var outBuff safeBuffer
+ var errBuff safeBuffer
+ go func() {
+ defer close(done)
+ if _, err := stdcopy.StdCopy(&outBuff, &errBuff, opts.Reader); err != nil {
+ // Surface the copy failure on the next Read of the combined stream.
+ outBuff.Error(fmt.Errorf("copying output: %w", err))
+ return
+ }
+ }()
+
+ // Wait for the demultiplexing goroutine to finish before swapping the reader.
+ <-done
+
+ opts.Reader = io.MultiReader(&outBuff, &errBuff)
+ })
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/file.go b/vendor/github.com/testcontainers/testcontainers-go/file.go
new file mode 100644
index 0000000..9205208
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/file.go
@@ -0,0 +1,143 @@
+package testcontainers
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/testcontainers/testcontainers-go/log"
+)
+
+// isDir reports whether path refers to a directory.
+// It returns an error if the path cannot be opened or stat'ed.
+func isDir(path string) (bool, error) {
+ file, err := os.Open(path)
+ if err != nil {
+ return false, err
+ }
+ defer file.Close()
+
+ fileInfo, err := file.Stat()
+ if err != nil {
+ return false, err
+ }
+
+ if fileInfo.IsDir() {
+ return true, nil
+ }
+
+ return false, nil
+}
+
+// tarDir compress a directory using tar + gzip algorithms.
+// Every entry is stored with the given fileMode, entry names are kept relative
+// to the parent of src, and symlinks are skipped.
+func tarDir(src string, fileMode int64) (*bytes.Buffer, error) {
+ // always pass src as absolute path
+ abs, err := filepath.Abs(src)
+ if err != nil {
+ return &bytes.Buffer{}, fmt.Errorf("error getting absolute path: %w", err)
+ }
+ src = abs
+
+ buffer := &bytes.Buffer{}
+
+ log.Printf(">> creating TAR file from directory: %s\n", src)
+
+ // tar > gzip > buffer
+ zr := gzip.NewWriter(buffer)
+ tw := tar.NewWriter(zr)
+
+ _, baseDir := filepath.Split(src)
+ // keep the path relative to the parent directory
+ index := strings.LastIndex(src, baseDir)
+
+ // walk through every file in the folder
+ err = filepath.Walk(src, func(file string, fi os.FileInfo, errFn error) error {
+ if errFn != nil {
+ return fmt.Errorf("error traversing the file system: %w", errFn)
+ }
+
+ // if a symlink, skip file
+ if fi.Mode().Type() == os.ModeSymlink {
+ log.Printf(">> skipping symlink: %s\n", file)
+ return nil
+ }
+
+ // generate tar header
+ header, err := tar.FileInfoHeader(fi, file)
+ if err != nil {
+ return fmt.Errorf("error getting file info header: %w", err)
+ }
+
+ // see https://pkg.go.dev/archive/tar#FileInfoHeader:
+ // Since fs.FileInfo's Name method only returns the base name of the file it describes,
+ // it may be necessary to modify Header.Name to provide the full path name of the file.
+ header.Name = filepath.ToSlash(file[index:])
+ header.Mode = fileMode
+
+ // write header
+ if err := tw.WriteHeader(header); err != nil {
+ return fmt.Errorf("error writing header: %w", err)
+ }
+
+ // if not a dir, write file content
+ if !fi.IsDir() {
+ data, err := os.Open(file)
+ if err != nil {
+ return fmt.Errorf("error opening file: %w", err)
+ }
+ // the defer fires when this walk callback returns, i.e. once per
+ // visited file, so descriptors are not accumulated across the walk
+ defer data.Close()
+ if _, err := io.Copy(tw, data); err != nil {
+ return fmt.Errorf("error compressing file: %w", err)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return buffer, err
+ }
+
+ // produce tar
+ if err := tw.Close(); err != nil {
+ return buffer, fmt.Errorf("error closing tar file: %w", err)
+ }
+ // produce gzip
+ if err := zr.Close(); err != nil {
+ return buffer, fmt.Errorf("error closing gzip file: %w", err)
+ }
+
+ return buffer, nil
+}
+
+// tarFile compress a single file using tar + gzip algorithms.
+// The file content is produced by the fileContent callback, which is expected
+// to write exactly fileContentSize bytes (archive/tar checks writes against
+// the declared header size).
+func tarFile(basePath string, fileContent func(tw io.Writer) error, fileContentSize int64, fileMode int64) (*bytes.Buffer, error) {
+ buffer := &bytes.Buffer{}
+
+ zr := gzip.NewWriter(buffer)
+ tw := tar.NewWriter(zr)
+
+ hdr := &tar.Header{
+ Name: basePath,
+ Mode: fileMode,
+ Size: fileContentSize,
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return buffer, err
+ }
+ if err := fileContent(tw); err != nil {
+ return buffer, err
+ }
+
+ // produce tar
+ if err := tw.Close(); err != nil {
+ return buffer, fmt.Errorf("error closing tar file: %w", err)
+ }
+ // produce gzip
+ if err := zr.Close(); err != nil {
+ return buffer, fmt.Errorf("error closing gzip file: %w", err)
+ }
+
+ return buffer, nil
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/generate.go b/vendor/github.com/testcontainers/testcontainers-go/generate.go
new file mode 100644
index 0000000..19ae496
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/generate.go
@@ -0,0 +1,3 @@
+package testcontainers
+
+//go:generate mockery
diff --git a/vendor/github.com/testcontainers/testcontainers-go/generic.go b/vendor/github.com/testcontainers/testcontainers-go/generic.go
new file mode 100644
index 0000000..a081b52
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/generic.go
@@ -0,0 +1,119 @@
+package testcontainers
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/testcontainers/testcontainers-go/internal/core"
+ "github.com/testcontainers/testcontainers-go/log"
+)
+
+var (
+ // reuseContainerMx serializes reuse-or-create operations so parallel
+ // tests do not race creating the same named container.
+ reuseContainerMx sync.Mutex
+ // ErrReuseEmptyName is returned when the Reuse option is set without a container name.
+ ErrReuseEmptyName = errors.New("with reuse option a container name mustn't be empty")
+)
+
+// GenericContainerRequest represents parameters to a generic container
+type GenericContainerRequest struct {
+ ContainerRequest // embedded request for provider
+ Started bool // whether to auto-start the container
+ ProviderType ProviderType // which provider to use, Docker if empty
+ Logger log.Logger // provide a container specific Logging - use default global logger if empty
+ Reuse bool // reuse an existing container if it exists or create a new one. a container name mustn't be empty
+}
+
+// Deprecated: will be removed in the future.
+// GenericNetworkRequest represents parameters to a generic network
+type GenericNetworkRequest struct {
+ NetworkRequest // embedded request for provider
+ ProviderType ProviderType // which provider to use, Docker if empty
+}
+
+// Deprecated: use network.New instead
+// GenericNetwork creates a generic network with parameters
+func GenericNetwork(ctx context.Context, req GenericNetworkRequest) (Network, error) {
+ provider, err := req.ProviderType.GetProvider()
+ if err != nil {
+ return nil, err
+ }
+ network, err := provider.CreateNetwork(ctx, req.NetworkRequest)
+ if err != nil {
+ // NOTE(review): wrapping order is unusual (wrapped error first, message
+ // second); kept as-is since the function is deprecated.
+ return nil, fmt.Errorf("%w: failed to create network", err)
+ }
+
+ return network, nil
+}
+
+// GenericContainer creates a generic container with parameters.
+// When req.Started is true the container is also started unless it is already
+// running. On error the (possibly non-nil) container is returned so callers
+// can clean it up.
+func GenericContainer(ctx context.Context, req GenericContainerRequest) (Container, error) {
+ if req.Reuse && req.Name == "" {
+ return nil, ErrReuseEmptyName
+ }
+
+ logger := req.Logger
+ if logger == nil {
+ // Ensure there is always a non-nil logger by default
+ logger = log.Default()
+ }
+ provider, err := req.ProviderType.GetProvider(WithLogger(logger))
+ if err != nil {
+ return nil, fmt.Errorf("get provider: %w", err)
+ }
+ defer provider.Close()
+
+ var c Container
+ if req.Reuse {
+ // we must protect the reusability of the container in the case it's invoked
+ // in a parallel execution, via ParallelContainers or t.Parallel()
+ reuseContainerMx.Lock()
+ defer reuseContainerMx.Unlock()
+
+ c, err = provider.ReuseOrCreateContainer(ctx, req.ContainerRequest)
+ } else {
+ c, err = provider.CreateContainer(ctx, req.ContainerRequest)
+ }
+ if err != nil {
+ // At this point `c` might not be nil. Give the caller an opportunity to call Destroy on the container.
+ // TODO: Remove this debugging.
+ if strings.Contains(err.Error(), "toomanyrequests") {
+ // Debugging information for rate limiting.
+ cfg, err := getDockerConfig()
+ if err == nil {
+ fmt.Printf("XXX: too many requests: %+v", cfg)
+ }
+ }
+ return c, fmt.Errorf("create container: %w", err)
+ }
+
+ if req.Started && !c.IsRunning() {
+ if err := c.Start(ctx); err != nil {
+ return c, fmt.Errorf("start container: %w", err)
+ }
+ }
+ return c, nil
+}
+
+// GenericProvider represents an abstraction for container and network providers
+type GenericProvider interface {
+ ContainerProvider
+ NetworkProvider
+ ImageProvider
+}
+
+// GenericLabels returns a map of labels that can be used to identify resources
+// created by this library. This includes the standard LabelSessionID if the
+// reaper is enabled, otherwise this is excluded to prevent resources being
+// incorrectly reaped.
+func GenericLabels() map[string]string {
+ return core.DefaultLabels(core.SessionID())
+}
+
+// AddGenericLabels adds the generic labels to target.
+// The target map must be non-nil; existing keys are overwritten.
+func AddGenericLabels(target map[string]string) {
+ for k, v := range GenericLabels() {
+ target[k] = v
+ }
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/image.go b/vendor/github.com/testcontainers/testcontainers-go/image.go
new file mode 100644
index 0000000..4816fb7
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/image.go
@@ -0,0 +1,18 @@
+package testcontainers
+
+import (
+ "context"
+)
+
+// ImageInfo represents summary information of an image
+type ImageInfo struct {
+ // ID is the image identifier reported by the provider.
+ ID string
+ // Name is the image name/reference.
+ Name string
+}
+
+// ImageProvider allows manipulating images
+type ImageProvider interface {
+ // ListImages lists the images available in the provider.
+ ListImages(context.Context) ([]ImageInfo, error)
+ // SaveImages saves images to an output (presumably path first, then image
+ // names — confirm with the provider implementation).
+ SaveImages(context.Context, string, ...string) error
+ // PullImage pulls the given image into the provider.
+ PullImage(context.Context, string) error
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go b/vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go
new file mode 100644
index 0000000..64f2f7f
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go
@@ -0,0 +1,185 @@
+package config
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/magiconair/properties"
+)
+
+// ReaperDefaultImage is the default container image used for Ryuk, the
+// resource reaper (garbage collector).
+const ReaperDefaultImage = "testcontainers/ryuk:0.11.0"
+
+var (
+ // tcConfig is the cached configuration, populated once by Read.
+ tcConfig Config
+ // tcConfigOnce guards the one-time initialization; replaced by Reset in tests.
+ tcConfigOnce = new(sync.Once)
+)
+
+// testcontainersConfig {
+
+// Config represents the configuration for Testcontainers.
+// User values are read from ~/.testcontainers.properties file which can be overridden
+// using the specified environment variables. For more information, see [Custom Configuration].
+//
+// The Ryuk prefixed fields controls the [Garbage Collector] feature, which ensures that
+// resources are cleaned up after the test execution.
+//
+// [Garbage Collector]: https://golang.testcontainers.org/features/garbage_collector/
+// [Custom Configuration]: https://golang.testcontainers.org/features/configuration/
+type Config struct {
+ // Host is the address of the Docker daemon.
+ //
+ // Environment variable: DOCKER_HOST
+ Host string `properties:"docker.host,default="`
+
+ // TLSVerify is a flag to enable or disable TLS verification when connecting to a Docker daemon.
+ //
+ // Environment variable: DOCKER_TLS_VERIFY
+ TLSVerify int `properties:"docker.tls.verify,default=0"`
+
+ // CertPath is the path to the directory containing the Docker certificates.
+ // This is used when connecting to a Docker daemon over TLS.
+ //
+ // Environment variable: DOCKER_CERT_PATH
+ CertPath string `properties:"docker.cert.path,default="`
+
+ // HubImageNamePrefix is the prefix used for the images pulled from the Docker Hub.
+ // This is useful when running tests in environments with restricted internet access.
+ //
+ // Environment variable: TESTCONTAINERS_HUB_IMAGE_NAME_PREFIX
+ HubImageNamePrefix string `properties:"hub.image.name.prefix,default="`
+
+ // RyukDisabled is a flag to enable or disable the Garbage Collector.
+ // Setting this to true will prevent testcontainers from automatically cleaning up
+ // resources, which is particularly important in tests which timeout as they
+ // don't run test clean up.
+ //
+ // Environment variable: TESTCONTAINERS_RYUK_DISABLED
+ RyukDisabled bool `properties:"ryuk.disabled,default=false"`
+
+ // RyukPrivileged is a flag to enable or disable the privileged mode for the Garbage Collector container.
+ // Setting this to true will run the Garbage Collector container in privileged mode.
+ //
+ // Environment variable: TESTCONTAINERS_RYUK_CONTAINER_PRIVILEGED
+ RyukPrivileged bool `properties:"ryuk.container.privileged,default=false"`
+
+ // RyukReconnectionTimeout is the time to wait before attempting to reconnect to the Garbage Collector container.
+ //
+ // Environment variable: RYUK_RECONNECTION_TIMEOUT
+ RyukReconnectionTimeout time.Duration `properties:"ryuk.reconnection.timeout,default=10s"`
+
+ // RyukConnectionTimeout is the time to wait before timing out when connecting to the Garbage Collector container.
+ //
+ // Environment variable: RYUK_CONNECTION_TIMEOUT
+ RyukConnectionTimeout time.Duration `properties:"ryuk.connection.timeout,default=1m"`
+
+ // RyukVerbose is a flag to enable or disable verbose logging for the Garbage Collector.
+ //
+ // Environment variable: RYUK_VERBOSE
+ RyukVerbose bool `properties:"ryuk.verbose,default=false"`
+
+ // TestcontainersHost is the address of the Testcontainers host.
+ //
+ // Environment variable: TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE
+ // NOTE(review): the env var named above does not obviously correspond to
+ // the tc.host property — confirm which variable actually overrides this field.
+ TestcontainersHost string `properties:"tc.host,default="`
+}
+
+// }
+
+// Read reads from testcontainers properties file, if it exists
+// it is possible that certain values get overridden when set as environment variables.
+// The result is computed once and cached for subsequent calls.
+func Read() Config {
+ tcConfigOnce.Do(func() {
+ tcConfig = read()
+ })
+
+ return tcConfig
+}
+
+// Reset resets the singleton instance of the Config struct,
+// allowing to read the configuration again.
+// Handy for testing, so do not use it in production code
+// This function is not thread-safe
+func Reset() {
+ tcConfigOnce = new(sync.Once)
+}
+
+// read builds the configuration: first from ~/.testcontainers.properties when
+// present, then applying environment-variable overrides on top.
+func read() Config {
+ config := Config{}
+
+ // applyEnvironmentConfiguration layers env-var overrides over the given
+ // config and returns the result.
+ applyEnvironmentConfiguration := func(config Config) Config {
+ ryukDisabledEnv := os.Getenv("TESTCONTAINERS_RYUK_DISABLED")
+ // NOTE(review): parseBool only checks the value is parseable; the flag is
+ // then compared with the literal "true", so values such as "1" or "t"
+ // parse successfully yet set the flag to false. Confirm this is intended.
+ if parseBool(ryukDisabledEnv) {
+ config.RyukDisabled = ryukDisabledEnv == "true"
+ }
+
+ hubImageNamePrefix := os.Getenv("TESTCONTAINERS_HUB_IMAGE_NAME_PREFIX")
+ if hubImageNamePrefix != "" {
+ config.HubImageNamePrefix = hubImageNamePrefix
+ }
+
+ ryukPrivilegedEnv := os.Getenv("TESTCONTAINERS_RYUK_CONTAINER_PRIVILEGED")
+ if parseBool(ryukPrivilegedEnv) {
+ config.RyukPrivileged = ryukPrivilegedEnv == "true"
+ }
+
+ ryukVerboseEnv := readTestcontainersEnv("RYUK_VERBOSE")
+ if parseBool(ryukVerboseEnv) {
+ config.RyukVerbose = ryukVerboseEnv == "true"
+ }
+
+ // Unset or invalid durations are ignored, keeping the current value.
+ ryukReconnectionTimeoutEnv := readTestcontainersEnv("RYUK_RECONNECTION_TIMEOUT")
+ if timeout, err := time.ParseDuration(ryukReconnectionTimeoutEnv); err == nil {
+ config.RyukReconnectionTimeout = timeout
+ }
+
+ ryukConnectionTimeoutEnv := readTestcontainersEnv("RYUK_CONNECTION_TIMEOUT")
+ if timeout, err := time.ParseDuration(ryukConnectionTimeoutEnv); err == nil {
+ config.RyukConnectionTimeout = timeout
+ }
+
+ return config
+ }
+
+ home, err := os.UserHomeDir()
+ if err != nil {
+ // No home directory: fall back to env-only configuration.
+ return applyEnvironmentConfiguration(config)
+ }
+
+ tcProp := filepath.Join(home, ".testcontainers.properties")
+ // init from a file
+ properties, err := properties.LoadFile(tcProp, properties.UTF8)
+ if err != nil {
+ return applyEnvironmentConfiguration(config)
+ }
+
+ if err := properties.Decode(&config); err != nil {
+ fmt.Printf("invalid testcontainers properties file, returning an empty Testcontainers configuration: %v\n", err)
+ return applyEnvironmentConfiguration(config)
+ }
+
+ return applyEnvironmentConfiguration(config)
+}
+
+// parseBool reports whether input is a literal accepted by strconv.ParseBool
+// ("1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false",
+// "False"); it does not return the parsed value itself.
+func parseBool(input string) bool {
+ _, err := strconv.ParseBool(input)
+ return err == nil
+}
+
+// readTestcontainersEnv reads the environment variable with the given name.
+// It checks for the environment variable with the given name first, and then
+// checks for the environment variable with the given name prefixed with "TESTCONTAINERS_".
+func readTestcontainersEnv(envVar string) string {
+ value := os.Getenv(envVar)
+ if value != "" {
+ return value
+ }
+
+ // TODO: remove this prefix after the next major release
+ const prefix string = "TESTCONTAINERS_"
+
+ return os.Getenv(prefix + envVar)
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/core/bootstrap.go b/vendor/github.com/testcontainers/testcontainers-go/internal/core/bootstrap.go
new file mode 100644
index 0000000..201d4b0
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/internal/core/bootstrap.go
@@ -0,0 +1,106 @@
+package core
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "os"
+
+ "github.com/google/uuid"
+ "github.com/shirou/gopsutil/v4/process"
+)
+
+// sessionID holds a unique session ID for the current test session. Because each Go package
+// will be run in a separate process, we need a way to identify the current test session.
+// By test session, we mean:
+// - a single "go test" invocation (including flags)
+// - a single "go test ./..." invocation (including flags)
+// - the execution of a single test or a set of tests using the IDE
+//
+// As a consequence, with the sole goal of aggregating test execution across multiple
+// packages, this function will use the parent process ID (pid) of the current process
+// and its creation date, to use it to generate a unique session ID. We are using the parent pid because
+// the current process will be a child process of:
+// - the process that is running the tests, e.g.: "go test";
+// - the process that is running the application in development mode, e.g. "go run main.go -tags dev";
+// - the process that is running the tests in the IDE, e.g.: "go test ./...".
+//
+// Finally, we will hash the combination of the "testcontainers-go:" string with the parent pid
+// and the creation date of that parent process to generate a unique session ID.
+//
+// This sessionID will be used to:
+// - identify the test session, aggregating the test execution of multiple packages in the same test session.
+// - tag the containers created by testcontainers-go, adding a label to the container with the session ID.
+var sessionID string
+
+// projectPath holds the current working directory of the parent test process running Testcontainers for Go.
+// If it's not possible to get that directory, the library will use the current working directory. If again
+// it's not possible to get the current working directory, the library will use a temporary directory.
+var projectPath string
+
+// processID holds a unique ID for the current test process. Because each Go package will be run in a separate process,
+// we need a way to identify the current test process, in the form of a UUID
+var processID string
+
+// sessionIDPlaceholder is the format used to derive the session hash from the
+// parent pid and its creation time.
+const sessionIDPlaceholder = "testcontainers-go:%d:%d"
+
+// init derives processID, projectPath and sessionID at package load time,
+// falling back to random UUIDs / the current (or temp) directory when the
+// parent process cannot be inspected.
+func init() {
+ processID = uuid.New().String()
+
+ parentPid := os.Getppid()
+ var createTime int64
+ fallbackCwd, err := os.Getwd()
+ if err != nil {
+ // very unlikely to fail, but if it does, we will use a temp dir
+ fallbackCwd = os.TempDir()
+ }
+
+ processes, err := process.Processes()
+ if err != nil {
+ // Cannot enumerate processes: fall back to a random session ID.
+ sessionID = uuid.New().String()
+ projectPath = fallbackCwd
+ return
+ }
+
+ // Locate the parent process to read its cwd and creation time.
+ for _, p := range processes {
+ if int(p.Pid) != parentPid {
+ continue
+ }
+
+ cwd, err := p.Cwd()
+ if err != nil {
+ cwd = fallbackCwd
+ }
+ projectPath = cwd
+
+ t, err := p.CreateTime()
+ if err != nil {
+ sessionID = uuid.New().String()
+ return
+ }
+
+ createTime = t
+ break
+ }
+
+ // NOTE(review): if the parent pid was not found above, createTime stays 0
+ // and projectPath stays empty; the hash below is still computed.
+ hasher := sha256.New()
+ _, err = hasher.Write([]byte(fmt.Sprintf(sessionIDPlaceholder, parentPid, createTime)))
+ if err != nil {
+ sessionID = uuid.New().String()
+ return
+ }
+
+ sessionID = hex.EncodeToString(hasher.Sum(nil))
+}
+
+// ProcessID returns the unique ID generated for the current test process.
+func ProcessID() string {
+ return processID
+}
+
+// ProjectPath returns the working directory of the parent test process,
+// as resolved at package initialization.
+func ProjectPath() string {
+ return projectPath
+}
+
+// SessionID returns the test-session identifier computed at package initialization.
+func SessionID() string {
+ return sessionID
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/core/client.go b/vendor/github.com/testcontainers/testcontainers-go/internal/core/client.go
new file mode 100644
index 0000000..04a54bc
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/internal/core/client.go
@@ -0,0 +1,50 @@
+package core
+
+import (
+ "context"
+ "path/filepath"
+
+ "github.com/docker/docker/client"
+
+ "github.com/testcontainers/testcontainers-go/internal"
+ "github.com/testcontainers/testcontainers-go/internal/config"
+)
+
+// NewClient returns a new docker client extracting the docker host from the different alternatives.
+// Defaults (environment, API version negotiation, resolved host, TLS when
+// configured, telemetry headers) are applied first; caller options come last
+// and therefore take priority.
+func NewClient(ctx context.Context, ops ...client.Opt) (*client.Client, error) {
+ tcConfig := config.Read()
+
+ dockerHost := MustExtractDockerHost(ctx)
+
+ opts := []client.Opt{client.FromEnv, client.WithAPIVersionNegotiation()}
+ if dockerHost != "" {
+ opts = append(opts, client.WithHost(dockerHost))
+
+ // for further information, read https://docs.docker.com/engine/security/protect-access/
+ if tcConfig.TLSVerify == 1 {
+ cacertPath := filepath.Join(tcConfig.CertPath, "ca.pem")
+ certPath := filepath.Join(tcConfig.CertPath, "cert.pem")
+ keyPath := filepath.Join(tcConfig.CertPath, "key.pem")
+
+ opts = append(opts, client.WithTLSClientConfig(cacertPath, certPath, keyPath))
+ }
+ }
+
+ // Attach headers identifying the project path, session and library version.
+ opts = append(opts, client.WithHTTPHeaders(
+ map[string]string{
+ "x-tc-pp": ProjectPath(),
+ "x-tc-sid": SessionID(),
+ "User-Agent": "tc-go/" + internal.Version,
+ }),
+ )
+
+ // passed options have priority over the default ones
+ opts = append(opts, ops...)
+
+ cli, err := client.NewClientWithOpts(opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return cli, nil
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_host.go b/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_host.go
new file mode 100644
index 0000000..fc06ea8
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_host.go
@@ -0,0 +1,329 @@
+package core
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+ "sync"
+
+ "github.com/docker/docker/client"
+
+ "github.com/testcontainers/testcontainers-go/internal/config"
+)
+
+type dockerHostContext string
+
+var DockerHostContextKey = dockerHostContext("docker_host")
+
+var (
+ ErrDockerHostNotSet = errors.New("DOCKER_HOST is not set")
+ ErrDockerSocketOverrideNotSet = errors.New("TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE is not set")
+ ErrDockerSocketNotSetInContext = errors.New("socket not set in context")
+ ErrDockerSocketNotSetInProperties = errors.New("socket not set in ~/.testcontainers.properties")
+ ErrNoUnixSchema = errors.New("URL schema is not unix")
+ ErrSocketNotFound = errors.New("socket not found")
+ ErrSocketNotFoundInPath = errors.New("docker socket not found in " + DockerSocketPath)
+ // ErrTestcontainersHostNotSetInProperties this error is specific to Testcontainers
+ ErrTestcontainersHostNotSetInProperties = errors.New("tc.host not set in ~/.testcontainers.properties")
+)
+
+var (
+ dockerHostCache string
+ dockerHostOnce sync.Once
+)
+
+var (
+ dockerSocketPathCache string
+ dockerSocketPathOnce sync.Once
+)
+
+// DefaultGatewayIP returns the default gateway IP by shelling out to `sh` and
+// `ip route`, so it only works on Linux-like environments where both exist.
+//
+// Deprecated: kept for parity with testcontainers-java's legacy detection logic.
+// see https://github.com/testcontainers/testcontainers-java/blob/main/core/src/main/java/org/testcontainers/dockerclient/DockerClientConfigUtils.java#L46
+func DefaultGatewayIP() (string, error) {
+ // see https://github.com/testcontainers/testcontainers-java/blob/3ad8d80e2484864e554744a4800a81f6b7982168/core/src/main/java/org/testcontainers/dockerclient/DockerClientConfigUtils.java#L27
+ cmd := exec.Command("sh", "-c", "ip route|awk '/default/ { print $3 }'")
+ stdout, err := cmd.Output()
+ if err != nil {
+ return "", errors.New("failed to detect docker host")
+ }
+ ip := strings.TrimSpace(string(stdout))
+ if len(ip) == 0 {
+ return "", errors.New("failed to parse default gateway IP")
+ }
+ return ip, nil
+}
+
+// dockerHostCheck Use a vanilla Docker client to check if the Docker host is reachable.
+// It will avoid recursive calls to this function.
+// NOTE(review): declared as a package variable, presumably so tests can stub it — confirm.
+var dockerHostCheck = func(ctx context.Context, host string) error {
+ cli, err := client.NewClientWithOpts(client.FromEnv, client.WithHost(host), client.WithAPIVersionNegotiation())
+ if err != nil {
+ return fmt.Errorf("new client: %w", err)
+ }
+ defer cli.Close()
+
+ // Info performs a round-trip to the daemon, proving the host actually answers
+ _, err = cli.Info(ctx)
+ if err != nil {
+ return fmt.Errorf("docker info: %w", err)
+ }
+
+ return nil
+}
+
+// MustExtractDockerHost Extracts the docker host from the different alternatives, caching the result to avoid unnecessary
+// calculations. Use this function to get the actual Docker host. This function does not consider Windows containers at the moment.
+// The possible alternatives are:
+//
+// 1. Docker host from the "tc.host" property in the ~/.testcontainers.properties file.
+// 2. DOCKER_HOST environment variable.
+// 3. Docker host from context.
+// 4. Docker host from the default docker socket path, without the unix schema.
+// 5. Docker host from the "docker.host" property in the ~/.testcontainers.properties file.
+// 6. Rootless docker socket path.
+// 7. Else, because the Docker host is not set, it panics.
+//
+// The result is computed once per process (sync.Once) and reused on every later call.
+func MustExtractDockerHost(ctx context.Context) string {
+ dockerHostOnce.Do(func() {
+ cache, err := extractDockerHost(ctx)
+ if err != nil {
+ panic(err)
+ }
+
+ dockerHostCache = cache
+ })
+
+ return dockerHostCache
+}
+
+// MustExtractDockerSocket Extracts the docker socket from the different alternatives, removing the socket schema and
+// caching the result to avoid unnecessary calculations. Use this function to get the docker socket path,
+// not the host (e.g. mounting the socket in a container). This function does not consider Windows containers at the moment.
+// The possible alternatives are:
+//
+// 1. Docker host from the "tc.host" property in the ~/.testcontainers.properties file.
+// 2. The TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE environment variable.
+// 3. Using a Docker client, check if the Info().OperatingSystem is "Docker Desktop" and return the default docker socket path for rootless docker.
+// 4. Else, Get the current Docker Host from the existing strategies: see MustExtractDockerHost.
+// 5. If the socket contains the unix schema, the schema is removed (e.g. unix:///var/run/docker.sock -> /var/run/docker.sock)
+// 6. Else, the default location of the docker socket is used (/var/run/docker.sock)
+//
+// It panics if a Docker client cannot be created, or the Docker host cannot be discovered.
+// The result is cached independently from the Docker host cache (separate sync.Once).
+func MustExtractDockerSocket(ctx context.Context) string {
+ dockerSocketPathOnce.Do(func() {
+ dockerSocketPathCache = extractDockerSocket(ctx)
+ })
+
+ return dockerSocketPathCache
+}
+
+// extractDockerHost Extracts the docker host from the different alternatives, without caching the result.
+// This internal method is handy for testing purposes.
+// It returns the first candidate host that passes dockerHostCheck; "not set" errors are
+// expected and silently skipped, any other failure is collected and joined into the final error.
+func extractDockerHost(ctx context.Context) (string, error) {
+ // candidate resolvers, tried in priority order (see MustExtractDockerHost)
+ dockerHostFns := []func(context.Context) (string, error){
+ testcontainersHostFromProperties,
+ dockerHostFromEnv,
+ dockerHostFromContext,
+ dockerSocketPath,
+ dockerHostFromProperties,
+ rootlessDockerSocketPath,
+ }
+
+ var errs []error
+ for _, dockerHostFn := range dockerHostFns {
+ dockerHost, err := dockerHostFn(ctx)
+ if err != nil {
+ // a "not set" error just means: try the next alternative
+ if !isHostNotSet(err) {
+ errs = append(errs, err)
+ }
+ continue
+ }
+
+ // verify the candidate host actually answers before committing to it
+ if err = dockerHostCheck(ctx, dockerHost); err != nil {
+ errs = append(errs, fmt.Errorf("check host %q: %w", dockerHost, err))
+ continue
+ }
+
+ return dockerHost, nil
+ }
+
+ if len(errs) > 0 {
+ return "", errors.Join(errs...)
+ }
+
+ return "", ErrSocketNotFound
+}
+
+// extractDockerSocket Extracts the docker socket from the different alternatives, without caching the result.
+// It will internally use the default Docker client, calling the internal method extractDockerSocketFromClient with it.
+// This internal method is handy for testing purposes.
+// It panics if a Docker client cannot be created, or the Docker host is not discovered.
+func extractDockerSocket(ctx context.Context) string {
+ cli, err := NewClient(ctx)
+ if err != nil {
+ panic(err) // a Docker client is required to get the Docker info
+ }
+ // the client is only needed for the duration of this call
+ defer cli.Close()
+
+ return extractDockerSocketFromClient(ctx, cli)
+}
+
+// extractDockerSocketFromClient Extracts the docker socket from the different alternatives, without caching the result,
+// and receiving an instance of the Docker API client interface.
+// This internal method is handy for testing purposes, passing a mock type simulating the desired behaviour.
+// It panics if the Docker Info call errors, or the Docker host is not discovered.
+func extractDockerSocketFromClient(ctx context.Context, cli client.APIClient) string {
+ // check that the socket is not a tcp or unix socket
+ // normalize a host string into a filesystem socket path:
+ // tcp hosts map to the default socket path, unix hosts drop the schema
+ checkDockerSocketFn := func(socket string) string {
+ // this use case will cover the case when the docker host is a tcp socket
+ if strings.HasPrefix(socket, TCPSchema) {
+ return DockerSocketPath
+ }
+
+ if strings.HasPrefix(socket, DockerSocketSchema) {
+ return strings.Replace(socket, DockerSocketSchema, "", 1)
+ }
+
+ return socket
+ }
+
+ // 1. the "tc.host" property wins over everything else
+ tcHost, err := testcontainersHostFromProperties(ctx)
+ if err == nil {
+ return checkDockerSocketFn(tcHost)
+ }
+
+ // 2. explicit override via TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE
+ testcontainersDockerSocket, err := dockerSocketOverridePath()
+ if err == nil {
+ return checkDockerSocketFn(testcontainersDockerSocket)
+ }
+
+ info, err := cli.Info(ctx)
+ if err != nil {
+ panic(err) // Docker Info is required to get the Operating System
+ }
+
+ // Because Docker Desktop runs in a VM, we need to use the default docker path for rootless docker
+ if info.OperatingSystem == "Docker Desktop" {
+ if IsWindows() {
+ return WindowsDockerSocketPath
+ }
+
+ return DockerSocketPath
+ }
+
+ // 4. fall back to the regular Docker host discovery chain
+ dockerHost, err := extractDockerHost(ctx)
+ if err != nil {
+ panic(err) // Docker host is required to get the Docker socket
+ }
+
+ return checkDockerSocketFn(dockerHost)
+}
+
+// isHostNotSet returns true if the error is related to the Docker host
+// not being set, false otherwise. These errors are the expected "try the
+// next alternative" signals used by the host/socket discovery loops.
+func isHostNotSet(err error) bool {
+ switch {
+ case errors.Is(err, ErrTestcontainersHostNotSetInProperties),
+ errors.Is(err, ErrDockerHostNotSet),
+ errors.Is(err, ErrDockerSocketNotSetInContext),
+ errors.Is(err, ErrDockerSocketNotSetInProperties),
+ errors.Is(err, ErrSocketNotFoundInPath),
+ errors.Is(err, ErrXDGRuntimeDirNotSet),
+ errors.Is(err, ErrRootlessDockerNotFoundHomeRunDir),
+ errors.Is(err, ErrRootlessDockerNotFoundHomeDesktopDir),
+ errors.Is(err, ErrRootlessDockerNotFoundRunDir):
+ return true
+ default:
+ return false
+ }
+}
+
+// dockerHostFromEnv returns the docker host from the DOCKER_HOST environment variable, if it's not empty.
+// It returns ErrDockerHostNotSet when the variable is unset or empty.
+func dockerHostFromEnv(_ context.Context) (string, error) {
+ if dockerHostPath := os.Getenv("DOCKER_HOST"); dockerHostPath != "" {
+ return dockerHostPath, nil
+ }
+
+ return "", ErrDockerHostNotSet
+}
+
+// dockerHostFromContext returns the docker host from the Go context, if it's not empty.
+// The value is read from DockerHostContextKey and must parse as a unix, npipe or tcp URL.
+func dockerHostFromContext(ctx context.Context) (string, error) {
+ if socketPath, ok := ctx.Value(DockerHostContextKey).(string); ok && socketPath != "" {
+ parsed, err := parseURL(socketPath)
+ if err != nil {
+ return "", err
+ }
+
+ return parsed, nil
+ }
+
+ return "", ErrDockerSocketNotSetInContext
+}
+
+// dockerHostFromProperties returns the docker host from the ~/.testcontainers.properties file, if it's not empty.
+// The value is taken verbatim from the "docker.host" property (config.Read().Host).
+func dockerHostFromProperties(_ context.Context) (string, error) {
+ cfg := config.Read()
+ socketPath := cfg.Host
+ if socketPath != "" {
+ return socketPath, nil
+ }
+
+ return "", ErrDockerSocketNotSetInProperties
+}
+
+// dockerSocketOverridePath returns the docker socket from the TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE environment variable,
+// if it's not empty.
+// Note: LookupEnv means an empty-but-set variable is returned as an empty path, not an error.
+func dockerSocketOverridePath() (string, error) {
+ if dockerHostPath, exists := os.LookupEnv("TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE"); exists {
+ return dockerHostPath, nil
+ }
+
+ return "", ErrDockerSocketOverrideNotSet
+}
+
+// dockerSocketPath returns the docker socket from the default docker socket path, if it's not empty
+// and the socket exists.
+// The returned value keeps the unix schema (DockerSocketPathWithSchema), unlike the raw path it checks.
+func dockerSocketPath(_ context.Context) (string, error) {
+ if fileExists(DockerSocketPath) {
+ return DockerSocketPathWithSchema, nil
+ }
+
+ return "", ErrSocketNotFoundInPath
+}
+
+// testcontainersHostFromProperties returns the testcontainers host from the ~/.testcontainers.properties file, if it's not empty.
+// The "tc.host" value must parse as a unix, npipe or tcp URL (see parseURL).
+func testcontainersHostFromProperties(_ context.Context) (string, error) {
+ cfg := config.Read()
+ testcontainersHost := cfg.TestcontainersHost
+ if testcontainersHost != "" {
+ parsed, err := parseURL(testcontainersHost)
+ if err != nil {
+ return "", err
+ }
+
+ return parsed, nil
+ }
+
+ return "", ErrTestcontainersHostNotSetInProperties
+}
+
+// DockerEnvFile is the file that is created when running inside a container.
+// It's a variable to allow testing.
+// TODO: Remove this once context rework is done, which eliminates need for the default network creation.
+var DockerEnvFile = "/.dockerenv"
+
+// InAContainer returns true if the code is running inside a container,
+// detected by the presence of DockerEnvFile on the filesystem.
+// See https://github.com/docker/docker/blob/a9fa38b1edf30b23cae3eade0be48b3d4b1de14b/daemon/initlayer/setup_unix.go#L25
+func InAContainer() bool {
+ return inAContainer(DockerEnvFile)
+}
+
+// inAContainer reports whether path exists; any stat error is treated as "not in a container".
+func inAContainer(path string) bool {
+ // see https://github.com/testcontainers/testcontainers-java/blob/3ad8d80e2484864e554744a4800a81f6b7982168/core/src/main/java/org/testcontainers/dockerclient/DockerClientConfigUtils.java#L15
+ if _, err := os.Stat(path); err == nil {
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_rootless.go b/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_rootless.go
new file mode 100644
index 0000000..8108384
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_rootless.go
@@ -0,0 +1,150 @@
+package core
+
+import (
+ "context"
+ "errors"
+ "net/url"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+)
+
+var (
+ ErrRootlessDockerNotFound = errors.New("rootless Docker not found")
+ ErrRootlessDockerNotFoundHomeDesktopDir = errors.New("checked path: ~/.docker/desktop/docker.sock")
+ ErrRootlessDockerNotFoundHomeRunDir = errors.New("checked path: ~/.docker/run/docker.sock")
+ ErrRootlessDockerNotFoundRunDir = errors.New("checked path: /run/user/${uid}/docker.sock")
+ ErrRootlessDockerNotFoundXDGRuntimeDir = errors.New("checked path: $XDG_RUNTIME_DIR")
+ ErrRootlessDockerNotSupportedWindows = errors.New("rootless Docker is not supported on Windows")
+ ErrXDGRuntimeDirNotSet = errors.New("XDG_RUNTIME_DIR is not set")
+)
+
+// baseRunDir is the base directory for the "/run/user/${uid}" directory.
+// It is a variable so it can be modified for testing.
+var baseRunDir = "/run"
+
+// IsWindows returns if the current OS is Windows. For that it checks the GOOS environment variable or the runtime.GOOS constant.
+// The env-var check exists so tests on non-Windows machines can simulate Windows by setting GOOS=windows.
+func IsWindows() bool {
+ return os.Getenv("GOOS") == "windows" || runtime.GOOS == "windows"
+}
+
+// rootlessDockerSocketPath returns if the path to the rootless Docker socket exists.
+// The rootless socket path is determined by the following order:
+//
+// 1. XDG_RUNTIME_DIR environment variable.
+// 2. ~/.docker/run/docker.sock file.
+// 3. ~/.docker/desktop/docker.sock file.
+// 4. /run/user/${uid}/docker.sock file.
+// 5. Else, return ErrRootlessDockerNotFound, wrapping specific errors for each of the above paths.
+//
+// It should include the Docker socket schema (unix://) in the returned path.
+func rootlessDockerSocketPath(_ context.Context) (string, error) {
+ // adding a manner to test it on non-windows machines, setting the GOOS env var to windows
+ // This is needed because runtime.GOOS is a constant that returns the OS of the machine running the test
+ if IsWindows() {
+ return "", ErrRootlessDockerNotSupportedWindows
+ }
+
+ // candidate locations, tried in the documented priority order
+ socketPathFns := []func() (string, error){
+ rootlessSocketPathFromEnv,
+ rootlessSocketPathFromHomeRunDir,
+ rootlessSocketPathFromHomeDesktopDir,
+ rootlessSocketPathFromRunDir,
+ }
+
+ var errs []error
+ for _, socketPathFn := range socketPathFns {
+ s, err := socketPathFn()
+ if err != nil {
+ // "not set" errors just mean: try the next location
+ if !isHostNotSet(err) {
+ errs = append(errs, err)
+ }
+ continue
+ }
+
+ // prepend the unix schema so the result is usable as a Docker host
+ return DockerSocketSchema + s, nil
+ }
+
+ if len(errs) > 0 {
+ return "", errors.Join(errs...)
+ }
+
+ return "", ErrRootlessDockerNotFound
+}
+
+// fileExists reports whether f exists; any stat error (including permission errors)
+// is treated as the file not existing.
+func fileExists(f string) bool {
+ _, err := os.Stat(f)
+ return err == nil
+}
+
+// parseURL extracts a Docker host from s: for unix and npipe URLs it returns
+// just the path, for tcp URLs it returns the original string unchanged, and
+// for any other scheme it returns ErrNoUnixSchema.
+func parseURL(s string) (string, error) {
+ hostURL, err := url.Parse(s)
+ if err != nil {
+ return "", err
+ }
+
+ switch hostURL.Scheme {
+ case "unix", "npipe":
+ return hostURL.Path, nil
+ case "tcp":
+ // return the original URL, as it is a valid TCP URL
+ return s, nil
+ default:
+ return "", ErrNoUnixSchema
+ }
+}
+
+// rootlessSocketPathFromEnv returns the path to the rootless Docker socket from the XDG_RUNTIME_DIR environment variable.
+// If the variable is set but $XDG_RUNTIME_DIR/docker.sock does not exist, it does NOT
+// fall through: it returns ErrRootlessDockerNotFoundXDGRuntimeDir.
+func rootlessSocketPathFromEnv() (string, error) {
+ xdgRuntimeDir, exists := os.LookupEnv("XDG_RUNTIME_DIR")
+ if exists {
+ f := filepath.Join(xdgRuntimeDir, "docker.sock")
+ if fileExists(f) {
+ return f, nil
+ }
+
+ return "", ErrRootlessDockerNotFoundXDGRuntimeDir
+ }
+
+ return "", ErrXDGRuntimeDirNotSet
+}
+
+// rootlessSocketPathFromHomeRunDir returns the path to the rootless Docker socket from the ~/.docker/run/docker.sock file.
+// It fails if the user home directory cannot be resolved or the socket file is absent.
+func rootlessSocketPathFromHomeRunDir() (string, error) {
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return "", err
+ }
+
+ f := filepath.Join(home, ".docker", "run", "docker.sock")
+ if fileExists(f) {
+ return f, nil
+ }
+ return "", ErrRootlessDockerNotFoundHomeRunDir
+}
+
+// rootlessSocketPathFromHomeDesktopDir returns the path to the rootless Docker socket from the ~/.docker/desktop/docker.sock file.
+// It fails if the user home directory cannot be resolved or the socket file is absent.
+func rootlessSocketPathFromHomeDesktopDir() (string, error) {
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return "", err
+ }
+
+ f := filepath.Join(home, ".docker", "desktop", "docker.sock")
+ if fileExists(f) {
+ return f, nil
+ }
+ return "", ErrRootlessDockerNotFoundHomeDesktopDir
+}
+
+// rootlessSocketPathFromRunDir returns the path to the rootless Docker socket from the /run/user/<uid>/docker.sock file.
+// baseRunDir is a package variable (default "/run") so tests can redirect the lookup.
+func rootlessSocketPathFromRunDir() (string, error) {
+ uid := os.Getuid()
+ f := filepath.Join(baseRunDir, "user", strconv.Itoa(uid), "docker.sock")
+ if fileExists(f) {
+ return f, nil
+ }
+ return "", ErrRootlessDockerNotFoundRunDir
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_socket.go b/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_socket.go
new file mode 100644
index 0000000..b0c0c84
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_socket.go
@@ -0,0 +1,49 @@
+package core
+
+import (
+ "net/url"
+ "strings"
+
+ "github.com/docker/docker/client"
+)
+
+// DockerSocketSchema is the unix schema.
+var DockerSocketSchema = "unix://"
+
+// DockerSocketPath is the path to the docker socket under unix systems.
+var DockerSocketPath = "/var/run/docker.sock"
+
+// DockerSocketPathWithSchema is the path to the docker socket under unix systems with the unix schema.
+var DockerSocketPathWithSchema = DockerSocketSchema + DockerSocketPath
+
+// TCPSchema is the tcp schema.
+var TCPSchema = "tcp://"
+
+// WindowsDockerSocketPath is the path to the docker socket under windows systems.
+var WindowsDockerSocketPath = "//var/run/docker.sock"
+
+// init derives DockerSocketSchema, DockerSocketPath and DockerSocketPathWithSchema
+// from the docker client's platform default host, so that both unix and npipe
+// defaults are reflected in the package-level variables.
+func init() {
+ const DefaultDockerHost = client.DefaultDockerHost
+
+ u, err := url.Parse(DefaultDockerHost)
+ if err != nil {
+ // unsupported default host specified by the docker client package,
+ // so revert to the default unix docker socket path
+ return
+ }
+
+ switch u.Scheme {
+ case "unix", "npipe":
+ DockerSocketSchema = u.Scheme + "://"
+ DockerSocketPath = u.Path
+ if !strings.HasPrefix(DockerSocketPath, "/") {
+ // seeing as the code in this module depends on DockerSocketPath having
+ // a slash (`/`) prefix, we add it here if it is missing.
+ // for the known environments, we do not foresee how the socket-path
+ // should miss the slash, however this extra if-condition is worth to
+ // save future pain from innocent users.
+ DockerSocketPath = "/" + DockerSocketPath
+ }
+ DockerSocketPathWithSchema = DockerSocketSchema + DockerSocketPath
+ }
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/core/images.go b/vendor/github.com/testcontainers/testcontainers-go/internal/core/images.go
new file mode 100644
index 0000000..f073a90
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/internal/core/images.go
@@ -0,0 +1,132 @@
+package core
+
+import (
+ "bufio"
+ "io"
+ "net/url"
+ "os"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+ IndexDockerIO = "https://index.docker.io/v1/"
+ maxURLRuneCount = 2083
+ minURLRuneCount = 3
+ URLSchema = `((ftp|tcp|udp|wss?|https?):\/\/)`
+ URLUsername = `(\S+(:\S*)?@)`
+ URLIP = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))`
+ IP = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
+ URLSubdomain = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))`
+ URLPath = `((\/|\?|#)[^\s]*)`
+ URLPort = `(:(\d{1,5}))`
+ URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$`
+)
+
+var rxURL = regexp.MustCompile(URL)
+
+// ExtractImagesFromDockerfile extracts images from the Dockerfile sourced from dockerfile.
+// It opens the file at the given path and delegates to ExtractImagesFromReader,
+// interpolating the given buildArgs into the FROM lines.
+func ExtractImagesFromDockerfile(dockerfile string, buildArgs map[string]*string) ([]string, error) {
+ file, err := os.Open(dockerfile)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return ExtractImagesFromReader(file, buildArgs)
+}
+
+// ExtractImagesFromReader extracts images from the Dockerfile sourced from r.
+// For every line starting with FROM (case-insensitive) it takes the first
+// whitespace-separated token after the keyword as the image reference, after
+// substituting ${ARG}-style build args from buildArgs (nil values are skipped).
+// NOTE(review): a line like `FROM --platform=... image` would yield the flag,
+// not the image, as parts[0] — confirm whether upstream handles such flags.
+func ExtractImagesFromReader(r io.Reader, buildArgs map[string]*string) ([]string, error) {
+ var images []string
+ var lines []string
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ lines = append(lines, scanner.Text())
+ }
+ if scanner.Err() != nil {
+ return nil, scanner.Err()
+ }
+
+ // extract images from dockerfile
+ for _, line := range lines {
+ line = strings.TrimSpace(line)
+ if !strings.HasPrefix(strings.ToUpper(line), "FROM") {
+ continue
+ }
+
+ // remove FROM
+ line = strings.TrimPrefix(line, "FROM")
+ parts := strings.Split(strings.TrimSpace(line), " ")
+ if len(parts) == 0 {
+ continue
+ }
+
+ // interpolate build args
+ for k, v := range buildArgs {
+ if v != nil {
+ parts[0] = strings.ReplaceAll(parts[0], "${"+k+"}", *v)
+ }
+ }
+ images = append(images, parts[0])
+ }
+
+ return images, nil
+}
+
+// ExtractRegistry extracts the registry from the image name, using a regular expression to extract the registry from the image name.
+// regular expression to extract the registry from the image name
+// the regular expression is based on the grammar defined in
+// - image:tag
+// - image
+// - repository/image:tag
+// - repository/image
+// - registry/image:tag
+// - registry/image
+// - registry/repository/image:tag
+// - registry/repository/image
+// - registry:port/repository/image:tag
+// - registry:port/repository/image
+// - registry:port/image:tag
+// - registry:port/image
+// Once extracted the registry, it is validated to check if it is a valid URL or an IP address.
+// If the image name does not match the expression at all, an empty string (not fallback) is returned;
+// if the first path component is not a valid URL, fallback is returned instead.
+func ExtractRegistry(image string, fallback string) string {
+ exp := regexp.MustCompile(`^(?:(?P<registry>(https?://)?[^/]+)(?::(?P<port>\d+))?/)?(?:(?P<repository>[^/]+)/)?(?P<image>[^:]+)(?::(?P<tag>.+))?$`).FindStringSubmatch(image)
+ if len(exp) == 0 {
+ return ""
+ }
+
+ // the first path component is only a registry if it looks like a URL/host
+ registry := exp[1]
+
+ if IsURL(registry) {
+ return registry
+ }
+
+ return fallback
+}
+
+// IsURL checks if the string is a URL.
+// It combines length/prefix sanity checks, a url.Parse structural check, and
+// the package-level rxURL regular expression match.
+// Extracted from https://github.com/asaskevich/govalidator/blob/f21760c49a8d/validator.go#L104
+func IsURL(str string) bool {
+ if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") {
+ return false
+ }
+ strTemp := str
+ if strings.Contains(str, ":") && !strings.Contains(str, "://") {
+ // support no indicated urlscheme but with colon for port number
+ // http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString
+ strTemp = "http://" + str
+ }
+ u, err := url.Parse(strTemp)
+ if err != nil {
+ return false
+ }
+ if strings.HasPrefix(u.Host, ".") {
+ return false
+ }
+ if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) {
+ return false
+ }
+ return rxURL.MatchString(str)
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/core/labels.go b/vendor/github.com/testcontainers/testcontainers-go/internal/core/labels.go
new file mode 100644
index 0000000..0814924
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/internal/core/labels.go
@@ -0,0 +1,73 @@
+package core
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/testcontainers/testcontainers-go/internal"
+ "github.com/testcontainers/testcontainers-go/internal/config"
+)
+
+const (
+ // LabelBase is the base label for all testcontainers labels.
+ LabelBase = "org.testcontainers"
+
+ // LabelLang specifies the language which created the test container.
+ LabelLang = LabelBase + ".lang"
+
+ // LabelReaper identifies the container as a reaper.
+ LabelReaper = LabelBase + ".reaper"
+
+ // LabelRyuk identifies the container as a ryuk.
+ LabelRyuk = LabelBase + ".ryuk"
+
+ // LabelSessionID specifies the session ID of the container.
+ LabelSessionID = LabelBase + ".sessionId"
+
+ // LabelVersion specifies the version of testcontainers which created the container.
+ LabelVersion = LabelBase + ".version"
+
+ // LabelReap specifies the container should be reaped by the reaper.
+ LabelReap = LabelBase + ".reap"
+)
+
+// DefaultLabels returns the standard set of labels which
+// includes LabelSessionID if the reaper is enabled.
+// LabelReap is only added when Ryuk (the reaper) is not disabled in the configuration.
+func DefaultLabels(sessionID string) map[string]string {
+ labels := map[string]string{
+ LabelBase: "true",
+ LabelLang: "go",
+ LabelVersion: internal.Version,
+ LabelSessionID: sessionID,
+ }
+
+ if !config.Read().RyukDisabled {
+ labels[LabelReap] = "true"
+ }
+
+ return labels
+}
+
+// AddDefaultLabels adds the default labels for sessionID to target.
+// Existing keys in target are overwritten; target must be non-nil.
+func AddDefaultLabels(sessionID string, target map[string]string) {
+ for k, v := range DefaultLabels(sessionID) {
+ target[k] = v
+ }
+}
+
+// MergeCustomLabels sets labels from src to dst.
+// If a key in src has [LabelBase] prefix returns an error.
+// If dst is nil returns an error.
+// Note: on error, keys iterated before the offending one have already been copied into dst.
+func MergeCustomLabels(dst, src map[string]string) error {
+ if dst == nil {
+ return errors.New("destination map is nil")
+ }
+ for key, value := range src {
+ // reject attempts to override reserved testcontainers labels
+ if strings.HasPrefix(key, LabelBase) {
+ return fmt.Errorf("key %q has %q prefix", key, LabelBase)
+ }
+ dst[key] = value
+ }
+ return nil
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/core/network/network.go b/vendor/github.com/testcontainers/testcontainers-go/internal/core/network/network.go
new file mode 100644
index 0000000..787065a
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/internal/core/network/network.go
@@ -0,0 +1,52 @@
+package network
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/network"
+
+ "github.com/testcontainers/testcontainers-go/internal/core"
+)
+
+const (
+ // FilterByID uses to filter network by identifier.
+ FilterByID = "id"
+
+ // FilterByName uses to filter network by name.
+ FilterByName = "name"
+)
+
+// Get returns a network by its ID, using a fresh Docker client for the lookup.
+func Get(ctx context.Context, id string) (network.Inspect, error) {
+ return get(ctx, FilterByID, id)
+}
+
+// GetByName returns a network by its name, using a fresh Docker client for the lookup.
+func GetByName(ctx context.Context, name string) (network.Inspect, error) {
+ return get(ctx, FilterByName, name)
+}
+
+// get lists networks matching the given filter/value pair and returns the first
+// match, or an error if the listing fails or no network matches.
+// Note: if several networks match, only the first one returned by the daemon is used.
+func get(ctx context.Context, filter string, value string) (network.Inspect, error) {
+ var nw network.Inspect // initialize to the zero value
+
+ cli, err := core.NewClient(ctx)
+ if err != nil {
+ return nw, err
+ }
+ defer cli.Close()
+
+ list, err := cli.NetworkList(ctx, network.ListOptions{
+ Filters: filters.NewArgs(filters.Arg(filter, value)),
+ })
+ if err != nil {
+ return nw, fmt.Errorf("failed to list networks: %w", err)
+ }
+
+ if len(list) == 0 {
+ return nw, fmt.Errorf("network %s not found (filtering by %s)", value, filter)
+ }
+
+ return list[0], nil
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/version.go b/vendor/github.com/testcontainers/testcontainers-go/internal/version.go
new file mode 100644
index 0000000..7e6da64
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/internal/version.go
@@ -0,0 +1,4 @@
+package internal
+
+// Version is the next development version of the application
+const Version = "0.36.0"
diff --git a/vendor/github.com/testcontainers/testcontainers-go/lifecycle.go b/vendor/github.com/testcontainers/testcontainers-go/lifecycle.go
new file mode 100644
index 0000000..b6d8e25
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/lifecycle.go
@@ -0,0 +1,671 @@
+package testcontainers
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/go-connections/nat"
+
+ "github.com/testcontainers/testcontainers-go/log"
+)
+
// ContainerRequestHook is a hook that will be called before a container is created.
// It can be used to modify container configuration before it is created,
// using the different lifecycle hooks that are available:
// - Creating
// For that, it will receive a ContainerRequest, modify it and return an error if needed.
type ContainerRequestHook func(ctx context.Context, req ContainerRequest) error

// ContainerHook is a hook that will be called after a container is created.
// It can be used to modify the state of the container after it is created,
// using the different lifecycle hooks that are available:
// - Created
// - Starting
// - Started
// - Readied
// - Stopping
// - Stopped
// - Terminating
// - Terminated
// For that, it will receive a Container, modify it and return an error if needed.
type ContainerHook func(ctx context.Context, ctr Container) error

// ContainerLifecycleHooks is a struct that contains all the hooks that can be used
// to modify the container lifecycle. All the container lifecycle hooks except the PreCreates hooks
// will be passed to the container once it's created.
//
// NOTE: the "Pre"/"Post" field-name prefixes are significant —
// combineContainerHooks inspects them via reflection to decide ordering.
type ContainerLifecycleHooks struct {
	// PreBuilds runs before the container image is built.
	PreBuilds []ContainerRequestHook
	// PostBuilds runs after the container image is built.
	PostBuilds []ContainerRequestHook
	// PreCreates runs before the container is created.
	PreCreates []ContainerRequestHook
	// PostCreates runs after the container is created.
	PostCreates []ContainerHook
	// PreStarts runs before the container is started.
	PreStarts []ContainerHook
	// PostStarts runs after the container is started.
	PostStarts []ContainerHook
	// PostReadies runs after the container is ready.
	PostReadies []ContainerHook
	// PreStops runs before the container is stopped.
	PreStops []ContainerHook
	// PostStops runs after the container is stopped.
	PostStops []ContainerHook
	// PreTerminates runs before the container is terminated.
	PreTerminates []ContainerHook
	// PostTerminates runs after the container is terminated.
	PostTerminates []ContainerHook
}
+
+// DefaultLoggingHook is a hook that will log the container lifecycle events
+var DefaultLoggingHook = func(logger log.Logger) ContainerLifecycleHooks {
+ shortContainerID := func(c Container) string {
+ return c.GetContainerID()[:12]
+ }
+
+ return ContainerLifecycleHooks{
+ PreBuilds: []ContainerRequestHook{
+ func(_ context.Context, req ContainerRequest) error {
+ logger.Printf("🐳 Building image %s:%s", req.GetRepo(), req.GetTag())
+ return nil
+ },
+ },
+ PostBuilds: []ContainerRequestHook{
+ func(_ context.Context, req ContainerRequest) error {
+ logger.Printf("✅ Built image %s", req.Image)
+ return nil
+ },
+ },
+ PreCreates: []ContainerRequestHook{
+ func(_ context.Context, req ContainerRequest) error {
+ logger.Printf("🐳 Creating container for image %s", req.Image)
+ return nil
+ },
+ },
+ PostCreates: []ContainerHook{
+ func(_ context.Context, c Container) error {
+ logger.Printf("✅ Container created: %s", shortContainerID(c))
+ return nil
+ },
+ },
+ PreStarts: []ContainerHook{
+ func(_ context.Context, c Container) error {
+ logger.Printf("🐳 Starting container: %s", shortContainerID(c))
+ return nil
+ },
+ },
+ PostStarts: []ContainerHook{
+ func(_ context.Context, c Container) error {
+ logger.Printf("✅ Container started: %s", shortContainerID(c))
+ return nil
+ },
+ },
+ PostReadies: []ContainerHook{
+ func(_ context.Context, c Container) error {
+ logger.Printf("🔔 Container is ready: %s", shortContainerID(c))
+ return nil
+ },
+ },
+ PreStops: []ContainerHook{
+ func(_ context.Context, c Container) error {
+ logger.Printf("🐳 Stopping container: %s", shortContainerID(c))
+ return nil
+ },
+ },
+ PostStops: []ContainerHook{
+ func(_ context.Context, c Container) error {
+ logger.Printf("✅ Container stopped: %s", shortContainerID(c))
+ return nil
+ },
+ },
+ PreTerminates: []ContainerHook{
+ func(_ context.Context, c Container) error {
+ logger.Printf("🐳 Terminating container: %s", shortContainerID(c))
+ return nil
+ },
+ },
+ PostTerminates: []ContainerHook{
+ func(_ context.Context, c Container) error {
+ logger.Printf("🚫 Container terminated: %s", shortContainerID(c))
+ return nil
+ },
+ },
+ }
+}
+
// defaultPreCreateHook is a hook that will apply the default configuration to the container.
// It delegates to the provider's preCreateContainerHook, which fills in mounts,
// network endpoint settings and exposed ports on the create payloads before creation.
var defaultPreCreateHook = func(p *DockerProvider, dockerInput *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig) ContainerLifecycleHooks {
	return ContainerLifecycleHooks{
		PreCreates: []ContainerRequestHook{
			func(ctx context.Context, req ContainerRequest) error {
				return p.preCreateContainerHook(ctx, req, dockerInput, hostConfig, networkingConfig)
			},
		},
	}
}
+
+// defaultCopyFileToContainerHook is a hook that will copy files to the container after it's created
+// but before it's started
+var defaultCopyFileToContainerHook = func(files []ContainerFile) ContainerLifecycleHooks {
+ return ContainerLifecycleHooks{
+ PostCreates: []ContainerHook{
+ // copy files to container after it's created
+ func(ctx context.Context, c Container) error {
+ for _, f := range files {
+ if err := f.validate(); err != nil {
+ return fmt.Errorf("invalid file: %w", err)
+ }
+
+ var err error
+ // Bytes takes precedence over HostFilePath
+ if f.Reader != nil {
+ bs, ioerr := io.ReadAll(f.Reader)
+ if ioerr != nil {
+ return fmt.Errorf("can't read from reader: %w", ioerr)
+ }
+
+ err = c.CopyToContainer(ctx, bs, f.ContainerFilePath, f.FileMode)
+ } else {
+ err = c.CopyFileToContainer(ctx, f.HostFilePath, f.ContainerFilePath, f.FileMode)
+ }
+
+ if err != nil {
+ return fmt.Errorf("can't copy %s to container: %w", f.HostFilePath, err)
+ }
+ }
+
+ return nil
+ },
+ },
+ }
+}
+
+// defaultLogConsumersHook is a hook that will start log consumers after the container is started
+var defaultLogConsumersHook = func(cfg *LogConsumerConfig) ContainerLifecycleHooks {
+ return ContainerLifecycleHooks{
+ PostStarts: []ContainerHook{
+ // Produce logs sending details to the log consumers.
+ // See combineContainerHooks for the order of execution.
+ func(ctx context.Context, c Container) error {
+ if cfg == nil || len(cfg.Consumers) == 0 {
+ return nil
+ }
+
+ dockerContainer := c.(*DockerContainer)
+ dockerContainer.consumers = dockerContainer.consumers[:0]
+ for _, consumer := range cfg.Consumers {
+ dockerContainer.followOutput(consumer)
+ }
+
+ return dockerContainer.startLogProduction(ctx, cfg.Opts...)
+ },
+ },
+ PostStops: []ContainerHook{
+ // Stop the log production.
+ // See combineContainerHooks for the order of execution.
+ func(_ context.Context, c Container) error {
+ if cfg == nil || len(cfg.Consumers) == 0 {
+ return nil
+ }
+
+ dockerContainer := c.(*DockerContainer)
+ return dockerContainer.stopLogProduction()
+ },
+ },
+ }
+}
+
+func checkPortsMapped(exposedAndMappedPorts nat.PortMap, exposedPorts []string) error {
+ portMap, _, err := nat.ParsePortSpecs(exposedPorts)
+ if err != nil {
+ return fmt.Errorf("parse exposed ports: %w", err)
+ }
+
+ for exposedPort := range portMap {
+ // having entries in exposedAndMappedPorts, where the key is the exposed port,
+ // and the value is the mapped port, means that the port has been already mapped.
+ if _, ok := exposedAndMappedPorts[exposedPort]; ok {
+ continue
+ }
+
+ // check if the port is mapped with the protocol (default is TCP)
+ if strings.Contains(string(exposedPort), "/") {
+ return fmt.Errorf("port %s is not mapped yet", exposedPort)
+ }
+
+ // Port didn't have a type, default to tcp and retry.
+ exposedPort += "/tcp"
+ if _, ok := exposedAndMappedPorts[exposedPort]; !ok {
+ return fmt.Errorf("port %s is not mapped yet", exposedPort)
+ }
+ }
+
+ return nil
+}
+
// defaultReadinessHook is a hook that will wait for the container to be ready.
// Readiness has two phases, run in order: first all exposed ports must be
// mapped by the daemon (polled with exponential backoff), then the request's
// WaitingFor strategy, if any, must pass.
var defaultReadinessHook = func() ContainerLifecycleHooks {
	return ContainerLifecycleHooks{
		PostStarts: []ContainerHook{
			func(ctx context.Context, c Container) error {
				// wait until all the exposed ports are mapped:
				// it will be ready when all the exposed ports are mapped,
				// checking every 50ms, up to 1s, and failing if all the
				// exposed ports are not mapped in 5s.
				dockerContainer := c.(*DockerContainer)

				b := backoff.NewExponentialBackOff()

				// Poll starting at 50ms, capped per-attempt by MaxInterval,
				// giving up entirely after MaxElapsedTime (5s).
				b.InitialInterval = 50 * time.Millisecond
				b.MaxElapsedTime = 5 * time.Second
				b.MaxInterval = time.Duration(float64(time.Second) * backoff.DefaultRandomizationFactor)

				err := backoff.RetryNotify(
					func() error {
						// Re-inspect on every attempt: port mappings appear
						// asynchronously after the container starts.
						jsonRaw, err := dockerContainer.inspectRawContainer(ctx)
						if err != nil {
							return err
						}

						return checkPortsMapped(jsonRaw.NetworkSettings.Ports, dockerContainer.exposedPorts)
					},
					b,
					func(err error, _ time.Duration) {
						// Notify callback: log each failed attempt before the next retry.
						dockerContainer.logger.Printf("All requested ports were not exposed: %v", err)
					},
				)
				if err != nil {
					return fmt.Errorf("all exposed ports, %s, were not mapped in 5s: %w", dockerContainer.exposedPorts, err)
				}

				return nil
			},
			// wait for the container to be ready
			func(ctx context.Context, c Container) error {
				dockerContainer := c.(*DockerContainer)

				// if a Wait Strategy has been specified, wait before returning
				if dockerContainer.WaitingFor != nil {
					dockerContainer.logger.Printf(
						"⏳ Waiting for container id %s image: %s. Waiting for: %+v",
						dockerContainer.ID[:12], dockerContainer.Image, dockerContainer.WaitingFor,
					)
					if err := dockerContainer.WaitingFor.WaitUntilReady(ctx, c); err != nil {
						return fmt.Errorf("wait until ready: %w", err)
					}
				}

				// Only mark the container running once readiness has succeeded.
				dockerContainer.isRunning = true

				return nil
			},
		},
	}
}
+
// buildingHook is a hook that will be called before a container image is built.
// It runs the PreBuilds hooks of every lifecycle-hooks entry in the request.
func (req ContainerRequest) buildingHook(ctx context.Context) error {
	return req.applyLifecycleHooks(func(lifecycleHooks ContainerLifecycleHooks) error {
		return lifecycleHooks.Building(ctx)(req)
	})
}

// builtHook is a hook that will be called after a container image is built.
// It runs the PostBuilds hooks of every lifecycle-hooks entry in the request.
func (req ContainerRequest) builtHook(ctx context.Context) error {
	return req.applyLifecycleHooks(func(lifecycleHooks ContainerLifecycleHooks) error {
		return lifecycleHooks.Built(ctx)(req)
	})
}

// creatingHook is a hook that will be called before a container is created.
// It runs the PreCreates hooks of every lifecycle-hooks entry in the request.
func (req ContainerRequest) creatingHook(ctx context.Context) error {
	return req.applyLifecycleHooks(func(lifecycleHooks ContainerLifecycleHooks) error {
		return lifecycleHooks.Creating(ctx)(req)
	})
}
+
+// applyLifecycleHooks calls hook on all LifecycleHooks.
+func (req ContainerRequest) applyLifecycleHooks(hook func(lifecycleHooks ContainerLifecycleHooks) error) error {
+ var errs []error
+ for _, lifecycleHooks := range req.LifecycleHooks {
+ if err := hook(lifecycleHooks); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ return errors.Join(errs...)
+}
+
// createdHook is a hook that will be called after a container is created.
// Container logs are not printed when a hook fails (logError=false).
func (c *DockerContainer) createdHook(ctx context.Context) error {
	return c.applyLifecycleHooks(ctx, false, func(lifecycleHooks ContainerLifecycleHooks) []ContainerHook {
		return lifecycleHooks.PostCreates
	})
}

// startingHook is a hook that will be called before a container is started.
// Container logs are printed when a hook fails (logError=true).
func (c *DockerContainer) startingHook(ctx context.Context) error {
	return c.applyLifecycleHooks(ctx, true, func(lifecycleHooks ContainerLifecycleHooks) []ContainerHook {
		return lifecycleHooks.PreStarts
	})
}

// startedHook is a hook that will be called after a container is started.
// Container logs are printed when a hook fails (logError=true).
func (c *DockerContainer) startedHook(ctx context.Context) error {
	return c.applyLifecycleHooks(ctx, true, func(lifecycleHooks ContainerLifecycleHooks) []ContainerHook {
		return lifecycleHooks.PostStarts
	})
}

// readiedHook is a hook that will be called after a container is ready.
// Container logs are printed when a hook fails (logError=true).
func (c *DockerContainer) readiedHook(ctx context.Context) error {
	return c.applyLifecycleHooks(ctx, true, func(lifecycleHooks ContainerLifecycleHooks) []ContainerHook {
		return lifecycleHooks.PostReadies
	})
}
+
+// printLogs is a helper function that will print the logs of a Docker container
+// We are going to use this helper function to inform the user of the logs when an error occurs
+func (c *DockerContainer) printLogs(ctx context.Context, cause error) {
+ reader, err := c.Logs(ctx)
+ if err != nil {
+ c.logger.Printf("failed accessing container logs: %v\n", err)
+ return
+ }
+
+ b, err := io.ReadAll(reader)
+ if err != nil {
+ c.logger.Printf("failed reading container logs: %v\n", err)
+ return
+ }
+
+ c.logger.Printf("container logs (%s):\n%s", cause, b)
+}
+
// stoppingHook is a hook that will be called before a container is stopped.
// Container logs are not printed when a hook fails (logError=false).
func (c *DockerContainer) stoppingHook(ctx context.Context) error {
	return c.applyLifecycleHooks(ctx, false, func(lifecycleHooks ContainerLifecycleHooks) []ContainerHook {
		return lifecycleHooks.PreStops
	})
}

// stoppedHook is a hook that will be called after a container is stopped.
// Container logs are not printed when a hook fails (logError=false).
func (c *DockerContainer) stoppedHook(ctx context.Context) error {
	return c.applyLifecycleHooks(ctx, false, func(lifecycleHooks ContainerLifecycleHooks) []ContainerHook {
		return lifecycleHooks.PostStops
	})
}

// terminatingHook is a hook that will be called before a container is terminated.
// Container logs are not printed when a hook fails (logError=false).
func (c *DockerContainer) terminatingHook(ctx context.Context) error {
	return c.applyLifecycleHooks(ctx, false, func(lifecycleHooks ContainerLifecycleHooks) []ContainerHook {
		return lifecycleHooks.PreTerminates
	})
}

// terminatedHook is a hook that will be called after a container is terminated.
// Container logs are not printed when a hook fails (logError=false).
func (c *DockerContainer) terminatedHook(ctx context.Context) error {
	return c.applyLifecycleHooks(ctx, false, func(lifecycleHooks ContainerLifecycleHooks) []ContainerHook {
		return lifecycleHooks.PostTerminates
	})
}
+
// applyLifecycleHooks applies all lifecycle hooks reporting the container logs on error if logError is true.
// Hooks from every registered ContainerLifecycleHooks entry run; all failures
// are collected and returned as a single joined error.
func (c *DockerContainer) applyLifecycleHooks(ctx context.Context, logError bool, hooks func(lifecycleHooks ContainerLifecycleHooks) []ContainerHook) error {
	var errs []error
	for _, lifecycleHooks := range c.lifecycleHooks {
		if err := containerHookFn(ctx, hooks(lifecycleHooks))(c); err != nil {
			errs = append(errs, err)
		}
	}

	if err := errors.Join(errs...); err != nil {
		if logError {
			select {
			case <-ctx.Done():
				// Context has timed out so need a new context to get logs.
				// This ctx deliberately shadows the outer one and only lives
				// inside this case.
				ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
				defer cancel()
				c.printLogs(ctx, err)
			default:
				c.printLogs(ctx, err)
			}
		}

		return err
	}

	return nil
}
+
// Building is a hook that will be called before a container image is built.
func (c ContainerLifecycleHooks) Building(ctx context.Context) func(req ContainerRequest) error {
	return containerRequestHook(ctx, c.PreBuilds)
}

// Built is a hook that will be called after a container image is built.
func (c ContainerLifecycleHooks) Built(ctx context.Context) func(req ContainerRequest) error {
	return containerRequestHook(ctx, c.PostBuilds)
}

// Creating is a hook that will be called before a container is created.
func (c ContainerLifecycleHooks) Creating(ctx context.Context) func(req ContainerRequest) error {
	return containerRequestHook(ctx, c.PreCreates)
}
+
+// containerRequestHook returns a function that will iterate over all
+// the hooks and call them one by one until there is an error.
+func containerRequestHook(ctx context.Context, hooks []ContainerRequestHook) func(req ContainerRequest) error {
+ return func(req ContainerRequest) error {
+ for _, hook := range hooks {
+ if err := hook(ctx, req); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }
+}
+
+// containerHookFn is a helper function that will create a function to be returned by all the different
+// container lifecycle hooks. The created function will iterate over all the hooks and call them one by one.
+func containerHookFn(ctx context.Context, containerHook []ContainerHook) func(container Container) error {
+ return func(ctr Container) error {
+ var errs []error
+ for _, hook := range containerHook {
+ if err := hook(ctx, ctr); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ return errors.Join(errs...)
+ }
+}
+
// Created is a hook that will be called after a container is created.
// It chains the PostCreates hooks into a single function.
func (c ContainerLifecycleHooks) Created(ctx context.Context) func(container Container) error {
	return containerHookFn(ctx, c.PostCreates)
}

// Starting is a hook that will be called before a container is started.
// It chains the PreStarts hooks into a single function.
func (c ContainerLifecycleHooks) Starting(ctx context.Context) func(container Container) error {
	return containerHookFn(ctx, c.PreStarts)
}

// Started is a hook that will be called after a container is started.
// It chains the PostStarts hooks into a single function.
func (c ContainerLifecycleHooks) Started(ctx context.Context) func(container Container) error {
	return containerHookFn(ctx, c.PostStarts)
}

// Readied is a hook that will be called after a container is ready.
// It chains the PostReadies hooks into a single function.
func (c ContainerLifecycleHooks) Readied(ctx context.Context) func(container Container) error {
	return containerHookFn(ctx, c.PostReadies)
}

// Stopping is a hook that will be called before a container is stopped.
// It chains the PreStops hooks into a single function.
func (c ContainerLifecycleHooks) Stopping(ctx context.Context) func(container Container) error {
	return containerHookFn(ctx, c.PreStops)
}

// Stopped is a hook that will be called after a container is stopped.
// It chains the PostStops hooks into a single function.
func (c ContainerLifecycleHooks) Stopped(ctx context.Context) func(container Container) error {
	return containerHookFn(ctx, c.PostStops)
}

// Terminating is a hook that will be called before a container is terminated.
// It chains the PreTerminates hooks into a single function.
func (c ContainerLifecycleHooks) Terminating(ctx context.Context) func(container Container) error {
	return containerHookFn(ctx, c.PreTerminates)
}

// Terminated is a hook that will be called after a container is terminated.
// It chains the PostTerminates hooks into a single function.
func (c ContainerLifecycleHooks) Terminated(ctx context.Context) func(container Container) error {
	return containerHookFn(ctx, c.PostTerminates)
}
+
+func (p *DockerProvider) preCreateContainerHook(ctx context.Context, req ContainerRequest, dockerInput *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig) error {
+ // prepare mounts
+ hostConfig.Mounts = mapToDockerMounts(req.Mounts)
+
+ endpointSettings := map[string]*network.EndpointSettings{}
+
+ // #248: Docker allows only one network to be specified during container creation
+ // If there is more than one network specified in the request container should be attached to them
+ // once it is created. We will take a first network if any specified in the request and use it to create container
+ if len(req.Networks) > 0 {
+ attachContainerTo := req.Networks[0]
+
+ nw, err := p.GetNetwork(ctx, NetworkRequest{
+ Name: attachContainerTo,
+ })
+ if err == nil {
+ aliases := []string{}
+ if _, ok := req.NetworkAliases[attachContainerTo]; ok {
+ aliases = req.NetworkAliases[attachContainerTo]
+ }
+ endpointSetting := network.EndpointSettings{
+ Aliases: aliases,
+ NetworkID: nw.ID,
+ }
+ endpointSettings[attachContainerTo] = &endpointSetting
+ }
+ }
+
+ if req.ConfigModifier != nil {
+ req.ConfigModifier(dockerInput)
+ }
+
+ if req.HostConfigModifier == nil {
+ req.HostConfigModifier = defaultHostConfigModifier(req)
+ }
+ req.HostConfigModifier(hostConfig)
+
+ if req.EndpointSettingsModifier != nil {
+ req.EndpointSettingsModifier(endpointSettings)
+ }
+
+ networkingConfig.EndpointsConfig = endpointSettings
+
+ exposedPorts := req.ExposedPorts
+ // this check must be done after the pre-creation Modifiers are called, so the network mode is already set
+ if len(exposedPorts) == 0 && !hostConfig.NetworkMode.IsContainer() {
+ image, err := p.client.ImageInspect(ctx, dockerInput.Image)
+ if err != nil {
+ return err
+ }
+ for p := range image.Config.ExposedPorts {
+ exposedPorts = append(exposedPorts, string(p))
+ }
+ }
+
+ exposedPortSet, exposedPortMap, err := nat.ParsePortSpecs(exposedPorts)
+ if err != nil {
+ return err
+ }
+
+ dockerInput.ExposedPorts = exposedPortSet
+
+ // only exposing those ports automatically if the container request exposes zero ports and the container does not run in a container network
+ if len(exposedPorts) == 0 && !hostConfig.NetworkMode.IsContainer() {
+ hostConfig.PortBindings = exposedPortMap
+ } else {
+ hostConfig.PortBindings = mergePortBindings(hostConfig.PortBindings, exposedPortMap, req.ExposedPorts)
+ }
+
+ return nil
+}
+
// combineContainerHooks returns a ContainerLifecycle hook as the result
// of combining the default hooks with the user-defined hooks.
//
// The order of hooks is the following:
// - Pre-hooks run the default hooks first then the user-defined hooks
// - Post-hooks run the user-defined hooks first then the default hooks
func combineContainerHooks(defaultHooks, userDefinedHooks []ContainerLifecycleHooks) ContainerLifecycleHooks {
	// We use reflection here to ensure that any new hooks are handled.
	// Fields are classified by their "Pre"/"Post" name prefix.
	var hooks ContainerLifecycleHooks
	hooksVal := reflect.ValueOf(&hooks).Elem()
	hooksType := reflect.TypeOf(hooks)
	// Pass 1: default pre-hooks go first.
	for _, defaultHook := range defaultHooks {
		defaultVal := reflect.ValueOf(defaultHook)
		for i := 0; i < hooksType.NumField(); i++ {
			if strings.HasPrefix(hooksType.Field(i).Name, "Pre") {
				field := hooksVal.Field(i)
				field.Set(reflect.AppendSlice(field, defaultVal.Field(i)))
			}
		}
	}

	// Append the user-defined hooks after the default pre-hooks
	// and because the post hooks are still empty, the user-defined
	// post-hooks will be the first ones to be executed.
	for _, userDefinedHook := range userDefinedHooks {
		userVal := reflect.ValueOf(userDefinedHook)
		for i := 0; i < hooksType.NumField(); i++ {
			field := hooksVal.Field(i)
			field.Set(reflect.AppendSlice(field, userVal.Field(i)))
		}
	}

	// Finally, append the default post-hooks.
	for _, defaultHook := range defaultHooks {
		defaultVal := reflect.ValueOf(defaultHook)
		for i := 0; i < hooksType.NumField(); i++ {
			if strings.HasPrefix(hooksType.Field(i).Name, "Post") {
				field := hooksVal.Field(i)
				field.Set(reflect.AppendSlice(field, defaultVal.Field(i)))
			}
		}
	}

	return hooks
}
+
+func mergePortBindings(configPortMap, exposedPortMap nat.PortMap, exposedPorts []string) nat.PortMap {
+ if exposedPortMap == nil {
+ exposedPortMap = make(map[nat.Port][]nat.PortBinding)
+ }
+
+ mappedPorts := make(map[string]struct{}, len(exposedPorts))
+ for _, p := range exposedPorts {
+ p = strings.Split(p, "/")[0]
+ mappedPorts[p] = struct{}{}
+ }
+
+ for k, v := range configPortMap {
+ if _, ok := mappedPorts[k.Port()]; ok {
+ exposedPortMap[k] = v
+ }
+ }
+ return exposedPortMap
+}
+
// defaultHostConfigModifier provides a default modifier including the deprecated fields.
// It copies the host-config related fields from the request onto the Docker
// HostConfig; it is used when the request supplies no HostConfigModifier.
func defaultHostConfigModifier(req ContainerRequest) func(hostConfig *container.HostConfig) {
	return func(hostConfig *container.HostConfig) {
		hostConfig.AutoRemove = req.AutoRemove
		hostConfig.CapAdd = req.CapAdd
		hostConfig.CapDrop = req.CapDrop
		hostConfig.Binds = req.Binds
		hostConfig.ExtraHosts = req.ExtraHosts
		hostConfig.NetworkMode = req.NetworkMode
		hostConfig.Resources = req.Resources
	}
}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/log/logger.go b/vendor/github.com/testcontainers/testcontainers-go/log/logger.go
new file mode 100644
index 0000000..d20e90a
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/log/logger.go
@@ -0,0 +1,73 @@
+package log
+
+import (
+ "log"
+ "os"
+ "strings"
+ "testing"
+)
+
// Validate our types implement the required interfaces.
var (
	_ Logger = (*log.Logger)(nil)
	_ Logger = (*noopLogger)(nil)
	_ Logger = (*testLogger)(nil)
)

// Logger defines the Logger interface.
type Logger interface {
	Printf(format string, v ...any)
}

// defaultLogger is the package-wide Logger used by Printf and Default.
// It discards everything unless verbose testing is detected in init.
var defaultLogger Logger = &noopLogger{}

// init enables the default logger when running under `go test` with the
// verbose flag. The flag is parsed by hand because testing.Verbose()
// panics unless flag.Parse() has been called.
func init() {
	if !testing.Testing() {
		return
	}
	for _, arg := range os.Args {
		if strings.EqualFold(arg, "-test.v=true") || strings.EqualFold(arg, "-v") {
			defaultLogger = log.New(os.Stderr, "", log.LstdFlags)
		}
	}
}

// Default returns the default Logger instance.
func Default() Logger {
	return defaultLogger
}

// SetDefault sets the default Logger instance.
func SetDefault(logger Logger) {
	defaultLogger = logger
}

// Printf logs through the default Logger.
func Printf(format string, v ...any) {
	defaultLogger.Printf(format, v...)
}

// noopLogger discards every message.
type noopLogger struct{}

// Printf implements Logger by doing nothing.
func (noopLogger) Printf(string, ...any) {}

// TestLogger returns a Logger backed by tb, so that logs emitted by
// testcontainers become part of the test output of a test suite or test case.
func TestLogger(tb testing.TB) Logger {
	tb.Helper()
	return testLogger{TB: tb}
}

// testLogger adapts a testing.TB to the Logger interface.
type testLogger struct {
	testing.TB
}

// Printf implements Logger by forwarding to (testing.TB).Logf.
func (t testLogger) Printf(format string, v ...any) {
	t.Helper()
	t.Logf(format, v...)
}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/logconsumer.go b/vendor/github.com/testcontainers/testcontainers-go/logconsumer.go
new file mode 100644
index 0000000..95bf111
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/logconsumer.go
@@ -0,0 +1,36 @@
+package testcontainers
+
// StdoutLog is the log type for STDOUT.
const StdoutLog = "STDOUT"

// StderrLog is the log type for STDERR.
const StderrLog = "STDERR"

// The "// logStruct {" / "// }" pairs below are snippet markers consumed by
// the documentation site; keep them intact.

// logStruct {

// Log represents a message that was created by a process,
// LogType is either "STDOUT" or "STDERR",
// Content is the byte contents of the message itself
type Log struct {
	LogType string
	Content []byte
}

// }

// logConsumerInterface {

// LogConsumer represents any object that can
// handle a Log, it is up to the LogConsumer instance
// what to do with the log
type LogConsumer interface {
	Accept(Log)
}

// }

// LogConsumerConfig is a configuration object for the producer/consumer pattern.
type LogConsumerConfig struct {
	Opts []LogProductionOption // options for the production of logs
	Consumers []LogConsumer // consumers for the logs
}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/logger_option.go b/vendor/github.com/testcontainers/testcontainers-go/logger_option.go
new file mode 100644
index 0000000..d40dd93
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/logger_option.go
@@ -0,0 +1,45 @@
+package testcontainers
+
+import "github.com/testcontainers/testcontainers-go/log"
+
// Validate our types implement the required interfaces at compile time.
var (
	_ ContainerCustomizer = LoggerOption{}
	_ GenericProviderOption = LoggerOption{}
	_ DockerProviderOption = LoggerOption{}
)
+
+// WithLogger returns a generic option that sets the logger to be used.
+//
+// Consider calling this before other "With functions" as these may generate logs.
+//
+// This can be given a TestLogger to collect the logs from testcontainers into a
+// test case.
+func WithLogger(logger log.Logger) LoggerOption {
+ return LoggerOption{
+ logger: logger,
+ }
+}
+
+// LoggerOption is a generic option that sets the logger to be used.
+//
+// It can be used to set the logger for providers and containers.
+type LoggerOption struct {
+ logger log.Logger
+}
+
// ApplyGenericTo implements GenericProviderOption.
// It sets the logger on the generic provider options.
func (o LoggerOption) ApplyGenericTo(opts *GenericProviderOptions) {
	opts.Logger = o.logger
}

// ApplyDockerTo implements DockerProviderOption.
// It sets the logger on the Docker provider options.
func (o LoggerOption) ApplyDockerTo(opts *DockerProviderOptions) {
	opts.Logger = o.logger
}

// Customize implements ContainerCustomizer.
// It sets the logger on the container request and never returns an error.
func (o LoggerOption) Customize(req *GenericContainerRequest) error {
	req.Logger = o.logger
	return nil
}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml b/vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml
new file mode 100644
index 0000000..8668ef5
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml
@@ -0,0 +1,147 @@
+# This file is autogenerated by the 'modulegen' tool.
+site_name: Testcontainers for Go
+site_url: https://golang.testcontainers.org
+plugins:
+ - search
+ - codeinclude
+ - include-markdown
+ - markdownextradata
+theme:
+ name: material
+ custom_dir: docs/theme
+ palette:
+ scheme: testcontainers
+ font:
+ text: Roboto
+ code: Roboto Mono
+ logo: logo.svg
+ favicon: favicon.ico
+extra_css:
+ - css/extra.css
+ - css/tc-header.css
+repo_name: testcontainers-go
+repo_url: https://github.com/testcontainers/testcontainers-go
+markdown_extensions:
+ - admonition
+ - codehilite:
+ linenums: false
+ - pymdownx.superfences
+ - pymdownx.tabbed:
+ alternate_style: true
+ - pymdownx.snippets
+ - toc:
+ permalink: true
+ - attr_list
+ - pymdownx.emoji:
+ emoji_generator: !!python/name:material.extensions.emoji.to_svg
+ emoji_index: !!python/name:material.extensions.emoji.twemoji
+nav:
+ - Home: index.md
+ - Quickstart: quickstart.md
+ - Features:
+ - features/creating_container.md
+ - features/configuration.md
+ - features/image_name_substitution.md
+ - features/files_and_mounts.md
+ - features/creating_networks.md
+ - features/networking.md
+ - features/tls.md
+ - features/test_session_semantics.md
+ - features/garbage_collector.md
+ - features/build_from_dockerfile.md
+ - features/docker_auth.md
+ - features/docker_compose.md
+ - features/follow_logs.md
+ - features/override_container_command.md
+ - Wait Strategies:
+ - Introduction: features/wait/introduction.md
+ - Exec: features/wait/exec.md
+ - Exit: features/wait/exit.md
+ - File: features/wait/file.md
+ - Health: features/wait/health.md
+ - HostPort: features/wait/host_port.md
+ - HTTP: features/wait/http.md
+ - Log: features/wait/log.md
+ - Multi: features/wait/multi.md
+ - SQL: features/wait/sql.md
+ - TLS: features/wait/tls.md
+ - Walk: features/wait/walk.md
+ - Modules:
+ - modules/index.md
+ - modules/artemis.md
+ - modules/azure.md
+ - modules/azurite.md
+ - modules/cassandra.md
+ - modules/chroma.md
+ - modules/clickhouse.md
+ - modules/cockroachdb.md
+ - modules/consul.md
+ - modules/couchbase.md
+ - modules/databend.md
+ - modules/dind.md
+ - modules/dolt.md
+ - modules/dynamodb.md
+ - modules/elasticsearch.md
+ - modules/etcd.md
+ - modules/gcloud.md
+ - modules/grafana-lgtm.md
+ - modules/inbucket.md
+ - modules/influxdb.md
+ - modules/k3s.md
+ - modules/k6.md
+ - modules/kafka.md
+ - modules/localstack.md
+ - modules/mariadb.md
+ - modules/meilisearch.md
+ - modules/milvus.md
+ - modules/minio.md
+ - modules/mockserver.md
+ - modules/mongodb.md
+ - modules/mssql.md
+ - modules/mysql.md
+ - modules/nats.md
+ - modules/neo4j.md
+ - modules/ollama.md
+ - modules/openfga.md
+ - modules/openldap.md
+ - modules/opensearch.md
+ - modules/pinecone.md
+ - modules/postgres.md
+ - modules/pulsar.md
+ - modules/qdrant.md
+ - modules/rabbitmq.md
+ - modules/redis.md
+ - modules/redpanda.md
+ - modules/registry.md
+ - modules/scylladb.md
+ - modules/surrealdb.md
+ - modules/valkey.md
+ - modules/vault.md
+ - modules/vearch.md
+ - modules/weaviate.md
+ - modules/yugabytedb.md
+ - Examples:
+ - examples/index.md
+ - examples/nginx.md
+ - examples/toxiproxy.md
+ - System Requirements:
+ - system_requirements/index.md
+ - system_requirements/docker.md
+ - Continuous Integration:
+ - system_requirements/ci/aws_codebuild.md
+ - system_requirements/ci/bitbucket_pipelines.md
+ - system_requirements/ci/circle_ci.md
+ - system_requirements/ci/concourse_ci.md
+ - system_requirements/ci/dind_patterns.md
+ - system_requirements/ci/drone.md
+ - system_requirements/ci/gitlab_ci.md
+ - system_requirements/ci/tekton.md
+ - system_requirements/ci/travis.md
+ - system_requirements/using_colima.md
+ - system_requirements/using_podman.md
+ - system_requirements/rancher.md
+ - Contributing: contributing.md
+ - Getting help: getting_help.md
+edit_uri: edit/main/docs/
+extra:
+ latest_version: v0.36.0
diff --git a/vendor/github.com/testcontainers/testcontainers-go/mounts.go b/vendor/github.com/testcontainers/testcontainers-go/mounts.go
new file mode 100644
index 0000000..a68e468
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/mounts.go
@@ -0,0 +1,126 @@
+package testcontainers
+
+import "errors"
+
+const (
+ MountTypeBind MountType = iota // Deprecated: Use MountTypeVolume instead
+ MountTypeVolume
+ MountTypeTmpfs
+ MountTypePipe
+)
+
+var (
+ ErrDuplicateMountTarget = errors.New("duplicate mount target detected")
+ ErrInvalidBindMount = errors.New("invalid bind mount")
+)
+
+var (
+ _ ContainerMountSource = (*GenericBindMountSource)(nil) // Deprecated: use Files or HostConfigModifier in the ContainerRequest, or copy files container APIs to make containers portable across Docker environments
+ _ ContainerMountSource = (*GenericVolumeMountSource)(nil)
+ _ ContainerMountSource = (*GenericTmpfsMountSource)(nil)
+)
+
+type (
+ // ContainerMounts represents a collection of mounts for a container
+ ContainerMounts []ContainerMount
+ MountType uint
+)
+
+// ContainerMountSource is the base for all mount sources
+type ContainerMountSource interface {
+ // Source will be used as Source field in the final mount
+ // this might either be a volume name, a host path or might be empty e.g. for Tmpfs
+ Source() string
+
+ // Type determines the final mount type
+ // possible options are limited by the Docker API
+ Type() MountType
+}
+
+// Deprecated: use Files or HostConfigModifier in the ContainerRequest, or copy files container APIs to make containers portable across Docker environments
+// GenericBindMountSource implements ContainerMountSource and represents a bind mount
+// Optionally mount.BindOptions might be added for advanced scenarios
+type GenericBindMountSource struct {
+ // HostPath is the path mounted into the container
+ // the same host path might be mounted to multiple locations within a single container
+ HostPath string
+}
+
+// Deprecated: use Files or HostConfigModifier in the ContainerRequest, or copy files container APIs to make containers portable across Docker environments
+func (s GenericBindMountSource) Source() string {
+ return s.HostPath
+}
+
+// Deprecated: use Files or HostConfigModifier in the ContainerRequest, or copy files container APIs to make containers portable across Docker environments
+func (GenericBindMountSource) Type() MountType {
+ return MountTypeBind
+}
+
+// GenericVolumeMountSource implements ContainerMountSource and represents a volume mount
+type GenericVolumeMountSource struct {
+ // Name refers to the name of the volume to be mounted
+ // the same volume might be mounted to multiple locations within a single container
+ Name string
+}
+
+func (s GenericVolumeMountSource) Source() string {
+ return s.Name
+}
+
+func (GenericVolumeMountSource) Type() MountType {
+ return MountTypeVolume
+}
+
+// GenericTmpfsMountSource implements ContainerMountSource and represents a TmpFS mount
+// Optionally mount.TmpfsOptions might be added for advanced scenarios
+type GenericTmpfsMountSource struct{}
+
+func (s GenericTmpfsMountSource) Source() string {
+ return ""
+}
+
+func (GenericTmpfsMountSource) Type() MountType {
+ return MountTypeTmpfs
+}
+
+// ContainerMountTarget represents the target path within a container where the mount will be available
+// Note that mount targets must be unique. It's not supported to mount different sources to the same target.
+type ContainerMountTarget string
+
+func (t ContainerMountTarget) Target() string {
+ return string(t)
+}
+
+// Deprecated: use Files or HostConfigModifier in the ContainerRequest, or copy files container APIs to make containers portable across Docker environments
+// BindMount returns a new ContainerMount with a GenericBindMountSource as source
+// This is a convenience method to cover typical use cases.
+func BindMount(hostPath string, mountTarget ContainerMountTarget) ContainerMount {
+ return ContainerMount{
+ Source: GenericBindMountSource{HostPath: hostPath},
+ Target: mountTarget,
+ }
+}
+
+// VolumeMount returns a new ContainerMount with a GenericVolumeMountSource as source
+// This is a convenience method to cover typical use cases.
+func VolumeMount(volumeName string, mountTarget ContainerMountTarget) ContainerMount {
+ return ContainerMount{
+ Source: GenericVolumeMountSource{Name: volumeName},
+ Target: mountTarget,
+ }
+}
+
+// Mounts returns a ContainerMounts to support a more fluent API
+func Mounts(mounts ...ContainerMount) ContainerMounts {
+ return mounts
+}
+
+// ContainerMount models a mount into a container
+type ContainerMount struct {
+ // Source is typically either a GenericVolumeMountSource, as BindMount is not supported by all Docker environments
+ Source ContainerMountSource
+ // Target is the path where the mount should be mounted within the container
+ Target ContainerMountTarget
+ // ReadOnly determines if the mount should be read-only
+ ReadOnly bool
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/network.go b/vendor/github.com/testcontainers/testcontainers-go/network.go
new file mode 100644
index 0000000..e0cc83f
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/network.go
@@ -0,0 +1,60 @@
+package testcontainers
+
+import (
+ "context"
+
+ "github.com/docker/docker/api/types/network"
+
+ "github.com/testcontainers/testcontainers-go/internal/core"
+)
+
+// NetworkProvider allows the creation of networks on an arbitrary system
+type NetworkProvider interface {
+ CreateNetwork(context.Context, NetworkRequest) (Network, error) // create a network
+ GetNetwork(context.Context, NetworkRequest) (network.Inspect, error) // get a network
+}
+
+// Deprecated: will be removed in the future
+// Network allows getting info about a single network instance
+type Network interface {
+ Remove(context.Context) error // removes the network
+}
+
+// Deprecated: will be removed in the future.
+type DefaultNetwork string
+
+// Deprecated: will be removed in the future.
+func (n DefaultNetwork) ApplyGenericTo(opts *GenericProviderOptions) {
+ opts.defaultNetwork = string(n)
+}
+
+// Deprecated: will be removed in the future.
+func (n DefaultNetwork) ApplyDockerTo(opts *DockerProviderOptions) {
+ opts.defaultNetwork = string(n)
+}
+
+// Deprecated: will be removed in the future
+// NetworkRequest represents the parameters used to get a network
+type NetworkRequest struct {
+ Driver string
+ CheckDuplicate bool // Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client package to older daemons.
+ Internal bool
+ EnableIPv6 *bool
+ Name string
+ Labels map[string]string
+ Attachable bool
+ IPAM *network.IPAM
+
+ SkipReaper bool // Deprecated: The reaper is globally controlled by the .testcontainers.properties file or the TESTCONTAINERS_RYUK_DISABLED environment variable
+ ReaperImage string // Deprecated: use WithImageName ContainerOption instead. Alternative reaper registry
+ ReaperOptions []ContainerOption // Deprecated: the reaper is configured at the properties level, for an entire test session
+}
+
+// sessionID returns the session ID for the network request.
+func (r NetworkRequest) sessionID() string {
+ if sessionID := r.Labels[core.LabelSessionID]; sessionID != "" {
+ return sessionID
+ }
+
+ return core.SessionID()
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/options.go b/vendor/github.com/testcontainers/testcontainers-go/options.go
new file mode 100644
index 0000000..f17de88
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/options.go
@@ -0,0 +1,336 @@
+package testcontainers
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "time"
+
+ "dario.cat/mergo"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/network"
+
+ tcexec "github.com/testcontainers/testcontainers-go/exec"
+ "github.com/testcontainers/testcontainers-go/internal/core"
+ "github.com/testcontainers/testcontainers-go/wait"
+)
+
+// ContainerCustomizer is an interface that can be used to configure the Testcontainers container
+// request. The passed request will be merged with the default one.
+type ContainerCustomizer interface {
+ Customize(req *GenericContainerRequest) error
+}
+
+// CustomizeRequestOption is a type that can be used to configure the Testcontainers container request.
+// The passed request will be merged with the default one.
+type CustomizeRequestOption func(req *GenericContainerRequest) error
+
+func (opt CustomizeRequestOption) Customize(req *GenericContainerRequest) error {
+ return opt(req)
+}
+
+// CustomizeRequest returns a function that can be used to merge the passed container request with the one that is used by the container.
+// Slices and Maps will be appended.
+func CustomizeRequest(src GenericContainerRequest) CustomizeRequestOption {
+ return func(req *GenericContainerRequest) error {
+ if err := mergo.Merge(req, &src, mergo.WithOverride, mergo.WithAppendSlice); err != nil {
+ return fmt.Errorf("error merging container request, keeping the original one: %w", err)
+ }
+
+ return nil
+ }
+}
+
+// WithConfigModifier allows to override the default container config
+func WithConfigModifier(modifier func(config *container.Config)) CustomizeRequestOption {
+ return func(req *GenericContainerRequest) error {
+ req.ConfigModifier = modifier
+
+ return nil
+ }
+}
+
+// WithEndpointSettingsModifier allows to override the default endpoint settings
+func WithEndpointSettingsModifier(modifier func(settings map[string]*network.EndpointSettings)) CustomizeRequestOption {
+ return func(req *GenericContainerRequest) error {
+ req.EndpointSettingsModifier = modifier
+
+ return nil
+ }
+}
+
+// WithEnv sets the environment variables for a container.
+// If the environment variable already exists, it will be overridden.
+func WithEnv(envs map[string]string) CustomizeRequestOption {
+ return func(req *GenericContainerRequest) error {
+ if req.Env == nil {
+ req.Env = map[string]string{}
+ }
+
+ for key, val := range envs {
+ req.Env[key] = val
+ }
+
+ return nil
+ }
+}
+
+// WithHostConfigModifier allows to override the default host config
+func WithHostConfigModifier(modifier func(hostConfig *container.HostConfig)) CustomizeRequestOption {
+ return func(req *GenericContainerRequest) error {
+ req.HostConfigModifier = modifier
+
+ return nil
+ }
+}
+
+// WithHostPortAccess allows to expose the host ports to the container
+func WithHostPortAccess(ports ...int) CustomizeRequestOption {
+ return func(req *GenericContainerRequest) error {
+ if req.HostAccessPorts == nil {
+ req.HostAccessPorts = []int{}
+ }
+
+ req.HostAccessPorts = append(req.HostAccessPorts, ports...)
+ return nil
+ }
+}
+
+// Deprecated: the modules API forces passing the image as part of the signature of the Run function.
+// WithImage sets the image for a container
+func WithImage(image string) CustomizeRequestOption {
+ return func(req *GenericContainerRequest) error {
+ req.Image = image
+
+ return nil
+ }
+}
+
+// imageSubstitutor {
+
+// ImageSubstitutor represents a way to substitute container image names
+type ImageSubstitutor interface {
+ // Description returns the name of the type and a short description of how it modifies the image.
+ // Useful to be printed in logs
+ Description() string
+ Substitute(image string) (string, error)
+}
+
+// }
+
+// CustomHubSubstitutor represents a way to substitute the hub of an image with a custom one,
+// using provided value with respect to the HubImageNamePrefix configuration value.
+type CustomHubSubstitutor struct {
+ hub string
+}
+
+// NewCustomHubSubstitutor creates a new CustomHubSubstitutor
+func NewCustomHubSubstitutor(hub string) CustomHubSubstitutor {
+ return CustomHubSubstitutor{
+ hub: hub,
+ }
+}
+
+// Description returns the name of the type and a short description of how it modifies the image.
+func (c CustomHubSubstitutor) Description() string {
+ return fmt.Sprintf("CustomHubSubstitutor (replaces hub with %s)", c.hub)
+}
+
+// Substitute replaces the hub of the image with the provided one, with certain conditions:
+// - if the hub is empty, the image is returned as is.
+// - if the image already contains a registry, the image is returned as is.
+// - if the HubImageNamePrefix configuration value is set, the image is returned as is.
+func (c CustomHubSubstitutor) Substitute(image string) (string, error) {
+ registry := core.ExtractRegistry(image, "")
+ cfg := ReadConfig()
+
+ exclusions := []func() bool{
+ func() bool { return c.hub == "" },
+ func() bool { return registry != "" },
+ func() bool { return cfg.Config.HubImageNamePrefix != "" },
+ }
+
+ for _, exclusion := range exclusions {
+ if exclusion() {
+ return image, nil
+ }
+ }
+
+ result, err := url.JoinPath(c.hub, image)
+ if err != nil {
+ return "", err
+ }
+
+ return result, nil
+}
+
+// prependHubRegistry represents a way to prepend a custom Hub registry to the image name,
+// using the HubImageNamePrefix configuration value
+type prependHubRegistry struct {
+ prefix string
+}
+
+// newPrependHubRegistry creates a new prependHubRegistry
+func newPrependHubRegistry(hubPrefix string) prependHubRegistry {
+ return prependHubRegistry{
+ prefix: hubPrefix,
+ }
+}
+
+// Description returns the name of the type and a short description of how it modifies the image.
+func (p prependHubRegistry) Description() string {
+ return fmt.Sprintf("HubImageSubstitutor (prepends %s)", p.prefix)
+}
+
+// Substitute prepends the Hub prefix to the image name, with certain conditions:
+// - if the prefix is empty, the image is returned as is.
+// - if the image is a non-hub image (e.g. where another registry is set), the image is returned as is.
+// - if the image is a Docker Hub image where the hub registry is explicitly part of the name
+// (i.e. anything with a registry.hub.docker.com host part), the image is returned as is.
+func (p prependHubRegistry) Substitute(image string) (string, error) {
+ registry := core.ExtractRegistry(image, "")
+
+ // add the exclusions in the right order
+ exclusions := []func() bool{
+ func() bool { return p.prefix == "" }, // no prefix set at the configuration level
+ func() bool { return registry != "" }, // non-hub image
+ func() bool { return registry == "docker.io" }, // explicitly including docker.io
+ func() bool { return registry == "registry.hub.docker.com" }, // explicitly including registry.hub.docker.com
+ }
+
+ for _, exclusion := range exclusions {
+ if exclusion() {
+ return image, nil
+ }
+ }
+
+ result, err := url.JoinPath(p.prefix, image)
+ if err != nil {
+ return "", err
+ }
+
+ return result, nil
+}
+
+// WithImageSubstitutors sets the image substitutors for a container
+func WithImageSubstitutors(fn ...ImageSubstitutor) CustomizeRequestOption {
+ return func(req *GenericContainerRequest) error {
+ req.ImageSubstitutors = fn
+
+ return nil
+ }
+}
+
+// WithLogConsumers sets the log consumers for a container
+func WithLogConsumers(consumer ...LogConsumer) CustomizeRequestOption {
+ return func(req *GenericContainerRequest) error {
+ if req.LogConsumerCfg == nil {
+ req.LogConsumerCfg = &LogConsumerConfig{}
+ }
+
+ req.LogConsumerCfg.Consumers = consumer
+ return nil
+ }
+}
+
+// Executable represents an executable command to be sent to a container, including options,
+// as part of the different lifecycle hooks.
+type Executable interface {
+ AsCommand() []string
+ // Options can contain two different types of options:
+ // - Docker's ExecConfigs (WithUser, WithWorkingDir, WithEnv, etc.)
+ // - testcontainers' ProcessOptions (i.e. Multiplexed response)
+ Options() []tcexec.ProcessOption
+}
+
+// ExecOptions is a struct that provides a default implementation for the Options method
+// of the Executable interface.
+type ExecOptions struct {
+ opts []tcexec.ProcessOption
+}
+
+func (ce ExecOptions) Options() []tcexec.ProcessOption {
+ return ce.opts
+}
+
+// RawCommand is a type that implements Executable and represents a command to be sent to a container
+type RawCommand struct {
+ ExecOptions
+ cmds []string
+}
+
+func NewRawCommand(cmds []string) RawCommand {
+ return RawCommand{
+ cmds: cmds,
+ ExecOptions: ExecOptions{
+ opts: []tcexec.ProcessOption{},
+ },
+ }
+}
+
+// AsCommand returns the command as a slice of strings
+func (r RawCommand) AsCommand() []string {
+ return r.cmds
+}
+
+// WithStartupCommand will execute the command representation of each Executable into the container.
+// It will leverage the container lifecycle hooks to call the command right after the container
+// is started.
+func WithStartupCommand(execs ...Executable) CustomizeRequestOption {
+ return func(req *GenericContainerRequest) error {
+ startupCommandsHook := ContainerLifecycleHooks{
+ PostStarts: []ContainerHook{},
+ }
+
+ for _, exec := range execs {
+ execFn := func(ctx context.Context, c Container) error {
+ _, _, err := c.Exec(ctx, exec.AsCommand(), exec.Options()...)
+ return err
+ }
+
+ startupCommandsHook.PostStarts = append(startupCommandsHook.PostStarts, execFn)
+ }
+
+ req.LifecycleHooks = append(req.LifecycleHooks, startupCommandsHook)
+
+ return nil
+ }
+}
+
+// WithAfterReadyCommand will execute the command representation of each Executable into the container.
+// It will leverage the container lifecycle hooks to call the command right after the container
+// is ready.
+func WithAfterReadyCommand(execs ...Executable) CustomizeRequestOption {
+ return func(req *GenericContainerRequest) error {
+ postReadiesHook := []ContainerHook{}
+
+ for _, exec := range execs {
+ execFn := func(ctx context.Context, c Container) error {
+ _, _, err := c.Exec(ctx, exec.AsCommand(), exec.Options()...)
+ return err
+ }
+
+ postReadiesHook = append(postReadiesHook, execFn)
+ }
+
+ req.LifecycleHooks = append(req.LifecycleHooks, ContainerLifecycleHooks{
+ PostReadies: postReadiesHook,
+ })
+
+ return nil
+ }
+}
+
+// WithWaitStrategy sets the wait strategy for a container, using 60 seconds as deadline
+func WithWaitStrategy(strategies ...wait.Strategy) CustomizeRequestOption {
+ return WithWaitStrategyAndDeadline(60*time.Second, strategies...)
+}
+
+// WithWaitStrategyAndDeadline sets the wait strategy for a container, including deadline
+func WithWaitStrategyAndDeadline(deadline time.Duration, strategies ...wait.Strategy) CustomizeRequestOption {
+ return func(req *GenericContainerRequest) error {
+ req.WaitingFor = wait.ForAll(strategies...).WithDeadline(deadline)
+
+ return nil
+ }
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/parallel.go b/vendor/github.com/testcontainers/testcontainers-go/parallel.go
new file mode 100644
index 0000000..0349023
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/parallel.go
@@ -0,0 +1,110 @@
+package testcontainers
+
+import (
+ "context"
+ "fmt"
+ "sync"
+)
+
+const (
+ defaultWorkersCount = 8
+)
+
+type ParallelContainerRequest []GenericContainerRequest
+
+// ParallelContainersOptions represents additional options for parallel running
+type ParallelContainersOptions struct {
+ WorkersCount int // count of parallel workers. If the field is empty (zero), 'defaultWorkersCount' will be used
+}
+
+// ParallelContainersRequestError represents error from parallel request
+type ParallelContainersRequestError struct {
+ Request GenericContainerRequest
+ Error error
+}
+
+type ParallelContainersError struct {
+ Errors []ParallelContainersRequestError
+}
+
+func (gpe ParallelContainersError) Error() string {
+ return fmt.Sprintf("%v", gpe.Errors)
+}
+
+// parallelContainersResult represents the result of a single parallel container creation request.
+type parallelContainersResult struct {
+ ParallelContainersRequestError
+ Container Container
+}
+
+func parallelContainersRunner(
+ ctx context.Context,
+ requests <-chan GenericContainerRequest,
+ results chan<- parallelContainersResult,
+ wg *sync.WaitGroup,
+) {
+ defer wg.Done()
+ for req := range requests {
+ c, err := GenericContainer(ctx, req)
+ res := parallelContainersResult{Container: c}
+ if err != nil {
+ res.Request = req
+ res.Error = err
+ }
+ results <- res
+ }
+}
+
+// ParallelContainers creates generic containers with the given parameters and runs them in parallel mode
+func ParallelContainers(ctx context.Context, reqs ParallelContainerRequest, opt ParallelContainersOptions) ([]Container, error) {
+ if opt.WorkersCount == 0 {
+ opt.WorkersCount = defaultWorkersCount
+ }
+
+ tasksChanSize := opt.WorkersCount
+ if tasksChanSize > len(reqs) {
+ tasksChanSize = len(reqs)
+ }
+
+ tasksChan := make(chan GenericContainerRequest, tasksChanSize)
+ resultsChan := make(chan parallelContainersResult, tasksChanSize)
+ done := make(chan struct{})
+
+ var wg sync.WaitGroup
+ wg.Add(tasksChanSize)
+
+ // run workers
+ for i := 0; i < tasksChanSize; i++ {
+ go parallelContainersRunner(ctx, tasksChan, resultsChan, &wg)
+ }
+
+ var errs []ParallelContainersRequestError
+ containers := make([]Container, 0, len(reqs))
+ go func() {
+ defer close(done)
+ for res := range resultsChan {
+ if res.Error != nil {
+ errs = append(errs, res.ParallelContainersRequestError)
+ } else {
+ containers = append(containers, res.Container)
+ }
+ }
+ }()
+
+ for _, req := range reqs {
+ tasksChan <- req
+ }
+ close(tasksChan)
+
+ wg.Wait()
+
+ close(resultsChan)
+
+ <-done
+
+ if len(errs) != 0 {
+ return containers, ParallelContainersError{Errors: errs}
+ }
+
+ return containers, nil
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/port_forwarding.go b/vendor/github.com/testcontainers/testcontainers-go/port_forwarding.go
new file mode 100644
index 0000000..b9fc970
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/port_forwarding.go
@@ -0,0 +1,427 @@
+package testcontainers
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "sync"
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/google/uuid"
+ "golang.org/x/crypto/ssh"
+
+ "github.com/testcontainers/testcontainers-go/internal/core/network"
+ "github.com/testcontainers/testcontainers-go/wait"
+)
+
+const (
+ // hubSshdImage {
+ sshdImage string = "testcontainers/sshd:1.2.0"
+ // }
+
+ // HostInternal is the internal hostname used to reach the host from the container,
+ // using the SSHD container as a bridge.
+ HostInternal string = "host.testcontainers.internal"
+ user string = "root"
+ sshPort = "22/tcp"
+)
+
+// sshPassword is a random password generated for the SSHD container.
+var sshPassword = uuid.NewString()
+
+// exposeHostPorts performs all the necessary steps to expose the host ports to the container, leveraging
+// the SSHD container to create the tunnel, and the container lifecycle hooks to manage the tunnel lifecycle.
+// At least one port must be provided to expose.
+// The steps are:
+// 1. Create a new SSHD container.
+// 2. Expose the host ports to the container after the container is ready.
+// 3. Close the SSH sessions before killing the container.
+func exposeHostPorts(ctx context.Context, req *ContainerRequest, ports ...int) (sshdConnectHook ContainerLifecycleHooks, err error) {
+ if len(ports) == 0 {
+ return sshdConnectHook, errors.New("no ports to expose")
+ }
+
+ // Use the first network of the container to connect to the SSHD container.
+ var sshdFirstNetwork string
+ if len(req.Networks) > 0 {
+ sshdFirstNetwork = req.Networks[0]
+ }
+
+ if sshdFirstNetwork == "bridge" && len(req.Networks) > 1 {
+ sshdFirstNetwork = req.Networks[1]
+ }
+
+ opts := []ContainerCustomizer{}
+ if len(req.Networks) > 0 {
+ // get the first network of the container to connect the SSHD container to it.
+ nw, err := network.GetByName(ctx, sshdFirstNetwork)
+ if err != nil {
+ return sshdConnectHook, fmt.Errorf("get network %q: %w", sshdFirstNetwork, err)
+ }
+
+ dockerNw := DockerNetwork{
+ ID: nw.ID,
+ Name: nw.Name,
+ }
+
+ // WithNetwork reuses an already existing network, attaching the container to it.
+ // Finally it sets the network alias on that network to the given alias.
+ // TODO: Using an anonymous function to avoid cyclic dependencies with the network package.
+ withNetwork := func(aliases []string, nw *DockerNetwork) CustomizeRequestOption {
+ return func(req *GenericContainerRequest) error {
+ networkName := nw.Name
+
+ // attaching to the network because it was created with success or it already existed.
+ req.Networks = append(req.Networks, networkName)
+
+ if req.NetworkAliases == nil {
+ req.NetworkAliases = make(map[string][]string)
+ }
+ req.NetworkAliases[networkName] = aliases
+ return nil
+ }
+ }
+
+ opts = append(opts, withNetwork([]string{HostInternal}, &dockerNw))
+ }
+
+ // start the SSHD container with the provided options
+ sshdContainer, err := newSshdContainer(ctx, opts...)
+ // Ensure the SSHD container is stopped and removed in case of error.
+ defer func() {
+ if err != nil {
+ err = errors.Join(err, TerminateContainer(sshdContainer))
+ }
+ }()
+ if err != nil {
+ return sshdConnectHook, fmt.Errorf("new sshd container: %w", err)
+ }
+
+ // IP in the first network of the container.
+ inspect, err := sshdContainer.Inspect(ctx)
+ if err != nil {
+ return sshdConnectHook, fmt.Errorf("inspect sshd container: %w", err)
+ }
+
+ // TODO: remove once we have docker context support via #2810
+ sshdIP := inspect.NetworkSettings.IPAddress
+ if sshdIP == "" {
+ single := len(inspect.NetworkSettings.Networks) == 1
+ for name, network := range inspect.NetworkSettings.Networks {
+ if name == sshdFirstNetwork || single {
+ sshdIP = network.IPAddress
+ break
+ }
+ }
+ }
+
+ if sshdIP == "" {
+ return sshdConnectHook, errors.New("sshd container IP not found")
+ }
+
+ if req.HostConfigModifier == nil {
+ req.HostConfigModifier = func(_ *container.HostConfig) {}
+ }
+
+ // do not override the original HostConfigModifier
+ originalHCM := req.HostConfigModifier
+ req.HostConfigModifier = func(hostConfig *container.HostConfig) {
+ // adding the host internal alias to the container as an extra host
+ // to allow the container to reach the SSHD container.
+ hostConfig.ExtraHosts = append(hostConfig.ExtraHosts, fmt.Sprintf("%s:%s", HostInternal, sshdIP))
+
+ modes := []container.NetworkMode{container.NetworkMode(sshdFirstNetwork), "none", "host"}
+ // if the container is not in one of the modes, attach it to the first network of the SSHD container
+ found := false
+ for _, mode := range modes {
+ if hostConfig.NetworkMode == mode {
+ found = true
+ break
+ }
+ }
+ if !found {
+ req.Networks = append(req.Networks, sshdFirstNetwork)
+ }
+
+ // invoke the original HostConfigModifier with the updated hostConfig
+ originalHCM(hostConfig)
+ }
+
+ stopHooks := []ContainerHook{
+ func(ctx context.Context, _ Container) error {
+ if ctx.Err() != nil {
+ // Context already canceled, need to create a new one to ensure
+ // the SSH session is closed.
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ }
+
+ return TerminateContainer(sshdContainer, StopContext(ctx))
+ },
+ }
+
+ // after the container is ready, create the SSH tunnel
+ // for each exposed port from the host.
+ sshdConnectHook = ContainerLifecycleHooks{
+ PostReadies: []ContainerHook{
+ func(ctx context.Context, _ Container) error {
+ return sshdContainer.exposeHostPort(ctx, req.HostAccessPorts...)
+ },
+ },
+ PreStops: stopHooks,
+ PreTerminates: stopHooks,
+ }
+
+ return sshdConnectHook, nil
+}
+
+// newSshdContainer creates a new SSHD container with the provided options.
+func newSshdContainer(ctx context.Context, opts ...ContainerCustomizer) (*sshdContainer, error) {
+ req := GenericContainerRequest{
+ ContainerRequest: ContainerRequest{
+ Image: sshdImage,
+ ExposedPorts: []string{sshPort},
+ Env: map[string]string{"PASSWORD": sshPassword},
+ WaitingFor: wait.ForListeningPort(sshPort),
+ },
+ Started: true,
+ }
+
+ for _, opt := range opts {
+ if err := opt.Customize(&req); err != nil {
+ return nil, err
+ }
+ }
+
+ c, err := GenericContainer(ctx, req)
+ var sshd *sshdContainer
+ if c != nil {
+ sshd = &sshdContainer{Container: c}
+ }
+
+ if err != nil {
+ return sshd, fmt.Errorf("generic container: %w", err)
+ }
+
+ if err = sshd.clientConfig(ctx); err != nil {
+ // Return the container and the error to the caller to handle it.
+ return sshd, err
+ }
+
+ return sshd, nil
+}
+
+// sshdContainer represents the SSHD container type used for the port forwarding container.
+// It's an internal type that extends the DockerContainer type, to add the SSH tunnelling capabilities.
+type sshdContainer struct {
+ Container
+ port string
+ sshConfig *ssh.ClientConfig
+ portForwarders []*portForwarder
+}
+
+// Terminate stops the container and closes the SSH session
+func (sshdC *sshdContainer) Terminate(ctx context.Context, opts ...TerminateOption) error {
+ return errors.Join(
+ sshdC.closePorts(),
+ sshdC.Container.Terminate(ctx, opts...),
+ )
+}
+
+// Stop stops the container and closes the SSH session
+func (sshdC *sshdContainer) Stop(ctx context.Context, timeout *time.Duration) error {
+ return errors.Join(
+ sshdC.closePorts(),
+ sshdC.Container.Stop(ctx, timeout),
+ )
+}
+
+// closePorts closes all port forwarders.
+func (sshdC *sshdContainer) closePorts() error {
+ var errs []error
+ for _, pfw := range sshdC.portForwarders {
+ if err := pfw.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ sshdC.portForwarders = nil // Ensure the port forwarders are not used after closing.
+ return errors.Join(errs...)
+}
+
+// clientConfig sets up the SSHD client configuration.
+func (sshdC *sshdContainer) clientConfig(ctx context.Context) error {
+ mappedPort, err := sshdC.MappedPort(ctx, sshPort)
+ if err != nil {
+ return fmt.Errorf("mapped port: %w", err)
+ }
+
+ sshdC.port = mappedPort.Port()
+ sshdC.sshConfig = &ssh.ClientConfig{
+ User: user,
+ HostKeyCallback: ssh.InsecureIgnoreHostKey(),
+ Auth: []ssh.AuthMethod{ssh.Password(sshPassword)},
+ }
+
+ return nil
+}
+
+// exposeHostPort exposes the host ports to the container.
+func (sshdC *sshdContainer) exposeHostPort(ctx context.Context, ports ...int) (err error) {
+ defer func() {
+ if err != nil {
+ err = errors.Join(err, sshdC.closePorts())
+ }
+ }()
+ for _, port := range ports {
+ pf, err := newPortForwarder(ctx, "localhost:"+sshdC.port, sshdC.sshConfig, port)
+ if err != nil {
+ return fmt.Errorf("new port forwarder: %w", err)
+ }
+
+ sshdC.portForwarders = append(sshdC.portForwarders, pf)
+ }
+
+ return nil
+}
+
+// portForwarder forwards a port from the container to the host.
+type portForwarder struct {
+ client *ssh.Client
+ listener net.Listener
+ dialTimeout time.Duration
+ localAddr string
+ ctx context.Context
+ cancel context.CancelFunc
+
+ // closeMtx protects the close operation
+ closeMtx sync.Mutex
+ closeErr error
+}
+
+// newPortForwarder creates a new running portForwarder for the given port.
+// The context is only used for the initial SSH connection.
+func newPortForwarder(ctx context.Context, sshDAddr string, sshConfig *ssh.ClientConfig, port int) (pf *portForwarder, err error) {
+ var d net.Dialer
+ conn, err := d.DialContext(ctx, "tcp", sshDAddr)
+ if err != nil {
+ return nil, fmt.Errorf("ssh dial: %w", err)
+ }
+
+ // Ensure the connection is closed in case of error.
+ defer func() {
+ if err != nil {
+ err = errors.Join(err, conn.Close())
+ }
+ }()
+
+ c, chans, reqs, err := ssh.NewClientConn(conn, sshDAddr, sshConfig)
+ if err != nil {
+ return nil, fmt.Errorf("ssh new client conn: %w", err)
+ }
+
+ client := ssh.NewClient(c, chans, reqs)
+
+ listener, err := client.Listen("tcp", fmt.Sprintf("localhost:%d", port))
+ if err != nil {
+ return nil, fmt.Errorf("listening on remote port %d: %w", port, err)
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ pf = &portForwarder{
+ client: client,
+ listener: listener,
+ localAddr: fmt.Sprintf("localhost:%d", port),
+ ctx: ctx,
+ cancel: cancel,
+ dialTimeout: time.Second * 2,
+ }
+
+ go pf.run()
+
+ return pf, nil
+}
+
+// Close closes the port forwarder.
+func (pf *portForwarder) Close() error {
+ pf.closeMtx.Lock()
+ defer pf.closeMtx.Unlock()
+
+ select {
+ case <-pf.ctx.Done():
+ // Already closed.
+ return pf.closeErr
+ default:
+ }
+
+ var errs []error
+ if err := pf.listener.Close(); err != nil {
+ errs = append(errs, fmt.Errorf("close listener: %w", err))
+ }
+ if err := pf.client.Close(); err != nil {
+ errs = append(errs, fmt.Errorf("close client: %w", err))
+ }
+
+ pf.closeErr = errors.Join(errs...)
+ pf.cancel()
+
+ return pf.closeErr
+}
+
+// run forwards the port from the remote connection to the local connection.
+func (pf *portForwarder) run() {
+ for {
+ remote, err := pf.listener.Accept()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // The listener has been closed.
+ return
+ }
+
+ // Ignore errors as they are transient and we want requests to
+ // continue to be accepted.
+ continue
+ }
+
+ go pf.tunnel(remote)
+ }
+}
+
+// tunnel runs a tunnel between two connections; as soon as the forwarder
+// context is cancelled or one connection copies returns, irrespective of
+// the error, both connections are closed.
+func (pf *portForwarder) tunnel(remote net.Conn) {
+ defer remote.Close()
+
+ ctx, cancel := context.WithTimeout(pf.ctx, pf.dialTimeout)
+ defer cancel()
+
+ var dialer net.Dialer
+ local, err := dialer.DialContext(ctx, "tcp", pf.localAddr)
+ if err != nil {
+ // Nothing we can do with the error.
+ return
+ }
+ defer local.Close()
+
+ ctx, cancel = context.WithCancel(pf.ctx)
+
+ go func() {
+ defer cancel()
+ io.Copy(local, remote) //nolint:errcheck // Nothing useful we can do with the error.
+ }()
+
+ go func() {
+ defer cancel()
+ io.Copy(remote, local) //nolint:errcheck // Nothing useful we can do with the error.
+ }()
+
+ // Wait for the context to be done before returning which triggers
+ // both connections to close. This is done to prevent the copies
+ // blocking forever on unused connections.
+ <-ctx.Done()
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/provider.go b/vendor/github.com/testcontainers/testcontainers-go/provider.go
new file mode 100644
index 0000000..d2347b7
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/provider.go
@@ -0,0 +1,155 @@
+package testcontainers
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/testcontainers/testcontainers-go/internal/config"
+ "github.com/testcontainers/testcontainers-go/internal/core"
+ "github.com/testcontainers/testcontainers-go/log"
+)
+
+// possible provider types
+const (
+ ProviderDefault ProviderType = iota // default will auto-detect provider from DOCKER_HOST environment variable
+ ProviderDocker
+ ProviderPodman
+)
+
+type (
+ // ProviderType is an enum for the possible providers
+ ProviderType int
+
+ // GenericProviderOptions defines options applicable to all providers
+ GenericProviderOptions struct {
+ Logger log.Logger
+ defaultNetwork string
+ }
+
+ // GenericProviderOption defines a common interface to modify GenericProviderOptions
+ // These options can be passed to GetProvider in a variadic way to customize the returned GenericProvider instance
+ GenericProviderOption interface {
+ ApplyGenericTo(opts *GenericProviderOptions)
+ }
+
+ // GenericProviderOptionFunc is a shorthand to implement the GenericProviderOption interface
+ GenericProviderOptionFunc func(opts *GenericProviderOptions)
+
+ // DockerProviderOptions defines options applicable to DockerProvider
+ DockerProviderOptions struct {
+ defaultBridgeNetworkName string
+ *GenericProviderOptions
+ }
+
+ // DockerProviderOption defines a common interface to modify DockerProviderOptions
+ // These can be passed to NewDockerProvider in a variadic way to customize the returned DockerProvider instance
+ DockerProviderOption interface {
+ ApplyDockerTo(opts *DockerProviderOptions)
+ }
+
+ // DockerProviderOptionFunc is a shorthand to implement the DockerProviderOption interface
+ DockerProviderOptionFunc func(opts *DockerProviderOptions)
+)
+
+func (f DockerProviderOptionFunc) ApplyDockerTo(opts *DockerProviderOptions) {
+ f(opts)
+}
+
+func Generic2DockerOptions(opts ...GenericProviderOption) []DockerProviderOption {
+ converted := make([]DockerProviderOption, 0, len(opts))
+ for _, o := range opts {
+ switch c := o.(type) {
+ case DockerProviderOption:
+ converted = append(converted, c)
+ default:
+ converted = append(converted, DockerProviderOptionFunc(func(opts *DockerProviderOptions) {
+ o.ApplyGenericTo(opts.GenericProviderOptions)
+ }))
+ }
+ }
+
+ return converted
+}
+
+func WithDefaultBridgeNetwork(bridgeNetworkName string) DockerProviderOption {
+ return DockerProviderOptionFunc(func(opts *DockerProviderOptions) {
+ opts.defaultBridgeNetworkName = bridgeNetworkName
+ })
+}
+
+func (f GenericProviderOptionFunc) ApplyGenericTo(opts *GenericProviderOptions) {
+ f(opts)
+}
+
+// ContainerProvider allows the creation of containers on an arbitrary system
+type ContainerProvider interface {
+ Close() error // close the provider
+ CreateContainer(context.Context, ContainerRequest) (Container, error) // create a container without starting it
+ ReuseOrCreateContainer(context.Context, ContainerRequest) (Container, error) // reuses a container if it exists or creates a container without starting
+ RunContainer(context.Context, ContainerRequest) (Container, error) // create a container and start it
+ Health(context.Context) error
+ Config() TestcontainersConfig
+}
+
+// GetProvider provides the provider implementation for a certain type
+func (t ProviderType) GetProvider(opts ...GenericProviderOption) (GenericProvider, error) {
+ opt := &GenericProviderOptions{
+ Logger: log.Default(),
+ }
+
+ for _, o := range opts {
+ o.ApplyGenericTo(opt)
+ }
+
+ pt := t
+ if pt == ProviderDefault && strings.Contains(os.Getenv("DOCKER_HOST"), "podman.sock") {
+ pt = ProviderPodman
+ }
+
+ switch pt {
+ case ProviderDefault, ProviderDocker:
+ providerOptions := append(Generic2DockerOptions(opts...), WithDefaultBridgeNetwork(Bridge))
+ provider, err := NewDockerProvider(providerOptions...)
+ if err != nil {
+ return nil, fmt.Errorf("%w, failed to create Docker provider", err)
+ }
+ return provider, nil
+ case ProviderPodman:
+ providerOptions := append(Generic2DockerOptions(opts...), WithDefaultBridgeNetwork(Podman))
+ provider, err := NewDockerProvider(providerOptions...)
+ if err != nil {
+ return nil, fmt.Errorf("%w, failed to create Docker provider", err)
+ }
+ return provider, nil
+ }
+ return nil, errors.New("unknown provider")
+}
+
+// NewDockerProvider creates a Docker provider with the EnvClient
+func NewDockerProvider(provOpts ...DockerProviderOption) (*DockerProvider, error) {
+ o := &DockerProviderOptions{
+ GenericProviderOptions: &GenericProviderOptions{
+ Logger: log.Default(),
+ },
+ }
+
+ for idx := range provOpts {
+ provOpts[idx].ApplyDockerTo(o)
+ }
+
+ ctx := context.Background()
+ c, err := NewDockerClientWithOpts(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ return &DockerProvider{
+ DockerProviderOptions: o,
+ host: core.MustExtractDockerHost(ctx),
+ client: c,
+ config: config.Read(),
+ }, nil
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/reaper.go b/vendor/github.com/testcontainers/testcontainers-go/reaper.go
new file mode 100644
index 0000000..7b2d8b9
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/reaper.go
@@ -0,0 +1,580 @@
+package testcontainers
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/errdefs"
+ "github.com/docker/go-connections/nat"
+
+ "github.com/testcontainers/testcontainers-go/internal/config"
+ "github.com/testcontainers/testcontainers-go/internal/core"
+ "github.com/testcontainers/testcontainers-go/log"
+ "github.com/testcontainers/testcontainers-go/wait"
+)
+
+const (
+ // Deprecated: it has been replaced by the internal core.LabelLang
+ TestcontainerLabel = "org.testcontainers.golang"
+ // Deprecated: it has been replaced by the internal core.LabelSessionID
+ TestcontainerLabelSessionID = TestcontainerLabel + ".sessionId"
+ // Deprecated: it has been replaced by the internal core.LabelReaper
+ TestcontainerLabelIsReaper = TestcontainerLabel + ".reaper"
+)
+
+var (
+ // Deprecated: it has been replaced by an internal value
+ ReaperDefaultImage = config.ReaperDefaultImage
+
+ // defaultReaperPort is the default port that the reaper listens on if not
+ // overridden by the RYUK_PORT environment variable.
+ defaultReaperPort = nat.Port("8080/tcp")
+
+ // errReaperNotFound is returned when no reaper container is found.
+ errReaperNotFound = errors.New("reaper not found")
+
+ // errReaperDisabled is returned if a reaper is requested but the
+ // config has it disabled.
+ errReaperDisabled = errors.New("reaper disabled")
+
+ // spawner is the singleton instance of reaperSpawner.
+ spawner = &reaperSpawner{}
+
+ // reaperAck is the expected response from the reaper container.
+ reaperAck = []byte("ACK\n")
+)
+
+// ReaperProvider represents a provider for the reaper to run itself with
+// The ContainerProvider interface should usually satisfy this as well, so it is pluggable
+type ReaperProvider interface {
+ RunContainer(ctx context.Context, req ContainerRequest) (Container, error)
+ Config() TestcontainersConfig
+}
+
+// NewReaper creates a Reaper with a sessionID to identify containers and a provider to use
+// Deprecated: it's not possible to create a reaper any more. Compose module uses this method
+// to create a reaper for the compose stack.
+//
+// The caller must call Connect at least once on the returned Reaper and use the returned
+// result otherwise the reaper will be kept open until the process exits.
+func NewReaper(ctx context.Context, sessionID string, provider ReaperProvider, _ string) (*Reaper, error) {
+ reaper, err := spawner.reaper(ctx, sessionID, provider)
+ if err != nil {
+ return nil, fmt.Errorf("reaper: %w", err)
+ }
+
+ return reaper, nil
+}
+
+// reaperContainerNameFromSessionID returns the container name that uniquely
+// identifies the container based on the session id.
+func reaperContainerNameFromSessionID(sessionID string) string {
+ // The session id is 64 characters, so we will not hit the limit of 128
+ // characters for container names.
+ return "reaper_" + sessionID
+}
+
+// reaperSpawner is a singleton that manages the reaper container.
+type reaperSpawner struct {
+ instance *Reaper
+ mtx sync.Mutex
+}
+
+// port returns the port that a new reaper should listen on.
+func (r *reaperSpawner) port() nat.Port {
+ if port := os.Getenv("RYUK_PORT"); port != "" {
+ natPort, err := nat.NewPort("tcp", port)
+ if err != nil {
+ panic(fmt.Sprintf("invalid RYUK_PORT value %q: %s", port, err))
+ }
+ return natPort
+ }
+
+ return defaultReaperPort
+}
+
+// backoff returns a backoff policy for the reaper spawner.
+// It will take at most 20 seconds, doing each attempt every 100ms - 250ms.
+func (r *reaperSpawner) backoff() *backoff.ExponentialBackOff {
+ // We want random intervals between 100ms and 250ms for concurrent executions
+ // to not be synchronized: it could be the case that multiple executions of this
+ // function happen at the same time (specifically when called from a different test
+ // process execution), and we want to avoid that they all try to find the reaper
+ // container at the same time.
+ b := &backoff.ExponentialBackOff{
+ InitialInterval: time.Millisecond * 100,
+ RandomizationFactor: backoff.DefaultRandomizationFactor,
+ Multiplier: backoff.DefaultMultiplier,
+ // Adjust MaxInterval to compensate for randomization factor which can be added to
+ // returned interval so we have a maximum of 250ms.
+ MaxInterval: time.Duration(float64(time.Millisecond*250) * backoff.DefaultRandomizationFactor),
+ MaxElapsedTime: time.Second * 20,
+ Stop: backoff.Stop,
+ Clock: backoff.SystemClock,
+ }
+ b.Reset()
+
+ return b
+}
+
+// cleanup terminates the reaper container if set.
+func (r *reaperSpawner) cleanup() error {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ return r.cleanupLocked()
+}
+
+// cleanupLocked terminates the reaper container if set.
+// It must be called with the lock held.
+func (r *reaperSpawner) cleanupLocked() error {
+ if r.instance == nil {
+ return nil
+ }
+
+ err := TerminateContainer(r.instance.container)
+ r.instance = nil
+
+ return err
+}
+
+// lookupContainer returns a DockerContainer type with the reaper container in the case
+// it's found in the running state, and including the labels for sessionID, reaper, and ryuk.
+// It will perform a retry with exponential backoff to allow for the container to be started and
+// avoid potential false negatives.
+func (r *reaperSpawner) lookupContainer(ctx context.Context, sessionID string) (*DockerContainer, error) {
+ dockerClient, err := NewDockerClientWithOpts(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("new client: %w", err)
+ }
+ defer dockerClient.Close()
+
+ provider, err := NewDockerProvider()
+ if err != nil {
+ return nil, fmt.Errorf("new provider: %w", err)
+ }
+
+ provider.SetClient(dockerClient)
+
+ opts := container.ListOptions{
+ All: true,
+ Filters: filters.NewArgs(
+ filters.Arg("label", fmt.Sprintf("%s=%s", core.LabelSessionID, sessionID)),
+ filters.Arg("label", fmt.Sprintf("%s=%t", core.LabelReaper, true)),
+ filters.Arg("label", fmt.Sprintf("%s=%t", core.LabelRyuk, true)),
+ filters.Arg("name", reaperContainerNameFromSessionID(sessionID)),
+ ),
+ }
+
+ return backoff.RetryWithData(
+ func() (*DockerContainer, error) {
+ resp, err := dockerClient.ContainerList(ctx, opts)
+ if err != nil {
+ return nil, fmt.Errorf("container list: %w", err)
+ }
+
+ if len(resp) == 0 {
+ // No reaper container found.
+ return nil, backoff.Permanent(errReaperNotFound)
+ }
+
+ if len(resp) > 1 {
+ return nil, fmt.Errorf("found %d reaper containers for session ID %q", len(resp), sessionID)
+ }
+
+ r, err := provider.ContainerFromType(ctx, resp[0])
+ if err != nil {
+ return nil, fmt.Errorf("from docker: %w", err)
+ }
+
+ switch {
+ case r.healthStatus == types.Healthy,
+ r.healthStatus == types.NoHealthcheck:
+ return r, nil
+ case r.healthStatus != "":
+ return nil, fmt.Errorf("container not healthy: %s", r.healthStatus)
+ }
+
+ return r, nil
+ },
+ backoff.WithContext(r.backoff(), ctx),
+ )
+}
+
+// isRunning returns an error if the container is not running.
+func (r *reaperSpawner) isRunning(ctx context.Context, ctr Container) error {
+ state, err := ctr.State(ctx)
+ if err != nil {
+ return fmt.Errorf("container state: %w", err)
+ }
+
+ if !state.Running {
+ // Use NotFound error to indicate the container is not running
+ // and should be recreated.
+ return errdefs.NotFound(fmt.Errorf("container state: %s", state.Status))
+ }
+
+ return nil
+}
+
+// retryError returns a permanent error if the error is not considered retryable.
+func (r *reaperSpawner) retryError(err error) error {
+ var timeout interface {
+ Timeout() bool
+ }
+ switch {
+ case isCleanupSafe(err),
+ createContainerFailDueToNameConflictRegex.MatchString(err.Error()),
+ errors.Is(err, syscall.ECONNREFUSED),
+ errors.Is(err, syscall.ECONNRESET),
+ errors.Is(err, syscall.ECONNABORTED),
+ errors.Is(err, syscall.ETIMEDOUT),
+ errors.Is(err, os.ErrDeadlineExceeded),
+ errors.As(err, &timeout) && timeout.Timeout(),
+ errors.Is(err, context.DeadlineExceeded),
+ errors.Is(err, context.Canceled):
+ // Retryable error.
+ return err
+ default:
+ return backoff.Permanent(err)
+ }
+}
+
+// reaper returns an existing Reaper instance if it exists and is running, otherwise
+// a new Reaper instance will be created with a sessionID to identify containers in
+// the same test session/program. The returned reaper is connected to the
+// reaper container.
+// Returns an error if config.RyukDisabled is true.
+//
+// Safe for concurrent calls.
+func (r *reaperSpawner) reaper(ctx context.Context, sessionID string, provider ReaperProvider) (*Reaper, error) {
+ if config.Read().RyukDisabled {
+ return nil, errReaperDisabled
+ }
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ return backoff.RetryWithData(
+ r.retryLocked(ctx, sessionID, provider),
+ backoff.WithContext(r.backoff(), ctx),
+ )
+}
+
+// retryLocked returns a function that can be used to create or reuse a reaper container.
+// The returned reaper is connected to the reaper container.
+// It must be called with the lock held.
+func (r *reaperSpawner) retryLocked(ctx context.Context, sessionID string, provider ReaperProvider) func() (*Reaper, error) {
+ return func() (reaper *Reaper, err error) {
+ reaper, err = r.reuseOrCreate(ctx, sessionID, provider)
+ // Ensure that the reaper is terminated if an error occurred.
+ defer func() {
+ if err != nil {
+ if reaper != nil {
+ err = errors.Join(err, TerminateContainer(reaper.container))
+ }
+ err = r.retryError(errors.Join(err, r.cleanupLocked()))
+ }
+ }()
+ if err != nil {
+ return nil, err
+ }
+
+ if err = r.isRunning(ctx, reaper.container); err != nil {
+ return nil, err
+ }
+
+ // Check we can still connect.
+ termSignal, err := reaper.connect(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("connect: %w", err)
+ }
+
+ reaper.setOrSignal(termSignal)
+
+ r.instance = reaper
+
+ return reaper, nil
+ }
+}
+
+// reuseOrCreate returns an existing Reaper instance if it exists, otherwise a new Reaper instance.
+func (r *reaperSpawner) reuseOrCreate(ctx context.Context, sessionID string, provider ReaperProvider) (*Reaper, error) {
+ if r.instance != nil {
+ // We already have an associated reaper.
+ return r.instance, nil
+ }
+
+ // Look for an existing reaper created in the same test session but in a
+ // different test process execution e.g. when running tests in parallel.
+ container, err := r.lookupContainer(context.Background(), sessionID)
+ if err != nil {
+ if !errors.Is(err, errReaperNotFound) {
+ return nil, fmt.Errorf("look up container: %w", err)
+ }
+
+ // The reaper container was not found, continue to create a new one.
+ reaper, err := r.newReaper(ctx, sessionID, provider)
+ if err != nil {
+ return nil, fmt.Errorf("new reaper: %w", err)
+ }
+
+ return reaper, nil
+ }
+
+ // A reaper container exists re-use it.
+ reaper, err := r.fromContainer(ctx, sessionID, provider, container)
+ if err != nil {
+ return nil, fmt.Errorf("from container %q: %w", container.ID[:8], err)
+ }
+
+ return reaper, nil
+}
+
+// fromContainer constructs a Reaper from an already running reaper DockerContainer.
+func (r *reaperSpawner) fromContainer(ctx context.Context, sessionID string, provider ReaperProvider, dockerContainer *DockerContainer) (*Reaper, error) {
+ log.Printf("⏳ Waiting for Reaper %q to be ready", dockerContainer.ID[:8])
+
+ // Reusing an existing container so we determine the port from the container's exposed ports.
+ if err := wait.ForExposedPort().
+ WithPollInterval(100*time.Millisecond).
+ SkipInternalCheck().
+ WaitUntilReady(ctx, dockerContainer); err != nil {
+ return nil, fmt.Errorf("wait for reaper %s: %w", dockerContainer.ID[:8], err)
+ }
+
+ endpoint, err := dockerContainer.Endpoint(ctx, "")
+ if err != nil {
+ return nil, fmt.Errorf("port endpoint: %w", err)
+ }
+
+ log.Printf("🔥 Reaper obtained from Docker for this test session %s", dockerContainer.ID[:8])
+
+ return &Reaper{
+ Provider: provider,
+ SessionID: sessionID,
+ Endpoint: endpoint,
+ container: dockerContainer,
+ }, nil
+}
+
+// newReaper creates a connected Reaper with a sessionID to identify containers
+// and a provider to use.
+func (r *reaperSpawner) newReaper(ctx context.Context, sessionID string, provider ReaperProvider) (reaper *Reaper, err error) {
+ dockerHostMount := core.MustExtractDockerSocket(ctx)
+
+ port := r.port()
+ tcConfig := provider.Config().Config
+ req := ContainerRequest{
+ Image: config.ReaperDefaultImage,
+ ExposedPorts: []string{string(port)},
+ Labels: core.DefaultLabels(sessionID),
+ Privileged: tcConfig.RyukPrivileged,
+ WaitingFor: wait.ForListeningPort(port),
+ Name: reaperContainerNameFromSessionID(sessionID),
+ HostConfigModifier: func(hc *container.HostConfig) {
+ hc.AutoRemove = true
+ hc.Binds = []string{dockerHostMount + ":/var/run/docker.sock"}
+ hc.NetworkMode = Bridge
+ },
+ Env: map[string]string{},
+ }
+ if to := tcConfig.RyukConnectionTimeout; to > time.Duration(0) {
+ req.Env["RYUK_CONNECTION_TIMEOUT"] = to.String()
+ }
+ if to := tcConfig.RyukReconnectionTimeout; to > time.Duration(0) {
+ req.Env["RYUK_RECONNECTION_TIMEOUT"] = to.String()
+ }
+ if tcConfig.RyukVerbose {
+ req.Env["RYUK_VERBOSE"] = "true"
+ }
+
+ // Setup reaper-specific labels for the reaper container.
+ req.Labels[core.LabelReaper] = "true"
+ req.Labels[core.LabelRyuk] = "true"
+ delete(req.Labels, core.LabelReap)
+
+ // Attach reaper container to a requested network if it is specified
+ if p, ok := provider.(*DockerProvider); ok {
+ defaultNetwork, err := p.ensureDefaultNetwork(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("ensure default network: %w", err)
+ }
+
+ req.Networks = append(req.Networks, defaultNetwork)
+ }
+
+ c, err := provider.RunContainer(ctx, req)
+ defer func() {
+ if err != nil {
+ err = errors.Join(err, TerminateContainer(c))
+ }
+ }()
+ if err != nil {
+ return nil, fmt.Errorf("run container: %w", err)
+ }
+
+ endpoint, err := c.PortEndpoint(ctx, port, "")
+ if err != nil {
+ return nil, fmt.Errorf("port endpoint: %w", err)
+ }
+
+ return &Reaper{
+ Provider: provider,
+ SessionID: sessionID,
+ Endpoint: endpoint,
+ container: c,
+ }, nil
+}
+
+// Reaper is used to start a sidecar container that cleans up resources
+type Reaper struct {
+ Provider ReaperProvider
+ SessionID string
+ Endpoint string
+ container Container
+ mtx sync.Mutex // Protects termSignal.
+ termSignal chan bool
+}
+
+// Connect connects to the reaper container and sends the labels to it
+// so that it can clean up the containers with the same labels.
+//
+// It returns a channel that can be closed to terminate the connection.
+// Returns an error if config.RyukDisabled is true.
+func (r *Reaper) Connect() (chan bool, error) {
+ if config.Read().RyukDisabled {
+ return nil, errReaperDisabled
+ }
+
+ if termSignal := r.useTermSignal(); termSignal != nil {
+ return termSignal, nil
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
+
+ return r.connect(ctx)
+}
+
+// close signals the connection to close if needed.
+// Safe for concurrent calls.
+func (r *Reaper) close() {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ if r.termSignal != nil {
+ r.termSignal <- true
+ r.termSignal = nil
+ }
+}
+
+// setOrSignal sets the reapers termSignal field if nil
+// otherwise consumes by sending true to it.
+// Safe for concurrent calls.
+func (r *Reaper) setOrSignal(termSignal chan bool) {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ if r.termSignal != nil {
+ // Already have an existing connection, close the new one.
+ termSignal <- true
+ return
+ }
+
+ // First or new unused termSignal, assign for caller to reuse.
+ r.termSignal = termSignal
+}
+
+// useTermSignal if termSignal is not nil returns it
+// and sets it to nil, otherwise returns nil.
+//
+// Safe for concurrent calls.
+func (r *Reaper) useTermSignal() chan bool {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ if r.termSignal == nil {
+ return nil
+ }
+
+ // Use existing connection.
+ term := r.termSignal
+ r.termSignal = nil
+
+ return term
+}
+
+// connect connects to the reaper container and sends the labels to it
+// so that it can clean up the containers with the same labels.
+//
+// It returns a channel that can be sent true to terminate the connection.
+// Returns an error if config.RyukDisabled is true.
+func (r *Reaper) connect(ctx context.Context) (chan bool, error) {
+ var d net.Dialer
+ conn, err := d.DialContext(ctx, "tcp", r.Endpoint)
+ if err != nil {
+ return nil, fmt.Errorf("dial reaper %s: %w", r.Endpoint, err)
+ }
+
+ terminationSignal := make(chan bool)
+ go func() {
+ defer conn.Close()
+ if err := r.handshake(conn); err != nil {
+ log.Printf("Reaper handshake failed: %s", err)
+ }
+ <-terminationSignal
+ }()
+ return terminationSignal, nil
+}
+
+// handshake sends the labels to the reaper container and reads the ACK.
+func (r *Reaper) handshake(conn net.Conn) error {
+ labels := core.DefaultLabels(r.SessionID)
+ labelFilters := make([]string, 0, len(labels))
+ for l, v := range labels {
+ labelFilters = append(labelFilters, fmt.Sprintf("label=%s=%s", l, v))
+ }
+
+ filters := []byte(strings.Join(labelFilters, "&") + "\n")
+ buf := make([]byte, 4)
+ if _, err := conn.Write(filters); err != nil {
+ return fmt.Errorf("writing filters: %w", err)
+ }
+
+ n, err := io.ReadFull(conn, buf)
+ if err != nil {
+ return fmt.Errorf("read ack: %w", err)
+ }
+
+ if !bytes.Equal(reaperAck, buf[:n]) {
+ // Received something other than the expected ACK.
+ return fmt.Errorf("unexpected reaper response: %s", buf[:n])
+ }
+
+ return nil
+}
+
+// Labels returns the container labels to use so that this Reaper cleans them up
+// Deprecated: internally replaced by core.DefaultLabels(sessionID)
+func (r *Reaper) Labels() map[string]string {
+ return GenericLabels()
+}
+
+// isReaperImage returns true if the image name is the reaper image.
+func isReaperImage(name string) bool {
+ return strings.HasSuffix(name, config.ReaperDefaultImage)
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/requirements.txt b/vendor/github.com/testcontainers/testcontainers-go/requirements.txt
new file mode 100644
index 0000000..e4db882
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/requirements.txt
@@ -0,0 +1,5 @@
+mkdocs==1.5.3
+mkdocs-codeinclude-plugin==0.2.1
+mkdocs-include-markdown-plugin==6.2.2
+mkdocs-material==9.5.18
+mkdocs-markdownextradata-plugin==0.2.6
diff --git a/vendor/github.com/testcontainers/testcontainers-go/runtime.txt b/vendor/github.com/testcontainers/testcontainers-go/runtime.txt
new file mode 100644
index 0000000..cc1923a
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/runtime.txt
@@ -0,0 +1 @@
+3.8
diff --git a/vendor/github.com/testcontainers/testcontainers-go/testcontainers.go b/vendor/github.com/testcontainers/testcontainers-go/testcontainers.go
new file mode 100644
index 0000000..77ba722
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/testcontainers.go
@@ -0,0 +1,54 @@
+package testcontainers
+
+import (
+ "context"
+
+ "github.com/testcontainers/testcontainers-go/internal/core"
+)
+
+// Deprecated: use MustExtractDockerHost instead.
+func ExtractDockerSocket() string {
+ return MustExtractDockerSocket(context.Background())
+}
+
+// MustExtractDockerSocket Extracts the docker socket from the different alternatives, removing the socket schema.
+// Use this function to get the docker socket path, not the host (e.g. mounting the socket in a container).
+// This function does not consider Windows containers at the moment.
+// The possible alternatives are:
+//
+// 1. Docker host from the "tc.host" property in the ~/.testcontainers.properties file.
+// 2. The TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE environment variable.
+// 3. Using a Docker client, check if the Info().OperatingSystem is "Docker Desktop" and return the default docker socket path for rootless docker.
+// 4. Else, Get the current Docker Host from the existing strategies: see MustExtractDockerHost.
+// 5. If the socket contains the unix schema, the schema is removed (e.g. unix:///var/run/docker.sock -> /var/run/docker.sock)
+// 6. Else, the default location of the docker socket is used (/var/run/docker.sock)
+//
+// It panics if a Docker client cannot be created, or the Docker host cannot be discovered.
+func MustExtractDockerSocket(ctx context.Context) string {
+ return core.MustExtractDockerSocket(ctx)
+}
+
+// SessionID returns a unique session ID for the current test session. Because each Go package
+// will be run in a separate process, we need a way to identify the current test session.
+// By test session, we mean:
+// - a single "go test" invocation (including flags)
+// - a single "go test ./..." invocation (including flags)
+// - the execution of a single test or a set of tests using the IDE
+//
+// As a consequence, with the sole goal of aggregating test execution across multiple
+// packages, this variable will contain the value of the parent process ID (pid) of the current process
+// and its creation date, to use it to generate a unique session ID. We are using the parent pid because
+// the current process will be a child process of:
+// - the process that is running the tests, e.g.: "go test";
+// - the process that is running the application in development mode, e.g. "go run main.go -tags dev";
+// - the process that is running the tests in the IDE, e.g.: "go test ./...".
+//
+// Finally, we will hash the combination of the "testcontainers-go:" string with the parent pid
+// and the creation date of that parent process to generate a unique session ID.
+//
+// This sessionID will be used to:
+// - identify the test session, aggregating the test execution of multiple packages in the same test session.
+// - tag the containers created by testcontainers-go, adding a label to the container with the session ID.
+func SessionID() string {
+ return core.SessionID()
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/testing.go b/vendor/github.com/testcontainers/testcontainers-go/testing.go
new file mode 100644
index 0000000..017f1a4
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/testing.go
@@ -0,0 +1,173 @@
+package testcontainers
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "regexp"
+ "testing"
+
+ "github.com/docker/docker/errdefs"
+ "github.com/stretchr/testify/require"
+)
+
+// errAlreadyInProgress is a regular expression that matches the error for a container
+// removal that is already in progress.
+var errAlreadyInProgress = regexp.MustCompile(`removal of container .* is already in progress`)
+
+// SkipIfProviderIsNotHealthy is a utility function capable of skipping tests
+// if the provider is not healthy, or running at all.
+// This is a function designed to be used in your test, when Docker is not mandatory for CI/CD.
+// In this way, tests that depend on Testcontainers won't run if the provider is not provisioned correctly.
+func SkipIfProviderIsNotHealthy(t *testing.T) {
+ t.Helper()
+ defer func() {
+ if r := recover(); r != nil {
+ t.Skipf("Recovered from panic: %v. Docker is not running. Testcontainers can't perform is work without it", r)
+ }
+ }()
+
+ ctx := context.Background()
+ provider, err := ProviderDocker.GetProvider()
+ if err != nil {
+ t.Skipf("Docker is not running. Testcontainers can't perform is work without it: %s", err)
+ }
+ err = provider.Health(ctx)
+ if err != nil {
+ t.Skipf("Docker is not running. Testcontainers can't perform is work without it: %s", err)
+ }
+}
+
+// SkipIfDockerDesktop is a utility function capable of skipping tests
+// if tests are run using Docker Desktop.
+func SkipIfDockerDesktop(t *testing.T, ctx context.Context) {
+ t.Helper()
+ cli, err := NewDockerClientWithOpts(ctx)
+ require.NoErrorf(t, err, "failed to create docker client: %s", err)
+
+ info, err := cli.Info(ctx)
+ require.NoErrorf(t, err, "failed to get docker info: %s", err)
+
+ if info.OperatingSystem == "Docker Desktop" {
+ t.Skip("Skipping test that requires host network access when running in Docker Desktop")
+ }
+}
+
+// exampleLogConsumer {
+
+// StdoutLogConsumer is a LogConsumer that prints the log to stdout
+type StdoutLogConsumer struct{}
+
+// Accept prints the log to stdout
+func (lc *StdoutLogConsumer) Accept(l Log) {
+ fmt.Print(string(l.Content))
+}
+
+// }
+
+// CleanupContainer is a helper function that schedules the container
+// to be stopped / terminated when the test ends.
+//
+// This should be called as a defer directly after (before any error check)
+// of [GenericContainer](...) or a modules Run(...) in a test to ensure the
+// container is stopped when the function ends.
+//
+// If container is nil, it's a no-op.
+func CleanupContainer(tb testing.TB, ctr Container, options ...TerminateOption) {
+ tb.Helper()
+
+ tb.Cleanup(func() {
+ noErrorOrIgnored(tb, TerminateContainer(ctr, options...))
+ })
+}
+
+// CleanupNetwork is a helper function that schedules the network to be
+// removed when the test ends.
+// This should be the first call after NewNetwork(...) in a test before
+// any error check. If network is nil, it's a no-op.
+func CleanupNetwork(tb testing.TB, network Network) {
+ tb.Helper()
+
+ tb.Cleanup(func() {
+ if !isNil(network) {
+ noErrorOrIgnored(tb, network.Remove(context.Background()))
+ }
+ })
+}
+
+// noErrorOrIgnored is a helper function that checks if the error is nil or an error
+// we can ignore.
+func noErrorOrIgnored(tb testing.TB, err error) {
+ tb.Helper()
+
+ if isCleanupSafe(err) {
+ return
+ }
+
+ require.NoError(tb, err)
+}
+
+// causer is an interface that allows to get the cause of an error.
+type causer interface {
+ Cause() error
+}
+
+// wrapErr is an interface that allows to unwrap an error.
+type wrapErr interface {
+ Unwrap() error
+}
+
+// unwrapErrs is an interface that allows to unwrap multiple errors.
+type unwrapErrs interface {
+ Unwrap() []error
+}
+
+// isCleanupSafe reports whether all errors in err's tree are one of the
+// following, so can safely be ignored:
+// - nil
+// - not found
+// - already in progress
+func isCleanupSafe(err error) bool {
+ if err == nil {
+ return true
+ }
+
+ switch x := err.(type) { //nolint:errorlint // We need to check for interfaces.
+ case errdefs.ErrNotFound:
+ return true
+ case errdefs.ErrConflict:
+ // Terminating a container that is already terminating.
+ if errAlreadyInProgress.MatchString(err.Error()) {
+ return true
+ }
+ return false
+ case causer:
+ return isCleanupSafe(x.Cause())
+ case wrapErr:
+ return isCleanupSafe(x.Unwrap())
+ case unwrapErrs:
+ for _, e := range x.Unwrap() {
+ if !isCleanupSafe(e) {
+ return false
+ }
+ }
+ return true
+ default:
+ return false
+ }
+}
+
+// RequireContainerExec is a helper function that executes a command in a container
+// It ensures that there is no error during the execution
+// Finally returns the output of its execution
+func RequireContainerExec(ctx context.Context, t *testing.T, container Container, cmd []string) string {
+ t.Helper()
+
+ code, out, err := container.Exec(ctx, cmd)
+ require.NoError(t, err)
+ require.Zero(t, code)
+
+ checkBytes, err := io.ReadAll(out)
+ require.NoError(t, err)
+ return string(checkBytes)
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/all.go b/vendor/github.com/testcontainers/testcontainers-go/wait/all.go
new file mode 100644
index 0000000..fb7eb4e
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/all.go
@@ -0,0 +1,82 @@
+package wait
+
+import (
+ "context"
+ "errors"
+ "time"
+)
+
// Implement interface
var (
	_ Strategy        = (*MultiStrategy)(nil)
	_ StrategyTimeout = (*MultiStrategy)(nil)
)

// MultiStrategy is a Strategy that runs several inner strategies in
// sequence (see ForAll).
type MultiStrategy struct {
	// timeout is the default startup timeout applied to inner strategies
	// that do not define their own; all Strategies should have a
	// startupTimeout to avoid waiting infinitely.
	timeout *time.Duration
	// deadline, when set, bounds the total time spent across all strategies.
	deadline *time.Duration

	// additional properties
	// Strategies are the inner wait strategies, evaluated in order.
	Strategies []Strategy
}
+
// WithStartupTimeoutDefault sets the default timeout for all inner wait strategies
// that do not declare a timeout of their own.
func (ms *MultiStrategy) WithStartupTimeoutDefault(timeout time.Duration) *MultiStrategy {
	ms.timeout = &timeout
	return ms
}

// WithStartupTimeout sets a time.Duration which limits all wait strategies.
// It delegates to WithDeadline, bounding the total time, not each strategy.
//
// Deprecated: use WithDeadline
func (ms *MultiStrategy) WithStartupTimeout(timeout time.Duration) Strategy {
	return ms.WithDeadline(timeout)
}

// WithDeadline sets a time.Duration which limits the total time spent in all wait strategies
func (ms *MultiStrategy) WithDeadline(deadline time.Duration) *MultiStrategy {
	ms.deadline = &deadline
	return ms
}
+
// ForAll creates a MultiStrategy that waits for each of the given
// strategies in the order supplied.
func ForAll(strategies ...Strategy) *MultiStrategy {
	return &MultiStrategy{
		Strategies: strategies,
	}
}

// Timeout returns the default per-strategy timeout, or nil if none was set.
func (ms *MultiStrategy) Timeout() *time.Duration {
	return ms.timeout
}
+
// WaitUntilReady implements Strategy.WaitUntilReady: it waits for every
// inner strategy in order and fails fast on the first error.
func (ms *MultiStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error {
	var cancel context.CancelFunc
	if ms.deadline != nil {
		// The deadline bounds the total time across all inner strategies.
		ctx, cancel = context.WithTimeout(ctx, *ms.deadline)
		defer cancel()
	}

	if len(ms.Strategies) == 0 {
		return errors.New("no wait strategy supplied")
	}

	for _, strategy := range ms.Strategies {
		strategyCtx := ctx

		// Set default Timeout when strategy implements StrategyTimeout
		// but has no timeout of its own. The deferred cancels accumulate
		// until this function returns, which is fine for a bounded list.
		if st, ok := strategy.(StrategyTimeout); ok {
			if ms.Timeout() != nil && st.Timeout() == nil {
				strategyCtx, cancel = context.WithTimeout(ctx, *ms.Timeout())
				defer cancel()
			}
		}

		err := strategy.WaitUntilReady(strategyCtx, target)
		if err != nil {
			return err
		}
	}

	return nil
}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/errors.go b/vendor/github.com/testcontainers/testcontainers-go/wait/errors.go
new file mode 100644
index 0000000..3e3919a
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/errors.go
@@ -0,0 +1,13 @@
+//go:build !windows
+// +build !windows
+
+package wait
+
+import (
+ "errors"
+ "syscall"
+)
+
+func isConnRefusedErr(err error) bool {
+ return errors.Is(err, syscall.ECONNREFUSED)
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/errors_windows.go b/vendor/github.com/testcontainers/testcontainers-go/wait/errors_windows.go
new file mode 100644
index 0000000..3ae346d
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/errors_windows.go
@@ -0,0 +1,9 @@
+package wait
+
import (
	"errors"

	"golang.org/x/sys/windows"
)
+
+func isConnRefusedErr(err error) bool {
+ return err == windows.WSAECONNREFUSED
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/exec.go b/vendor/github.com/testcontainers/testcontainers-go/wait/exec.go
new file mode 100644
index 0000000..72987c3
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/exec.go
@@ -0,0 +1,107 @@
+package wait
+
+import (
+ "context"
+ "io"
+ "time"
+
+ tcexec "github.com/testcontainers/testcontainers-go/exec"
+)
+
// Implement interface
var (
	_ Strategy        = (*ExecStrategy)(nil)
	_ StrategyTimeout = (*ExecStrategy)(nil)
)

// ExecStrategy waits until a command executed inside the container
// succeeds according to the configured matchers.
type ExecStrategy struct {
	// all Strategies should have a startupTimeout to avoid waiting infinitely
	timeout *time.Duration
	// cmd is the command (binary plus arguments) executed in the container.
	cmd []string

	// additional properties
	// ExitCodeMatcher decides whether an observed exit code is acceptable.
	ExitCodeMatcher func(exitCode int) bool
	// ResponseMatcher inspects the command's multiplexed output.
	ResponseMatcher func(body io.Reader) bool
	// PollInterval is the delay between consecutive exec attempts.
	PollInterval time.Duration
}
+
// NewExecStrategy constructs an ExecStrategy that runs cmd in the container,
// by default accepting exit code 0, accepting any output, and polling every
// 100 milliseconds.
func NewExecStrategy(cmd []string) *ExecStrategy {
	return &ExecStrategy{
		cmd:             cmd,
		ExitCodeMatcher: defaultExitCodeMatcher,
		ResponseMatcher: func(_ io.Reader) bool { return true },
		PollInterval:    defaultPollInterval(),
	}
}
+
// defaultExitCodeMatcher is the default exit-code predicate: only a zero
// (success) exit status is accepted.
func defaultExitCodeMatcher(exitCode int) bool {
	switch exitCode {
	case 0:
		return true
	default:
		return false
	}
}
+
// WithStartupTimeout can be used to change the default startup timeout
func (ws *ExecStrategy) WithStartupTimeout(startupTimeout time.Duration) *ExecStrategy {
	ws.timeout = &startupTimeout
	return ws
}

// WithExitCode configures the strategy to accept exactly the given exit code.
func (ws *ExecStrategy) WithExitCode(exitCode int) *ExecStrategy {
	return ws.WithExitCodeMatcher(func(actualCode int) bool {
		return actualCode == exitCode
	})
}

// WithExitCodeMatcher replaces the predicate used to decide whether an
// exit code counts as success.
func (ws *ExecStrategy) WithExitCodeMatcher(exitCodeMatcher func(exitCode int) bool) *ExecStrategy {
	ws.ExitCodeMatcher = exitCodeMatcher
	return ws
}

// WithResponseMatcher replaces the predicate applied to the command's output.
func (ws *ExecStrategy) WithResponseMatcher(matcher func(body io.Reader) bool) *ExecStrategy {
	ws.ResponseMatcher = matcher
	return ws
}

// WithPollInterval can be used to override the default polling interval of 100 milliseconds
func (ws *ExecStrategy) WithPollInterval(pollInterval time.Duration) *ExecStrategy {
	ws.PollInterval = pollInterval
	return ws
}

// ForExec is a convenience method to assign ExecStrategy
func ForExec(cmd []string) *ExecStrategy {
	return NewExecStrategy(cmd)
}

// Timeout returns the configured startup timeout, or nil if unset.
func (ws *ExecStrategy) Timeout() *time.Duration {
	return ws.timeout
}
+
// WaitUntilReady implements Strategy.WaitUntilReady: it repeatedly executes
// the configured command until both the exit-code and response matchers are
// satisfied, or until the startup timeout / context cancellation hits.
func (ws *ExecStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error {
	timeout := defaultStartupTimeout()
	if ws.timeout != nil {
		timeout = *ws.timeout
	}

	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(ws.PollInterval):
			// Multiplexed() merges stdout/stderr into a single stream.
			exitCode, resp, err := target.Exec(ctx, ws.cmd, tcexec.Multiplexed())
			if err != nil {
				// Failure to exec at all is fatal, not retried.
				return err
			}
			if !ws.ExitCodeMatcher(exitCode) {
				continue
			}
			if ws.ResponseMatcher != nil && !ws.ResponseMatcher(resp) {
				continue
			}

			return nil
		}
	}
}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/exit.go b/vendor/github.com/testcontainers/testcontainers-go/wait/exit.go
new file mode 100644
index 0000000..670c8e2
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/exit.go
@@ -0,0 +1,89 @@
+package wait
+
+import (
+ "context"
+ "strings"
+ "time"
+)
+
// Implement interface
var (
	_ Strategy        = (*ExitStrategy)(nil)
	_ StrategyTimeout = (*ExitStrategy)(nil)
)

// ExitStrategy will wait until container exit
type ExitStrategy struct {
	// all Strategies should have a timeout to avoid waiting infinitely
	timeout *time.Duration

	// additional properties
	// PollInterval is the delay between consecutive state checks.
	PollInterval time.Duration
}
+
// NewExitStrategy constructs with polling interval of 100 milliseconds without timeout by default
func NewExitStrategy() *ExitStrategy {
	return &ExitStrategy{
		PollInterval: defaultPollInterval(),
	}
}

// fluent builders for each property
// since go has neither covariance nor generics, the return type must be the type of the concrete implementation
// this is true for all properties, even the "shared" ones

// WithExitTimeout can be used to change the default exit timeout
func (ws *ExitStrategy) WithExitTimeout(exitTimeout time.Duration) *ExitStrategy {
	ws.timeout = &exitTimeout
	return ws
}

// WithPollInterval can be used to override the default polling interval of 100 milliseconds
func (ws *ExitStrategy) WithPollInterval(pollInterval time.Duration) *ExitStrategy {
	ws.PollInterval = pollInterval
	return ws
}

// ForExit is the default construction for the fluid interface.
//
// For Example:
//
//	wait.
//		ForExit().
//		WithPollInterval(1 * time.Second)
func ForExit() *ExitStrategy {
	return NewExitStrategy()
}

// Timeout returns the configured exit timeout, or nil if unset.
func (ws *ExitStrategy) Timeout() *time.Duration {
	return ws.timeout
}
+
// WaitUntilReady implements Strategy.WaitUntilReady: it polls the container
// state until the container is no longer running.
func (ws *ExitStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error {
	if ws.timeout != nil {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, *ws.timeout)
		defer cancel()
	}

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			state, err := target.State(ctx)
			if err != nil {
				if !strings.Contains(err.Error(), "No such container") {
					return err
				}
				// The container has been removed entirely, which also
				// means it has exited, so this is success.
				return nil
			}
			if state.Running {
				time.Sleep(ws.PollInterval)
				continue
			}
			return nil
		}
	}
}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/file.go b/vendor/github.com/testcontainers/testcontainers-go/wait/file.go
new file mode 100644
index 0000000..d9cab7a
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/file.go
@@ -0,0 +1,112 @@
+package wait
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/docker/docker/errdefs"
+)
+
// Interface compliance assertions.
var (
	_ Strategy        = (*FileStrategy)(nil)
	_ StrategyTimeout = (*FileStrategy)(nil)
)

// FileStrategy waits for a file to exist in the container.
type FileStrategy struct {
	// timeout is the startup timeout; a package default is used when nil.
	timeout *time.Duration
	// file is the path of the file inside the container.
	file string
	// pollInterval is the delay between consecutive copy attempts.
	pollInterval time.Duration
	// matcher optionally validates/consumes the file content; see WithMatcher.
	matcher func(io.Reader) error
}
+
// NewFileStrategy constructs a FileStrategy waiting for the given file,
// polling every 100 milliseconds by default.
func NewFileStrategy(file string) *FileStrategy {
	return &FileStrategy{
		file:         file,
		pollInterval: defaultPollInterval(),
	}
}

// WithStartupTimeout can be used to change the default startup timeout
func (ws *FileStrategy) WithStartupTimeout(startupTimeout time.Duration) *FileStrategy {
	ws.timeout = &startupTimeout
	return ws
}

// WithPollInterval can be used to override the default polling interval of 100 milliseconds
func (ws *FileStrategy) WithPollInterval(pollInterval time.Duration) *FileStrategy {
	ws.pollInterval = pollInterval
	return ws
}

// WithMatcher can be used to consume the file content.
// The matcher can return an errdefs.ErrNotFound to indicate that the file is not ready.
// Any other error will be considered a failure.
// Default: nil, will only wait for the file to exist.
func (ws *FileStrategy) WithMatcher(matcher func(io.Reader) error) *FileStrategy {
	ws.matcher = matcher
	return ws
}

// ForFile is a convenience method to assign FileStrategy
func ForFile(file string) *FileStrategy {
	return NewFileStrategy(file)
}

// Timeout returns the timeout for the strategy
func (ws *FileStrategy) Timeout() *time.Duration {
	return ws.timeout
}
+
+// WaitUntilReady waits until the file exists in the container and copies it to the target.
+func (ws *FileStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error {
+ timeout := defaultStartupTimeout()
+ if ws.timeout != nil {
+ timeout = *ws.timeout
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, timeout)
+ defer cancel()
+
+ timer := time.NewTicker(ws.pollInterval)
+ defer timer.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-timer.C:
+ if err := ws.matchFile(ctx, target); err != nil {
+ if errdefs.IsNotFound(err) {
+ // Not found, continue polling.
+ continue
+ }
+
+ return fmt.Errorf("copy from container: %w", err)
+ }
+ return nil
+ }
+ }
+}
+
// matchFile tries to copy the file from the container and match it.
// While the file (or the matcher's expected content) is not yet present,
// the returned error wraps errdefs.ErrNotFound so callers can keep polling.
func (ws *FileStrategy) matchFile(ctx context.Context, target StrategyTarget) error {
	rc, err := target.CopyFileFromContainer(ctx, ws.file)
	if err != nil {
		return fmt.Errorf("copy from container: %w", err)
	}
	defer rc.Close()

	if ws.matcher == nil {
		// No matcher, just check if the file exists.
		return nil
	}

	if err = ws.matcher(rc); err != nil {
		return fmt.Errorf("matcher: %w", err)
	}

	return nil
}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/health.go b/vendor/github.com/testcontainers/testcontainers-go/wait/health.go
new file mode 100644
index 0000000..06a9ad1
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/health.go
@@ -0,0 +1,92 @@
+package wait
+
+import (
+ "context"
+ "time"
+
+ "github.com/docker/docker/api/types"
+)
+
// Implement interface
var (
	_ Strategy        = (*HealthStrategy)(nil)
	_ StrategyTimeout = (*HealthStrategy)(nil)
)

// HealthStrategy will wait until the container becomes healthy
type HealthStrategy struct {
	// all Strategies should have a startupTimeout to avoid waiting infinitely
	timeout *time.Duration

	// additional properties
	// PollInterval is the delay between consecutive health-status checks.
	PollInterval time.Duration
}
+
// NewHealthStrategy constructs with polling interval of 100 milliseconds by
// default; the startup timeout defaults at wait time when left unset.
func NewHealthStrategy() *HealthStrategy {
	return &HealthStrategy{
		PollInterval: defaultPollInterval(),
	}
}

// fluent builders for each property
// since go has neither covariance nor generics, the return type must be the type of the concrete implementation
// this is true for all properties, even the "shared" ones like startupTimeout

// WithStartupTimeout can be used to change the default startup timeout
func (ws *HealthStrategy) WithStartupTimeout(startupTimeout time.Duration) *HealthStrategy {
	ws.timeout = &startupTimeout
	return ws
}

// WithPollInterval can be used to override the default polling interval of 100 milliseconds
func (ws *HealthStrategy) WithPollInterval(pollInterval time.Duration) *HealthStrategy {
	ws.PollInterval = pollInterval
	return ws
}

// ForHealthCheck is the default construction for the fluid interface.
//
// For Example:
//
//	wait.
//		ForHealthCheck().
//		WithPollInterval(1 * time.Second)
func ForHealthCheck() *HealthStrategy {
	return NewHealthStrategy()
}

// Timeout returns the configured startup timeout, or nil if unset.
func (ws *HealthStrategy) Timeout() *time.Duration {
	return ws.timeout
}
+
// WaitUntilReady implements Strategy.WaitUntilReady: it polls the container
// state until Docker reports the healthcheck status as healthy.
func (ws *HealthStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error {
	timeout := defaultStartupTimeout()
	if ws.timeout != nil {
		timeout = *ws.timeout
	}

	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			state, err := target.State(ctx)
			if err != nil {
				return err
			}
			// checkState (package helper, defined elsewhere) can fail fast;
			// presumably it rejects states that cannot become healthy.
			if err := checkState(state); err != nil {
				return err
			}
			// Keep polling until a healthcheck result is reported healthy.
			if state.Health == nil || state.Health.Status != types.Healthy {
				time.Sleep(ws.PollInterval)
				continue
			}
			return nil
		}
	}
}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go b/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go
new file mode 100644
index 0000000..2070bf1
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go
@@ -0,0 +1,245 @@
+package wait
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "os"
+ "strconv"
+ "time"
+
+ "github.com/docker/go-connections/nat"
+
+ "github.com/testcontainers/testcontainers-go/log"
+)
+
const (
	exitEaccess     = 126 // container cmd can't be invoked (permission denied)
	exitCmdNotFound = 127 // container cmd not found/does not exist or invalid bind-mount
)

// Implement interface
var (
	_ Strategy        = (*HostPortStrategy)(nil)
	_ StrategyTimeout = (*HostPortStrategy)(nil)
)

// Sentinel errors used to downgrade internal-check failures when the
// container image ships without a usable /bin/sh.
var (
	errShellNotExecutable = errors.New("/bin/sh command not executable")
	errShellNotFound      = errors.New("/bin/sh command not found")
)
+
// HostPortStrategy waits for the given port to be mapped to the host and,
// unless skipInternalCheck is set, to be bound inside the container.
type HostPortStrategy struct {
	// Port is a string containing port number and protocol in the format "80/tcp"
	// which, when empty, falls back to the lowest numbered exposed port.
	Port nat.Port
	// all WaitStrategies should have a startupTimeout to avoid waiting infinitely
	timeout *time.Duration
	// PollInterval is the delay between retries.
	PollInterval time.Duration

	// skipInternalCheck is a flag to skip the internal check, which is useful when
	// a shell is not available in the container or when the container doesn't bind
	// the port internally until additional conditions are met.
	skipInternalCheck bool
}
+
// NewHostPortStrategy constructs a default host port strategy that waits for the given
// port to be exposed. The default startup timeout is 60 seconds.
func NewHostPortStrategy(port nat.Port) *HostPortStrategy {
	return &HostPortStrategy{
		Port:         port,
		PollInterval: defaultPollInterval(),
	}
}

// fluent builders for each property
// since go has neither covariance nor generics, the return type must be the type of the concrete implementation
// this is true for all properties, even the "shared" ones like startupTimeout

// ForListeningPort returns a host port strategy that waits for the given port
// to be exposed and bound internally in the container.
// Alias for `NewHostPortStrategy(port)`.
func ForListeningPort(port nat.Port) *HostPortStrategy {
	return NewHostPortStrategy(port)
}

// ForExposedPort returns a host port strategy that waits for the first port
// to be exposed and bound internally in the container.
func ForExposedPort() *HostPortStrategy {
	return NewHostPortStrategy("")
}

// SkipInternalCheck changes the host port strategy to skip the internal check,
// which is useful when a shell is not available in the container or when the
// container doesn't bind the port internally until additional conditions are met.
func (hp *HostPortStrategy) SkipInternalCheck() *HostPortStrategy {
	hp.skipInternalCheck = true

	return hp
}

// WithStartupTimeout can be used to change the default startup timeout
func (hp *HostPortStrategy) WithStartupTimeout(startupTimeout time.Duration) *HostPortStrategy {
	hp.timeout = &startupTimeout
	return hp
}

// WithPollInterval can be used to override the default polling interval of 100 milliseconds
func (hp *HostPortStrategy) WithPollInterval(pollInterval time.Duration) *HostPortStrategy {
	hp.PollInterval = pollInterval
	return hp
}

// Timeout returns the configured startup timeout, or nil if unset.
func (hp *HostPortStrategy) Timeout() *time.Duration {
	return hp.timeout
}
+
// WaitUntilReady implements Strategy.WaitUntilReady: it resolves the target
// port (explicit or lowest exposed), waits for the host mapping to appear,
// then runs the external dial check and, optionally, the in-container check.
func (hp *HostPortStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error {
	timeout := defaultStartupTimeout()
	if hp.timeout != nil {
		timeout = *hp.timeout
	}

	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	ipAddress, err := target.Host(ctx)
	if err != nil {
		return err
	}

	waitInterval := hp.PollInterval

	internalPort := hp.Port
	if internalPort == "" {
		// No explicit port configured: fall back to the lowest numbered
		// port found in the container's network settings.
		inspect, err := target.Inspect(ctx)
		if err != nil {
			return err
		}

		for port := range inspect.NetworkSettings.Ports {
			if internalPort == "" || port.Int() < internalPort.Int() {
				internalPort = port
			}
		}
	}

	if internalPort == "" {
		return errors.New("no port to wait for")
	}

	// Retry the host-port mapping lookup until it resolves or the context
	// expires; the mapping may not exist yet right after startup.
	var port nat.Port
	port, err = target.MappedPort(ctx, internalPort)
	i := 0

	for port == "" {
		i++

		select {
		case <-ctx.Done():
			return fmt.Errorf("mapped port: retries: %d, port: %q, last err: %w, ctx err: %w", i, port, err, ctx.Err())
		case <-time.After(waitInterval):
			if err := checkTarget(ctx, target); err != nil {
				return fmt.Errorf("check target: retries: %d, port: %q, last err: %w", i, port, err)
			}
			port, err = target.MappedPort(ctx, internalPort)
			if err != nil {
				log.Printf("mapped port: retries: %d, port: %q, err: %s\n", i, port, err)
			}
		}
	}

	if err := externalCheck(ctx, ipAddress, port, target, waitInterval); err != nil {
		return fmt.Errorf("external check: %w", err)
	}

	if hp.skipInternalCheck {
		return nil
	}

	if err = internalCheck(ctx, internalPort, target); err != nil {
		// A missing or non-executable shell means the internal check cannot
		// run at all; the external check is treated as sufficient then.
		switch {
		case errors.Is(err, errShellNotExecutable):
			log.Printf("Shell not executable in container, only external port validated")
			return nil
		case errors.Is(err, errShellNotFound):
			log.Printf("Shell not found in container")
			return nil
		default:
			return fmt.Errorf("internal check: %w", err)
		}
	}

	return nil
}
+
// externalCheck dials the mapped host address until a connection is
// accepted, retrying only while the target refuses connections.
func externalCheck(ctx context.Context, ipAddress string, port nat.Port, target StrategyTarget, waitInterval time.Duration) error {
	proto := port.Proto()
	portNumber := port.Int()
	portString := strconv.Itoa(portNumber)

	dialer := net.Dialer{}
	address := net.JoinHostPort(ipAddress, portString)
	for i := 0; ; i++ {
		if err := checkTarget(ctx, target); err != nil {
			return fmt.Errorf("check target: retries: %d address: %s: %w", i, address, err)
		}
		conn, err := dialer.DialContext(ctx, proto, address)
		if err != nil {
			// Only "connection refused" (nested as *net.OpError ->
			// *os.SyscallError) is retried; any other dial error is fatal.
			var v *net.OpError
			if errors.As(err, &v) {
				var v2 *os.SyscallError
				if errors.As(v.Err, &v2) {
					if isConnRefusedErr(v2.Err) {
						time.Sleep(waitInterval)
						continue
					}
				}
			}
			return fmt.Errorf("dial: %w", err)
		}

		conn.Close()
		return nil
	}
}
+
// internalCheck repeatedly runs a shell probe inside the container until the
// given port is bound internally, reporting shell-availability failures via
// the errShellNotExecutable / errShellNotFound sentinels.
func internalCheck(ctx context.Context, internalPort nat.Port, target StrategyTarget) error {
	command := buildInternalCheckCommand(internalPort.Int())
	for {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		if err := checkTarget(ctx, target); err != nil {
			return err
		}
		exitCode, _, err := target.Exec(ctx, []string{"/bin/sh", "-c", command})
		if err != nil {
			return fmt.Errorf("%w, host port waiting failed", err)
		}

		// Docker has an issue which override exit code 127 to 126 due to:
		// https://github.com/moby/moby/issues/45795
		// Handle both to ensure compatibility with Docker and Podman for now.
		switch exitCode {
		case 0:
			return nil
		case exitEaccess:
			return errShellNotExecutable
		case exitCmdNotFound:
			return errShellNotFound
		}
	}
}
+
// buildInternalCheckCommand returns a shell one-liner that succeeds once the
// given port is bound inside the container. It tries, in order:
// /proc/net/tcp* (port rendered as 4-digit hex by %04x), nc, and /dev/tcp.
func buildInternalCheckCommand(internalPort int) string {
	command := `(
	cat /proc/net/tcp* | awk '{print $2}' | grep -i :%04x ||
	nc -vz -w 1 localhost %d ||
	/bin/sh -c '</dev/tcp/localhost/%d'
	)
	`
	return "true && " + fmt.Sprintf(command, internalPort, internalPort, internalPort)
}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/http.go b/vendor/github.com/testcontainers/testcontainers-go/wait/http.go
new file mode 100644
index 0000000..2c7c655
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/http.go
@@ -0,0 +1,338 @@
+package wait
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/docker/go-connections/nat"
+)
+
// Implement interface
var (
	_ Strategy        = (*HTTPStrategy)(nil)
	_ StrategyTimeout = (*HTTPStrategy)(nil)
)

// HTTPStrategy waits until an HTTP(S) request against the container's
// mapped port is answered in a way that satisfies all configured matchers.
type HTTPStrategy struct {
	// all Strategies should have a startupTimeout to avoid waiting infinitely
	timeout *time.Duration

	// additional properties
	Port                   nat.Port                  // target port; empty selects the lowest exposed tcp port
	Path                   string                    // request path
	StatusCodeMatcher      func(status int) bool     // accepts/rejects the response status code
	ResponseMatcher        func(body io.Reader) bool // accepts/rejects the response body
	UseTLS                 bool                      // use https instead of http
	AllowInsecure          bool                      // skip TLS certificate verification
	TLSConfig              *tls.Config               // TLS config for HTTPS
	Method                 string                    // http method
	Body                   io.Reader                 // http request body
	Headers                map[string]string         // extra request headers
	ResponseHeadersMatcher func(headers http.Header) bool
	PollInterval           time.Duration
	UserInfo               *url.Userinfo // optional basic-auth credentials
	ForceIPv4LocalHost     bool          // rewrite "localhost" to 127.0.0.1
}
+
// NewHTTPStrategy constructs an HTTP strategy that, by default, issues a GET
// against the lowest exposed tcp port (Port is left empty) and accepts only
// status code 200, polling every 100 milliseconds.
func NewHTTPStrategy(path string) *HTTPStrategy {
	return &HTTPStrategy{
		Port:                   "",
		Path:                   path,
		StatusCodeMatcher:      defaultStatusCodeMatcher,
		ResponseMatcher:        func(_ io.Reader) bool { return true },
		UseTLS:                 false,
		TLSConfig:              nil,
		Method:                 http.MethodGet,
		Body:                   nil,
		Headers:                map[string]string{},
		ResponseHeadersMatcher: func(_ http.Header) bool { return true },
		PollInterval:           defaultPollInterval(),
		UserInfo:               nil,
	}
}
+
// defaultStatusCodeMatcher is the default status predicate: only
// 200 OK is accepted.
func defaultStatusCodeMatcher(status int) bool {
	switch status {
	case http.StatusOK:
		return true
	default:
		return false
	}
}
+
// fluent builders for each property
// since go has neither covariance nor generics, the return type must be the type of the concrete implementation
// this is true for all properties, even the "shared" ones like startupTimeout

// WithStartupTimeout can be used to change the default startup timeout
func (ws *HTTPStrategy) WithStartupTimeout(timeout time.Duration) *HTTPStrategy {
	ws.timeout = &timeout
	return ws
}

// WithPort set the port to wait for.
// Default is the lowest numbered port.
func (ws *HTTPStrategy) WithPort(port nat.Port) *HTTPStrategy {
	ws.Port = port
	return ws
}

// WithStatusCodeMatcher replaces the predicate applied to the response status code.
func (ws *HTTPStrategy) WithStatusCodeMatcher(statusCodeMatcher func(status int) bool) *HTTPStrategy {
	ws.StatusCodeMatcher = statusCodeMatcher
	return ws
}

// WithResponseMatcher replaces the predicate applied to the response body.
func (ws *HTTPStrategy) WithResponseMatcher(matcher func(body io.Reader) bool) *HTTPStrategy {
	ws.ResponseMatcher = matcher
	return ws
}

// WithTLS enables or disables HTTPS; an optional tls.Config may be supplied
// for the client (only the first one is used).
func (ws *HTTPStrategy) WithTLS(useTLS bool, tlsconf ...*tls.Config) *HTTPStrategy {
	ws.UseTLS = useTLS
	if useTLS && len(tlsconf) > 0 {
		ws.TLSConfig = tlsconf[0]
	}
	return ws
}

// WithAllowInsecure toggles TLS certificate verification for HTTPS requests.
func (ws *HTTPStrategy) WithAllowInsecure(allowInsecure bool) *HTTPStrategy {
	ws.AllowInsecure = allowInsecure
	return ws
}

// WithMethod sets the HTTP method used for the probe request.
func (ws *HTTPStrategy) WithMethod(method string) *HTTPStrategy {
	ws.Method = method
	return ws
}

// WithBody sets the request body sent with each probe request.
func (ws *HTTPStrategy) WithBody(reqdata io.Reader) *HTTPStrategy {
	ws.Body = reqdata
	return ws
}

// WithHeaders sets additional request headers sent with each probe request.
func (ws *HTTPStrategy) WithHeaders(headers map[string]string) *HTTPStrategy {
	ws.Headers = headers
	return ws
}

// WithResponseHeadersMatcher replaces the predicate applied to the response headers.
func (ws *HTTPStrategy) WithResponseHeadersMatcher(matcher func(http.Header) bool) *HTTPStrategy {
	ws.ResponseHeadersMatcher = matcher
	return ws
}

// WithBasicAuth adds basic-auth credentials to the probe request URL.
func (ws *HTTPStrategy) WithBasicAuth(username, password string) *HTTPStrategy {
	ws.UserInfo = url.UserPassword(username, password)
	return ws
}

// WithPollInterval can be used to override the default polling interval of 100 milliseconds
func (ws *HTTPStrategy) WithPollInterval(pollInterval time.Duration) *HTTPStrategy {
	ws.PollInterval = pollInterval
	return ws
}

// WithForcedIPv4LocalHost forces usage of localhost to be ipv4 127.0.0.1
// to avoid ipv6 docker bugs https://github.com/moby/moby/issues/42442 https://github.com/moby/moby/issues/42375
func (ws *HTTPStrategy) WithForcedIPv4LocalHost() *HTTPStrategy {
	ws.ForceIPv4LocalHost = true
	return ws
}

// ForHTTP is a convenience method similar to Wait.java
// https://github.com/testcontainers/testcontainers-java/blob/1d85a3834bd937f80aad3a4cec249c027f31aeb4/core/src/main/java/org/testcontainers/containers/wait/strategy/Wait.java
func ForHTTP(path string) *HTTPStrategy {
	return NewHTTPStrategy(path)
}

// Timeout returns the configured startup timeout, or nil if unset.
func (ws *HTTPStrategy) Timeout() *time.Duration {
	return ws.timeout
}
+
// WaitUntilReady implements Strategy.WaitUntilReady: it resolves the target
// host/port, validates the HTTP method, then polls the endpoint until a
// response satisfies all configured matchers.
func (ws *HTTPStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error {
	timeout := defaultStartupTimeout()
	if ws.timeout != nil {
		timeout = *ws.timeout
	}

	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	ipAddress, err := target.Host(ctx)
	if err != nil {
		return err
	}
	// to avoid ipv6 docker bugs https://github.com/moby/moby/issues/42442 https://github.com/moby/moby/issues/42375
	if ws.ForceIPv4LocalHost {
		ipAddress = strings.Replace(ipAddress, "localhost", "127.0.0.1", 1)
	}

	var mappedPort nat.Port
	if ws.Port == "" {
		// We wait one polling interval before we grab the ports
		// otherwise they might not be bound yet on startup.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(ws.PollInterval):
			// Port should now be bound so just continue.
		}

		if err := checkTarget(ctx, target); err != nil {
			return err
		}

		inspect, err := target.Inspect(ctx)
		if err != nil {
			return err
		}

		// Find the lowest numbered exposed tcp port.
		var lowestPort nat.Port
		var hostPort string
		for port, bindings := range inspect.NetworkSettings.Ports {
			if len(bindings) == 0 || port.Proto() != "tcp" {
				continue
			}

			if lowestPort == "" || port.Int() < lowestPort.Int() {
				lowestPort = port
				hostPort = bindings[0].HostPort
			}
		}

		if lowestPort == "" {
			return errors.New("No exposed tcp ports or mapped ports - cannot wait for status")
		}

		mappedPort, _ = nat.NewPort(lowestPort.Proto(), hostPort)
	} else {
		// Retry the mapped-port lookup until it resolves or the context expires.
		mappedPort, err = target.MappedPort(ctx, ws.Port)

		for mappedPort == "" {
			select {
			case <-ctx.Done():
				return fmt.Errorf("%w: %w", ctx.Err(), err)
			case <-time.After(ws.PollInterval):
				if err := checkTarget(ctx, target); err != nil {
					return err
				}

				mappedPort, err = target.MappedPort(ctx, ws.Port)
			}
		}

		if mappedPort.Proto() != "tcp" {
			return errors.New("Cannot use HTTP client on non-TCP ports")
		}
	}

	// Validate the configured HTTP method; an empty method defaults to GET.
	switch ws.Method {
	case http.MethodGet, http.MethodHead, http.MethodPost,
		http.MethodPut, http.MethodPatch, http.MethodDelete,
		http.MethodConnect, http.MethodOptions, http.MethodTrace:
	default:
		if ws.Method != "" {
			return fmt.Errorf("invalid http method %q", ws.Method)
		}
		ws.Method = http.MethodGet
	}

	tripper := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   time.Second,
			KeepAlive: 30 * time.Second,
			DualStack: true,
		}).DialContext,
		ForceAttemptHTTP2:     true,
		MaxIdleConns:          100,
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
		TLSClientConfig:       ws.TLSConfig,
	}

	var proto string
	if ws.UseTLS {
		proto = "https"
		if ws.AllowInsecure {
			// NOTE(review): a caller-supplied TLSConfig is mutated in place
			// here rather than copied.
			if ws.TLSConfig == nil {
				tripper.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
			} else {
				ws.TLSConfig.InsecureSkipVerify = true
			}
		}
	} else {
		proto = "http"
	}

	client := http.Client{Transport: tripper, Timeout: time.Second}
	address := net.JoinHostPort(ipAddress, strconv.Itoa(mappedPort.Int()))

	endpoint, err := url.Parse(ws.Path)
	if err != nil {
		return err
	}
	endpoint.Scheme = proto
	endpoint.Host = address

	if ws.UserInfo != nil {
		endpoint.User = ws.UserInfo
	}

	// cache the body into a byte-slice so that it can be iterated over multiple times
	var body []byte
	if ws.Body != nil {
		body, err = io.ReadAll(ws.Body)
		if err != nil {
			return err
		}
	}

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(ws.PollInterval):
			if err := checkTarget(ctx, target); err != nil {
				return err
			}
			req, err := http.NewRequestWithContext(ctx, ws.Method, endpoint.String(), bytes.NewReader(body))
			if err != nil {
				return err
			}

			for k, v := range ws.Headers {
				req.Header.Set(k, v)
			}

			// Request errors and matcher rejections are retried until the
			// context expires; only request construction errors are fatal.
			resp, err := client.Do(req)
			if err != nil {
				continue
			}
			if ws.StatusCodeMatcher != nil && !ws.StatusCodeMatcher(resp.StatusCode) {
				_ = resp.Body.Close()
				continue
			}
			if ws.ResponseMatcher != nil && !ws.ResponseMatcher(resp.Body) {
				_ = resp.Body.Close()
				continue
			}
			if ws.ResponseHeadersMatcher != nil && !ws.ResponseHeadersMatcher(resp.Header) {
				_ = resp.Body.Close()
				continue
			}
			if err := resp.Body.Close(); err != nil {
				continue
			}
			return nil
		}
	}
}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/log.go b/vendor/github.com/testcontainers/testcontainers-go/wait/log.go
new file mode 100644
index 0000000..41c96e3
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/log.go
@@ -0,0 +1,214 @@
+package wait
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "regexp"
+ "time"
+)
+
+// Implement interface
+var (
+	_ Strategy        = (*LogStrategy)(nil)
+	_ StrategyTimeout = (*LogStrategy)(nil)
+)
+
+// PermanentError is a special error that will stop the wait and return an error.
+// It is detected with errors.As inside the wait loop, so it may be wrapped.
+type PermanentError struct {
+	err error
+}
+
+// Error implements the error interface.
+func (e *PermanentError) Error() string {
+	return e.err.Error()
+}
+
+// NewPermanentError creates a new PermanentError.
+func NewPermanentError(err error) *PermanentError {
+	return &PermanentError{err: err}
+}
+
+// LogStrategy will wait until a given log entry shows up in the docker logs
+type LogStrategy struct {
+	// all Strategies should have a startupTimeout to avoid waiting infinitely
+	timeout *time.Duration
+
+	// additional properties
+	Log          string        // the plain text, or regexp pattern, to search the container logs for
+	IsRegexp     bool          // when true, Log is treated as a regular expression
+	Occurrence   int           // minimum number of matches required; defaults to 1
+	PollInterval time.Duration // how often the logs are re-read
+
+	// check is the function that will be called to check if the log entry is present.
+	check func([]byte) error
+
+	// submatchCallback is a callback that will be called with the sub matches of the regexp.
+	submatchCallback func(pattern string, matches [][][]byte) error
+
+	// re is the optional compiled regexp.
+	re *regexp.Regexp
+
+	// log byte slice version of [LogStrategy.Log] used for count checks.
+	log []byte
+}
+
+// NewLogStrategy constructs with polling interval of 100 milliseconds and startup timeout of 60 seconds by default
+func NewLogStrategy(log string) *LogStrategy {
+	return &LogStrategy{
+		Log:          log,
+		IsRegexp:     false,
+		Occurrence:   1,
+		PollInterval: defaultPollInterval(),
+	}
+}
+
+// fluent builders for each property
+// since go has neither covariance nor generics, the return type must be the type of the concrete implementation
+// this is true for all properties, even the "shared" ones like startupTimeout
+
+// AsRegexp can be used to change the default behavior of the log strategy to use regexp instead of plain text
+func (ws *LogStrategy) AsRegexp() *LogStrategy {
+	ws.IsRegexp = true
+	return ws
+}
+
+// Submatch configures a function that will be called with the result of
+// [regexp.Regexp.FindAllSubmatch], allowing the caller to process the results.
+// If the callback returns nil, the strategy will be considered successful.
+// Returning a [PermanentError] will stop the wait and return an error, otherwise
+// it will retry until the timeout is reached.
+// [LogStrategy.Occurrence] is ignored if this option is set.
+func (ws *LogStrategy) Submatch(callback func(pattern string, matches [][][]byte) error) *LogStrategy {
+	ws.submatchCallback = callback
+
+	return ws
+}
+
+// WithStartupTimeout can be used to change the default startup timeout
+func (ws *LogStrategy) WithStartupTimeout(timeout time.Duration) *LogStrategy {
+	ws.timeout = &timeout
+	return ws
+}
+
+// WithPollInterval can be used to override the default polling interval of 100 milliseconds
+func (ws *LogStrategy) WithPollInterval(pollInterval time.Duration) *LogStrategy {
+	ws.PollInterval = pollInterval
+	return ws
+}
+
+// WithOccurrence can be used to change the minimum number of times the log entry
+// must appear before the strategy is considered successful. Non-positive values
+// are normalized to 1.
+func (ws *LogStrategy) WithOccurrence(o int) *LogStrategy {
+	// the number of occurrence needs to be positive
+	if o <= 0 {
+		o = 1
+	}
+	ws.Occurrence = o
+	return ws
+}
+
+// ForLog is the default construction for the fluid interface.
+//
+// For Example:
+//
+//	wait.
+//		ForLog("some text").
+//		WithPollInterval(1 * time.Second)
+func ForLog(log string) *LogStrategy {
+	return NewLogStrategy(log)
+}
+
+// Timeout returns the startup timeout configured for the strategy, or nil if
+// the default should be used.
+func (ws *LogStrategy) Timeout() *time.Duration {
+	return ws.timeout
+}
+
+// WaitUntilReady implements Strategy.WaitUntilReady
+func (ws *LogStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error {
+	timeout := defaultStartupTimeout()
+	if ws.timeout != nil {
+		timeout = *ws.timeout
+	}
+
+	// Select the check implementation once, based on how the strategy was
+	// configured. NOTE: regexp.MustCompile panics if ws.Log is not a valid
+	// pattern when a regexp-based check is selected.
+	switch {
+	case ws.submatchCallback != nil:
+		ws.re = regexp.MustCompile(ws.Log)
+		ws.check = ws.checkSubmatch
+	case ws.IsRegexp:
+		ws.re = regexp.MustCompile(ws.Log)
+		ws.check = ws.checkRegexp
+	default:
+		ws.log = []byte(ws.Log)
+		ws.check = ws.checkCount
+	}
+
+	ctx, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
+
+	var lastLen int
+	var lastError error
+	for {
+		select {
+		case <-ctx.Done():
+			// Surface the last check failure alongside the context error.
+			return errors.Join(lastError, ctx.Err())
+		default:
+			checkErr := checkTarget(ctx, target)
+
+			reader, err := target.Logs(ctx)
+			if err != nil {
+				// TODO: fix as this will wait for timeout if the logs are not available.
+				time.Sleep(ws.PollInterval)
+				continue
+			}
+
+			b, err := io.ReadAll(reader)
+			if err != nil {
+				// TODO: fix as this will wait for timeout if the logs are not readable.
+				time.Sleep(ws.PollInterval)
+				continue
+			}
+
+			if lastLen == len(b) && checkErr != nil {
+				// Log length hasn't changed so we're not making progress.
+				return checkErr
+			}
+
+			if err := ws.check(b); err != nil {
+				var errPermanent *PermanentError
+				if errors.As(err, &errPermanent) {
+					// A PermanentError aborts the wait immediately.
+					return err
+				}
+
+				// Remember the failure and log size so the next iteration can
+				// detect lack of progress, then retry after the poll interval.
+				lastError = err
+				lastLen = len(b)
+				time.Sleep(ws.PollInterval)
+				continue
+			}
+
+			return nil
+		}
+	}
+}
+
+// checkCount checks if the log entry is present in the logs using a string count.
+// It succeeds once the entry appears at least [LogStrategy.Occurrence] times.
+func (ws *LogStrategy) checkCount(b []byte) error {
+	if count := bytes.Count(b, ws.log); count < ws.Occurrence {
+		return fmt.Errorf("%q matched %d times, expected %d", ws.Log, count, ws.Occurrence)
+	}
+
+	return nil
+}
+
+// checkRegexp checks if the log entry is present in the logs using a regexp count.
+// It succeeds once the pattern matches at least [LogStrategy.Occurrence] times.
+func (ws *LogStrategy) checkRegexp(b []byte) error {
+	if matches := ws.re.FindAll(b, -1); len(matches) < ws.Occurrence {
+		return fmt.Errorf("`%s` matched %d times, expected %d", ws.Log, len(matches), ws.Occurrence)
+	}
+
+	return nil
+}
+
+// checkSubmatch checks if the log entry is present in the logs using a regexp sub match callback.
+// The callback decides success (nil), retry (any error), or abort ([PermanentError]).
+func (ws *LogStrategy) checkSubmatch(b []byte) error {
+	return ws.submatchCallback(ws.Log, ws.re.FindAllSubmatch(b, -1))
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/nop.go b/vendor/github.com/testcontainers/testcontainers-go/wait/nop.go
new file mode 100644
index 0000000..c47d83d
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/nop.go
@@ -0,0 +1,81 @@
+package wait
+
+import (
+ "context"
+ "io"
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/go-connections/nat"
+
+ "github.com/testcontainers/testcontainers-go/exec"
+)
+
+var (
+	_ Strategy        = (*NopStrategy)(nil)
+	_ StrategyTimeout = (*NopStrategy)(nil)
+)
+
+// NopStrategy is a wait strategy that delegates readiness entirely to a
+// user-supplied function, useful for tests and custom checks.
+type NopStrategy struct {
+	timeout        *time.Duration
+	waitUntilReady func(context.Context, StrategyTarget) error
+}
+
+// ForNop constructs a NopStrategy that reports readiness using the given function.
+func ForNop(
+	waitUntilReady func(context.Context, StrategyTarget) error,
+) *NopStrategy {
+	return &NopStrategy{
+		waitUntilReady: waitUntilReady,
+	}
+}
+
+// Timeout returns the configured startup timeout, or nil if unset.
+func (ws *NopStrategy) Timeout() *time.Duration {
+	return ws.timeout
+}
+
+// WithStartupTimeout can be used to change the default startup timeout.
+func (ws *NopStrategy) WithStartupTimeout(timeout time.Duration) *NopStrategy {
+	ws.timeout = &timeout
+	return ws
+}
+
+// WaitUntilReady implements Strategy by invoking the user-supplied function.
+func (ws *NopStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error {
+	return ws.waitUntilReady(ctx, target)
+}
+
+// NopStrategyTarget is a stub StrategyTarget whose methods return fixed values,
+// serving canned logs/state from its fields. Intended for testing strategies.
+type NopStrategyTarget struct {
+	ReaderCloser   io.ReadCloser   // returned by Logs and CopyFileFromContainer
+	ContainerState container.State // returned (by address) from State
+}
+
+// Host always returns an empty host with no error.
+func (st NopStrategyTarget) Host(_ context.Context) (string, error) {
+	return "", nil
+}
+
+// Inspect always returns nil with no error.
+func (st NopStrategyTarget) Inspect(_ context.Context) (*container.InspectResponse, error) {
+	return nil, nil
+}
+
+// Deprecated: use Inspect instead
+func (st NopStrategyTarget) Ports(_ context.Context) (nat.PortMap, error) {
+	return nil, nil
+}
+
+// MappedPort echoes back the requested port unchanged.
+func (st NopStrategyTarget) MappedPort(_ context.Context, n nat.Port) (nat.Port, error) {
+	return n, nil
+}
+
+// Logs returns the canned ReaderCloser.
+func (st NopStrategyTarget) Logs(_ context.Context) (io.ReadCloser, error) {
+	return st.ReaderCloser, nil
+}
+
+// Exec reports a zero exit code with no output.
+func (st NopStrategyTarget) Exec(_ context.Context, _ []string, _ ...exec.ProcessOption) (int, io.Reader, error) {
+	return 0, nil, nil
+}
+
+// State returns the address of the canned ContainerState.
+func (st NopStrategyTarget) State(_ context.Context) (*container.State, error) {
+	return &st.ContainerState, nil
+}
+
+// CopyFileFromContainer returns the canned ReaderCloser regardless of the path.
+func (st NopStrategyTarget) CopyFileFromContainer(context.Context, string) (io.ReadCloser, error) {
+	return st.ReaderCloser, nil
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/sql.go b/vendor/github.com/testcontainers/testcontainers-go/wait/sql.go
new file mode 100644
index 0000000..1d09eda
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/sql.go
@@ -0,0 +1,118 @@
+package wait
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "time"
+
+ "github.com/docker/go-connections/nat"
+)
+
+var (
+	_ Strategy        = (*waitForSQL)(nil)
+	_ StrategyTimeout = (*waitForSQL)(nil)
+)
+
+// defaultForSQLQuery is the probe query used unless overridden with WithQuery.
+const defaultForSQLQuery = "SELECT 1"
+
+// ForSQL constructs a new waitForSql strategy for the given driver.
+// The url function builds the driver-specific DSN from the resolved host and
+// mapped port.
+func ForSQL(port nat.Port, driver string, url func(host string, port nat.Port) string) *waitForSQL {
+	return &waitForSQL{
+		Port:           port,
+		URL:            url,
+		Driver:         driver,
+		startupTimeout: defaultStartupTimeout(),
+		PollInterval:   defaultPollInterval(),
+		query:          defaultForSQLQuery,
+	}
+}
+
+// waitForSQL waits until a SQL query can be executed successfully against the
+// container's mapped port using the configured driver.
+type waitForSQL struct {
+	timeout *time.Duration
+
+	URL            func(host string, port nat.Port) string // builds the DSN from host and mapped port
+	Driver         string                                  // database/sql driver name
+	Port           nat.Port                                // container port to resolve via MappedPort
+	startupTimeout time.Duration
+	PollInterval   time.Duration
+	query          string // probe query; defaults to defaultForSQLQuery
+}
+
+// WithStartupTimeout can be used to change the default startup timeout
+func (w *waitForSQL) WithStartupTimeout(timeout time.Duration) *waitForSQL {
+	w.timeout = &timeout
+	return w
+}
+
+// WithPollInterval can be used to override the default polling interval of 100 milliseconds
+func (w *waitForSQL) WithPollInterval(pollInterval time.Duration) *waitForSQL {
+	w.PollInterval = pollInterval
+	return w
+}
+
+// WithQuery can be used to override the default query used in the strategy.
+func (w *waitForSQL) WithQuery(query string) *waitForSQL {
+	w.query = query
+	return w
+}
+
+// Timeout returns the configured startup timeout, or nil if unset.
+func (w *waitForSQL) Timeout() *time.Duration {
+	return w.timeout
+}
+
+// WaitUntilReady repeatedly tries to run "SELECT 1" or user defined query on the given port using sql and driver.
+//
+// If it doesn't succeed until the timeout value which defaults to 60 seconds, it will return an error.
+func (w *waitForSQL) WaitUntilReady(ctx context.Context, target StrategyTarget) error {
+	timeout := defaultStartupTimeout()
+	if w.timeout != nil {
+		timeout = *w.timeout
+	}
+
+	ctx, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
+
+	host, err := target.Host(ctx)
+	if err != nil {
+		return err
+	}
+
+	ticker := time.NewTicker(w.PollInterval)
+	defer ticker.Stop()
+
+	// First wait for the mapped port to become available: keep polling until
+	// MappedPort returns a non-empty port or the context expires.
+	var port nat.Port
+	port, err = target.MappedPort(ctx, w.Port)
+
+	for port == "" {
+		select {
+		case <-ctx.Done():
+			// Include the last MappedPort error with the context error.
+			return fmt.Errorf("%w: %w", ctx.Err(), err)
+		case <-ticker.C:
+			if err := checkTarget(ctx, target); err != nil {
+				return err
+			}
+			port, err = target.MappedPort(ctx, w.Port)
+		}
+	}
+
+	db, err := sql.Open(w.Driver, w.URL(host, port))
+	if err != nil {
+		return fmt.Errorf("sql.Open: %w", err)
+	}
+	defer db.Close()
+	// Then poll the probe query until it succeeds or the context expires.
+	for {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-ticker.C:
+			if err := checkTarget(ctx, target); err != nil {
+				return err
+			}
+			if _, err := db.ExecContext(ctx, w.query); err != nil {
+				continue
+			}
+			return nil
+		}
+	}
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/tls.go b/vendor/github.com/testcontainers/testcontainers-go/wait/tls.go
new file mode 100644
index 0000000..ab904b2
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/tls.go
@@ -0,0 +1,167 @@
+package wait
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io"
+ "time"
+)
+
+// Validate we implement interface.
+var _ Strategy = (*TLSStrategy)(nil)
+
+// TLSStrategy is a strategy for handling TLS.
+// It waits for certificate material to appear in the container and assembles a
+// [tls.Config] from it.
+type TLSStrategy struct {
+	// General Settings.
+	timeout      *time.Duration
+	pollInterval time.Duration
+
+	// Custom Settings.
+	certFiles *x509KeyPair // optional client cert/key pair to load from the container
+	rootFiles []string     // optional PEM files appended to the root CA pool
+
+	// State.
+	tlsConfig *tls.Config
+}
+
+// x509KeyPair is a pair of certificate and key files.
+type x509KeyPair struct {
+	certPEMFile string
+	keyPEMFile  string
+}
+
+// ForTLSCert returns a CertStrategy that will add a Certificate to the [tls.Config]
+// constructed from PEM formatted certificate key file pair in the container.
+func ForTLSCert(certPEMFile, keyPEMFile string) *TLSStrategy {
+	return &TLSStrategy{
+		certFiles: &x509KeyPair{
+			certPEMFile: certPEMFile,
+			keyPEMFile:  keyPEMFile,
+		},
+		tlsConfig:    &tls.Config{},
+		pollInterval: defaultPollInterval(),
+	}
+}
+
+// ForTLSRootCAs returns a CertStrategy that sets the root CAs for the [tls.Config]
+// using the given PEM formatted files from the container.
+func ForTLSRootCAs(pemFiles ...string) *TLSStrategy {
+	return &TLSStrategy{
+		rootFiles:    pemFiles,
+		tlsConfig:    &tls.Config{},
+		pollInterval: defaultPollInterval(),
+	}
+}
+
+// WithRootCAs sets the root CAs for the [tls.Config] using the given files from
+// the container.
+func (ws *TLSStrategy) WithRootCAs(files ...string) *TLSStrategy {
+	ws.rootFiles = files
+	return ws
+}
+
+// WithCert sets the [tls.Config] Certificates using the given files from the container.
+func (ws *TLSStrategy) WithCert(certPEMFile, keyPEMFile string) *TLSStrategy {
+	ws.certFiles = &x509KeyPair{
+		certPEMFile: certPEMFile,
+		keyPEMFile:  keyPEMFile,
+	}
+	return ws
+}
+
+// WithServerName sets the server for the [tls.Config].
+func (ws *TLSStrategy) WithServerName(serverName string) *TLSStrategy {
+	ws.tlsConfig.ServerName = serverName
+	return ws
+}
+
+// WithStartupTimeout can be used to change the default startup timeout.
+func (ws *TLSStrategy) WithStartupTimeout(startupTimeout time.Duration) *TLSStrategy {
+	ws.timeout = &startupTimeout
+	return ws
+}
+
+// WithPollInterval can be used to override the default polling interval of 100 milliseconds.
+func (ws *TLSStrategy) WithPollInterval(pollInterval time.Duration) *TLSStrategy {
+	ws.pollInterval = pollInterval
+	return ws
+}
+
+// TLSConfig returns the TLS config once the strategy is ready.
+// If the strategy is nil, it returns nil.
+// The returned config is only fully populated after WaitUntilReady succeeds.
+func (ws *TLSStrategy) TLSConfig() *tls.Config {
+	if ws == nil {
+		return nil
+	}
+
+	return ws.tlsConfig
+}
+
+// WaitUntilReady implements the [Strategy] interface.
+// It waits for the CA, client cert and key files to be available in the container and
+// uses them to setup the TLS config.
+func (ws *TLSStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error {
+	// Compose one ForFile sub-strategy per file; each matcher parses its file
+	// and mutates ws.tlsConfig as a side effect.
+	size := len(ws.rootFiles)
+	if ws.certFiles != nil {
+		size += 2
+	}
+	strategies := make([]Strategy, 0, size)
+	for _, file := range ws.rootFiles {
+		strategies = append(strategies,
+			ForFile(file).WithMatcher(func(r io.Reader) error {
+				buf, err := io.ReadAll(r)
+				if err != nil {
+					return fmt.Errorf("read CA cert file %q: %w", file, err)
+				}
+
+				// Lazily create the pool so an empty rootFiles leaves RootCAs nil.
+				if ws.tlsConfig.RootCAs == nil {
+					ws.tlsConfig.RootCAs = x509.NewCertPool()
+				}
+
+				if !ws.tlsConfig.RootCAs.AppendCertsFromPEM(buf) {
+					return fmt.Errorf("invalid CA cert file %q", file)
+				}
+
+				return nil
+			}).WithPollInterval(ws.pollInterval),
+		)
+	}
+
+	if ws.certFiles != nil {
+		// certPEMBlock is captured by both matchers: the cert matcher fills it
+		// and the key matcher consumes it. NOTE(review): this assumes ForAll
+		// evaluates the strategies in order — confirm against ForAll.
+		var certPEMBlock []byte
+		strategies = append(strategies,
+			ForFile(ws.certFiles.certPEMFile).WithMatcher(func(r io.Reader) error {
+				var err error
+				if certPEMBlock, err = io.ReadAll(r); err != nil {
+					return fmt.Errorf("read certificate cert %q: %w", ws.certFiles.certPEMFile, err)
+				}
+
+				return nil
+			}).WithPollInterval(ws.pollInterval),
+			ForFile(ws.certFiles.keyPEMFile).WithMatcher(func(r io.Reader) error {
+				keyPEMBlock, err := io.ReadAll(r)
+				if err != nil {
+					return fmt.Errorf("read certificate key %q: %w", ws.certFiles.keyPEMFile, err)
+				}
+
+				cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
+				if err != nil {
+					return fmt.Errorf("x509 key pair %q %q: %w", ws.certFiles.certPEMFile, ws.certFiles.keyPEMFile, err)
+				}
+
+				ws.tlsConfig.Certificates = []tls.Certificate{cert}
+
+				return nil
+			}).WithPollInterval(ws.pollInterval),
+		)
+	}
+
+	// Run all sub-strategies under a single multi-strategy, propagating the
+	// configured timeout if any.
+	strategy := ForAll(strategies...)
+	if ws.timeout != nil {
+		strategy.WithStartupTimeout(*ws.timeout)
+	}
+
+	return strategy.WaitUntilReady(ctx, target)
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/wait.go b/vendor/github.com/testcontainers/testcontainers-go/wait/wait.go
new file mode 100644
index 0000000..ca5a7db
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/wait.go
@@ -0,0 +1,65 @@
+package wait
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/go-connections/nat"
+
+ "github.com/testcontainers/testcontainers-go/exec"
+)
+
+// Strategy defines the basic interface for a Wait Strategy
+type Strategy interface {
+	WaitUntilReady(context.Context, StrategyTarget) error
+}
+
+// StrategyTimeout allows MultiStrategy to configure a Strategy's Timeout
+type StrategyTimeout interface {
+	Timeout() *time.Duration
+}
+
+// StrategyTarget is the container-like object a Strategy waits on. It exposes
+// the subset of container operations strategies need: addressing, inspection,
+// logs, command execution, state, and file retrieval.
+type StrategyTarget interface {
+	Host(context.Context) (string, error)
+	Inspect(context.Context) (*container.InspectResponse, error)
+	Ports(ctx context.Context) (nat.PortMap, error) // Deprecated: use Inspect instead
+	MappedPort(context.Context, nat.Port) (nat.Port, error)
+	Logs(context.Context) (io.ReadCloser, error)
+	Exec(context.Context, []string, ...exec.ProcessOption) (int, io.Reader, error)
+	State(context.Context) (*container.State, error)
+	CopyFileFromContainer(ctx context.Context, filePath string) (io.ReadCloser, error)
+}
+
+// checkTarget fetches the target's current state and verifies it is still
+// viable to wait on (i.e. running).
+func checkTarget(ctx context.Context, target StrategyTarget) error {
+	state, err := target.State(ctx)
+	if err != nil {
+		return fmt.Errorf("get state: %w", err)
+	}
+
+	return checkState(state)
+}
+
+// checkState returns nil while the container is running, and a descriptive
+// error for terminal states (OOM-killed, exited, or anything unexpected).
+func checkState(state *container.State) error {
+	switch {
+	case state.Running:
+		return nil
+	case state.OOMKilled:
+		return errors.New("container crashed with out-of-memory (OOMKilled)")
+	case state.Status == "exited":
+		return fmt.Errorf("container exited with code %d", state.ExitCode)
+	default:
+		return fmt.Errorf("unexpected container status %q", state.Status)
+	}
+}
+
+// defaultStartupTimeout is the fallback timeout applied when a strategy has none.
+func defaultStartupTimeout() time.Duration {
+	return 60 * time.Second
+}
+
+// defaultPollInterval is the fallback polling interval for all strategies.
+func defaultPollInterval() time.Duration {
+	return 100 * time.Millisecond
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/walk.go b/vendor/github.com/testcontainers/testcontainers-go/wait/walk.go
new file mode 100644
index 0000000..4685e50
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/walk.go
@@ -0,0 +1,74 @@
+package wait
+
+import (
+ "errors"
+)
+
+var (
+	// VisitStop is used as a return value from [VisitFunc] to stop the walk.
+	// It is not returned as an error by any function.
+	VisitStop = errors.New("stop the walk")
+
+	// VisitRemove is used as a return value from [VisitFunc] to have the current node removed.
+	// It is not returned as an error by any function.
+	VisitRemove = errors.New("remove this strategy")
+)
+
+// VisitFunc is a function that visits a strategy node.
+// If it returns [VisitStop], the walk stops.
+// If it returns [VisitRemove], the current node is removed.
+// The two sentinels may be combined (e.g. with errors.Join) to remove and stop.
+type VisitFunc func(root Strategy) error
+
+// Walk walks the strategies tree and calls the visit function for each node.
+// The [VisitStop] and [VisitRemove] sentinels are consumed here and not
+// surfaced to the caller; any other error from the visit function is returned.
+func Walk(root *Strategy, visit VisitFunc) error {
+	if root == nil {
+		return errors.New("root strategy is nil")
+	}
+
+	if err := walk(root, visit); err != nil {
+		if errors.Is(err, VisitRemove) || errors.Is(err, VisitStop) {
+			return nil
+		}
+		return err
+	}
+
+	return nil
+}
+
+// walk walks the strategies tree and calls the visit function for each node.
+// It returns an error if the visit function returns an error.
+// Sentinel errors propagate upward so parents can act on them: a [VisitRemove]
+// result nils/removes the node, a [VisitStop] result halts the traversal.
+func walk(root *Strategy, visit VisitFunc) error {
+	if *root == nil {
+		// No strategy.
+		return nil
+	}
+
+	// Allow the visit function to customize the behaviour of the walk before visiting the children.
+	if err := visit(*root); err != nil {
+		if errors.Is(err, VisitRemove) {
+			*root = nil
+		}
+
+		return err
+	}
+
+	if s, ok := (*root).(*MultiStrategy); ok {
+		// Iterate by a manual index: when a child is removed, i is NOT
+		// incremented, so the element shifted into position i is visited next.
+		var i int
+		for range s.Strategies {
+			if err := walk(&s.Strategies[i], visit); err != nil {
+				if errors.Is(err, VisitRemove) {
+					s.Strategies = append(s.Strategies[:i], s.Strategies[i+1:]...)
+					// The visit result may combine both sentinels (remove and stop).
+					if errors.Is(err, VisitStop) {
+						return VisitStop
+					}
+					continue
+				}
+
+				return err
+			}
+			i++
+		}
+	}
+
+	return nil
+}